prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---|
import pandas as pd
from functions import downloadECBBonds
# The path to the folder in the S3 bucket in which the data is stored
eikon_data_folder = "https://s3groupsweden.s3.eu-central-1.amazonaws.com/Data/EIKON/"
# Get dataframes with the data in the files holdingsECBEnvironment.txt and holdingsECBGeneralInfo.txt
def get_eikon_data_general():
eikon_data_environment =
|
pd.read_csv(eikon_data_folder+"holdingsECBEnvironment.txt",sep="\t")
|
pandas.read_csv
|
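For reference, a minimal sketch of how the completed helper could look once the pd.read_csv completion above is inserted; reading the second file named in the prompt's comment and returning both frames are assumptions, not part of the original row.
def get_eikon_data_general():
    # Read both EIKON holdings files from the S3 folder (tab-separated).
    eikon_data_environment = pd.read_csv(
        eikon_data_folder + "holdingsECBEnvironment.txt", sep="\t")
    eikon_data_general_info = pd.read_csv(
        eikon_data_folder + "holdingsECBGeneralInfo.txt", sep="\t")
    # Returning both frames is an assumed signature for this sketch.
    return eikon_data_environment, eikon_data_general_info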
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
""" Additional utils """
import csv
import json
import pandas as pd
from colorama import Fore, Back
from tabulate import tabulate
OK_GREEN = "\033[92m"
GREY = "\033[90m"
END = "\033[0m"
def print_yellow(string):
"""Print yellow text"""
print(f"{Fore.YELLOW}{string}{END}")
def print_green(string):
"""Print green text"""
print(f"{Fore.GREEN}{string}{END}")
def print_grey(string):
"""Print grey text"""
print(f"{Fore.GREY}{string}{END}")
def print_red(string):
"""Print red text"""
print(f"{Fore.RED}{string}{END}")
def convert_red(string):
"""Return red text"""
return f"{Fore.RED}{string}{END}"
def convert_green(string):
"""Return green text"""
return f"{Fore.GREEN}{string}{END}"
def convert_yellow(string):
"""Return green text"""
return f"{Fore.YELLOW}{string}{END}"
def pretty_grid_keys(output: dict):
"""Print grid keys from dictionary"""
df =
|
pd.DataFrame(output)
|
pandas.DataFrame
|
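A hedged sketch of how pretty_grid_keys might continue after the pd.DataFrame completion, using the tabulate import already present in the prompt; the chosen table format is an assumption.
def pretty_grid_keys(output: dict):
    """Print grid keys from dictionary"""
    df = pd.DataFrame(output)
    # Render the frame as a plain-text grid; tablefmt="grid" is an assumed choice.
    print(tabulate(df, headers="keys", tablefmt="grid"))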
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
from swstats import *
from scipy.stats import ttest_ind
import xlsxwriter
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.proportion import proportions_ztest
debugging = False
def pToSign(pval):
if pval < .001:
return "***"
elif pval < .01:
return "**"
elif pval < .05:
return "*"
elif pval < .1:
return "+"
else:
return ""
def analyzeExperiment_ContinuousVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1mean = np.mean(order_value_control_group)
arm1sd = np.std(order_value_control_group)
arm1text = "" + "{:.2f}".format(arm1mean) + " (" + "{:.2f}".format(arm1sd) + ")"
# Effect of Arm 2
arm2mean = np.mean(order_value_arm2_group)
arm2sd = np.std(order_value_arm2_group)
tscore, pval2 = ttest_ind(order_value_control_group, order_value_arm2_group)
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2mean) + " (" + "{:.2f}".format(arm2sd) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3mean = np.mean(order_value_arm3_group)
arm3sd = np.std(order_value_arm3_group)
tscore, pval3 = ttest_ind(order_value_control_group, order_value_arm3_group)
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3mean) + " (" + "{:.2f}".format(arm3sd) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4mean = np.mean(order_value_arm4_group)
arm4sd = np.std(order_value_arm4_group)
tscore, pval4 = ttest_ind(order_value_control_group, order_value_arm4_group)
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4mean) + " (" + "{:.2f}".format(arm4sd) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
tscore, pval2to4 = ttest_ind(order_value_arm2_group, order_value_arm4_group)
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4mean - arm2mean) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
tscore, pval3to4 = ttest_ind(order_value_arm3_group, order_value_arm4_group)
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4mean - arm3mean) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeExperiment_BinaryVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1Successes = sum(order_value_control_group.isin([True, 1]))
arm1Count = sum(order_value_control_group.isin([True, False, 1, 0]))
arm1PercentSuccess = arm1Successes/arm1Count
arm1text = "" + "{:.2f}".format(arm1PercentSuccess) + " (" + "{:.0f}".format(arm1Successes) + ")"
# Effect of Arm 2
arm2Successes = sum(order_value_arm2_group.isin([True, 1]))
arm2Count = sum(order_value_arm2_group.isin([True, False, 1, 0]))
arm2PercentSuccess = arm2Successes/arm2Count
zstat, pval2 = proportions_ztest(count=[arm1Successes,arm2Successes], nobs=[arm1Count,arm2Count], alternative='two-sided')
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2PercentSuccess) + " (" + "{:.0f}".format(arm2Successes) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3Successes = sum(order_value_arm3_group.isin([True, 1]))
arm3Count = sum(order_value_arm3_group.isin([True, False, 1, 0]))
arm3PercentSuccess = arm3Successes/arm3Count
zstat, pval3 = proportions_ztest(count=[arm1Successes,arm3Successes], nobs=[arm1Count,arm3Count], alternative='two-sided')
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3PercentSuccess) + " (" + "{:.0f}".format(arm3Successes) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4Successes = sum(order_value_arm4_group.isin([True, 1]))
arm4Count = sum(order_value_arm4_group.isin([True, False, 1, 0]))
arm4PercentSuccess = arm4Successes/arm4Count
zstat, pval4 = proportions_ztest(count=[arm1Successes,arm4Successes], nobs=[arm1Count,arm4Count], alternative='two-sided')
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4PercentSuccess) + " (" + "{:.0f}".format(arm4Successes) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
zstat, pval2to4 = proportions_ztest(count=[arm2Successes,arm4Successes], nobs=[arm2Count,arm4Count], alternative='two-sided')
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm2PercentSuccess) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
zstat, pval3to4 = proportions_ztest(count=[arm3Successes,arm4Successes], nobs=[arm3Count,arm4Count], alternative='two-sided')
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm3PercentSuccess) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeResults(dta, outputFileName, scoringVars, surveyVersion, primaryOnly=True):
if primaryOnly:
dta = dta[dta.IsPrimaryWave].copy()
dataDir = "C:/Dev/src/ssascams/data/"
''' Analyze the answers'''
writer = pd.ExcelWriter(dataDir + 'RESULTS_' + outputFileName + '.xlsx', engine='xlsxwriter')
# ###############
# Export summary stats
# ###############
demographicVars = ['trustScore', 'TotalIncome', 'incomeAmount', 'Race', 'race5', 'employment3', 'educYears', 'Married', 'marriedI', 'Age', 'ageYears', 'Gender', 'genderI']
allSummaryVars = ["percentCorrect", "surveyArm", "Wave", "daysFromTrainingToTest"] + scoringVars + demographicVars
summaryStats = dta[allSummaryVars].describe()
summaryStats.to_excel(writer, sheet_name="summary_FullPop", startrow=0, header=True, index=True)
grouped = dta[allSummaryVars].groupby(["surveyArm"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['VarName', 'Metric'], inplace=True)
summaryStats.to_excel(writer, sheet_name="summary_ByArm", startrow=0, header=True, index=False)
if not primaryOnly:
grouped = dta[allSummaryVars].groupby(["surveyArm", "Wave"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['Wave','VarName', 'Metric'], inplace=True)
# grouped.describe().reset_index().pivot(index='name', values='score', columns='level_1')
summaryStats.to_excel(writer, sheet_name="summary_ByArmAndWave", startrow=0, header=True, index=False)
# summaryStats.to_csv(dataDir + "RESULTS_" + outputFileName + '.csv')
# ###############
# RQ1: What is the effect?
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numFakeLabeledReal")
row3 = analyzeExperiment_ContinuousVar(dta, "numRealLabeledFake")
row4 = analyzeExperiment_ContinuousVar(dta, "percentCorrect")
|
pd.DataFrame([row1, row2, row3, row4])
|
pandas.DataFrame
|
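A hedged sketch of how the RQ1 block could continue once the pd.DataFrame completion is inserted: the four result rows are collected into a frame and written to the ExcelWriter opened above; the variable and sheet names are assumptions.
rq1_results = pd.DataFrame([row1, row2, row3, row4])
# Sheet name is an assumption, following the summary_* naming used earlier.
rq1_results.to_excel(writer, sheet_name="RQ1_Results", startrow=0, header=True, index=False)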
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from itertools import product
import units
import moments
def _format_obs_history(obs_history, field, save_to_disk=None):
"""
Parameters
----------
obs_history : Pandas.DataFrame
field : Pandas.DataFrame
save_to_disk : str
Note
----
We use the dithered RA, Dec and express all positions in arcsec.
Returns
-------
DataFrame obs_history, formatted with new column conventions and units
"""
# Join with Field table
obs_history =
|
pd.merge(obs_history, field, left_on='Field_fieldID', right_on='fieldID')
|
pandas.merge
|
import pandas as pd
#designed to be run from the root folder of the project
# read in raw datasets
df =
|
pd.read_csv("data/raw/world-data-gapminder_raw.csv")
|
pandas.read_csv
|
"""
Preprocess sites scripts.
Written by <NAME>.
Winter 2020
"""
import os
import configparser
import json
import csv
import math
import glob
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Polygon, MultiPolygon, mapping, shape, MultiLineString, LineString
from shapely.ops import transform, unary_union, nearest_points
import fiona
from fiona.crs import from_epsg
import rasterio
from rasterio.mask import mask
from rasterstats import zonal_stats
import networkx as nx
from rtree import index
import numpy as np
import random
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def find_country_list(continent_list):
"""
This function produces country information by continent.
Parameters
----------
continent_list : list
Contains the name of the desired continent, e.g. ['Africa']
Returns
-------
countries : list of dicts
Contains all desired country information for countries in
the stated continent.
"""
glob_info_path = os.path.join(BASE_PATH, 'global_information.csv')
countries = pd.read_csv(glob_info_path, encoding = "ISO-8859-1")
countries = countries[countries.exclude != 1]
if len(continent_list) > 0:
data = countries.loc[countries['continent'].isin(continent_list)]
else:
data = countries
output = []
for index, country in data.iterrows():
output.append({
'country_name': country['country'],
'iso3': country['ISO_3digit'],
'iso2': country['ISO_2digit'],
'regional_level': country['lowest'],
'region': country['region']
})
return output
def process_coverage_shapes(country):
"""
Load in coverage maps, process and export for each country.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
iso2 = country['iso2']
technologies = [
'GSM',
'3G',
'4G'
]
for tech in technologies:
folder_coverage = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
filename = 'coverage_{}.shp'.format(tech)
path_output = os.path.join(folder_coverage, filename)
if os.path.exists(path_output):
continue
print('----')
print('Working on {} in {}'.format(tech, iso3))
filename = 'Inclusions_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
inclusions = gpd.read_file(os.path.join(folder, filename))
if iso2 in inclusions['CNTRY_ISO2'].values:
filename = 'MCE_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
else:
filename = 'OCI_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_OCI')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
if len(coverage) > 0:
print('Dissolving polygons')
coverage['dissolve'] = 1
coverage = coverage.dissolve(by='dissolve', aggfunc='sum')
coverage = coverage.to_crs('epsg:3857')
print('Excluding small shapes')
coverage['geometry'] = coverage.apply(clean_coverage,axis=1)
print('Removing empty and null geometries')
coverage = coverage[~(coverage['geometry'].is_empty)]
coverage = coverage[coverage['geometry'].notnull()]
print('Simplifying geometries')
coverage['geometry'] = coverage.simplify(
tolerance = 0.005,
preserve_topology=True).buffer(0.0001).simplify(
tolerance = 0.005,
preserve_topology=True
)
coverage = coverage.to_crs('epsg:4326')
if not os.path.exists(folder_coverage):
os.makedirs(folder_coverage)
coverage.to_file(path_output, driver='ESRI Shapefile')
return #print('Processed coverage shapes')
def process_regional_coverage(country):
"""
This functions estimates the area covered by each cellular
technology.
Parameters
----------
country : dict
Contains specific country parameters.
Returns
-------
output : dict
Results for cellular coverage by each technology for
each region.
"""
level = country['regional_level']
iso3 = country['iso3']
gid_level = 'GID_{}'.format(level)
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
technologies = [
'GSM',
'3G',
'4G'
]
output = {}
for tech in technologies:
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
path = os.path.join(folder, 'coverage_{}.shp'.format(tech))
if os.path.exists(path):
coverage = gpd.read_file(path, encoding="utf-8")
segments = gpd.overlay(regions, coverage, how='intersection')
tech_coverage = {}
for idx, region in segments.iterrows():
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
tech_coverage[region[gid_level]] = area_km2
output[tech] = tech_coverage
return output
def get_regional_data(country):
"""
Extract regional data including luminosity and population.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
level = country['regional_level']
gid_level = 'GID_{}'.format(level)
path_output = os.path.join(DATA_INTERMEDIATE, iso3, 'regional_coverage.csv')
if os.path.exists(path_output):
return #print('Regional data already exists')
path_country = os.path.join(DATA_INTERMEDIATE, iso3,
'national_outline.shp')
coverage = process_regional_coverage(country)
single_country = gpd.read_file(path_country)
# print('----')
# print('working on {}'.format(iso3))
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3,
'settlements.tif')
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
results = []
for index, region in regions.iterrows():
with rasterio.open(path_settlements) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
if 'GSM' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['GSM']:
coverage_GSM_km2 = coverage['GSM'][region[gid_level]]
else:
coverage_GSM_km2 = 0
else:
coverage_GSM_km2 = 0
if '3G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['3G']:
coverage_3G_km2 = coverage['3G'][region[gid_level]]
else:
coverage_3G_km2 = 0
else:
coverage_3G_km2 = 0
if '4G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['4G']:
coverage_4G_km2 = coverage['4G'][region[gid_level]]
else:
coverage_4G_km2 = 0
else:
coverage_4G_km2 = 0
results.append({
'GID_0': region['GID_0'],
'GID_id': region[gid_level],
'GID_level': gid_level,
# 'mean_luminosity_km2': luminosity_summation / area_km2 if luminosity_summation else 0,
'population': population_summation,
# 'pop_under_10_pop': pop_under_10_pop,
'area_km2': area_km2,
'population_km2': population_summation / area_km2 if population_summation else 0,
# 'pop_adults_km2': ((population_summation - pop_under_10_pop) /
# area_km2 if pop_under_10_pop else 0),
'coverage_GSM_percent': round(coverage_GSM_km2 / area_km2 * 100 if coverage_GSM_km2 else 0, 1),
'coverage_3G_percent': round(coverage_3G_km2 / area_km2 * 100 if coverage_3G_km2 else 0, 1),
'coverage_4G_percent': round(coverage_4G_km2 / area_km2 * 100 if coverage_4G_km2 else 0, 1),
})
# print('Working on backhaul')
backhaul_lut = estimate_backhaul(iso3, country['region'], '2025')
# print('Working on estimating sites')
results = estimate_sites(results, iso3, backhaul_lut)
results_df = pd.DataFrame(results)
results_df.to_csv(path_output, index=False)
# print('Completed {}'.format(single_country.NAME_0.values[0]))
return #print('Completed night lights data querying')
def find_pop_under_10(region, iso3):
"""
Find the estimated population under 10 years old.
Parameters
----------
region : pandas series
The region being modeled.
iso3 : string
ISO3 country code.
Returns
-------
population : int
Population sum under 10 years of age.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'under_10')
all_paths = glob.glob(path + '/*.tif')
population = []
for path in all_paths:
with rasterio.open(path) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
if population_summation is not None:
population.append(population_summation)
return sum(population)
def estimate_sites(data, iso3, backhaul_lut):
"""
Estimate the sites by region.
Parameters
----------
data : dataframe
Pandas df with regional data.
iso3 : string
ISO3 country code.
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
existing_site_data_path = os.path.join(DATA_INTERMEDIATE, iso3, 'sites', 'sites.csv')
existing_site_data = {}
if os.path.exists(existing_site_data_path):
site_data = pd.read_csv(existing_site_data_path)
site_data = site_data.to_dict('records')
for item in site_data:
existing_site_data[item['GID_id']] = item['sites']
population = 0
for region in data:
if region['population'] is None:
continue
population += int(region['population'])
path = os.path.join(DATA_RAW, 'wb_mobile_coverage', 'wb_population_coverage_2G.csv')
coverage = pd.read_csv(path, encoding='latin-1')
coverage = coverage.loc[coverage['Country ISO3'] == iso3]
if len(coverage) >= 1:
coverage = coverage['2020'].values[0]
else:
coverage = 0
population_covered = population * (coverage / 100)
path = os.path.join(DATA_RAW, 'real_site_data', 'site_counts.csv')
towers = pd.read_csv(path, encoding = "ISO-8859-1")
towers = towers.loc[towers['iso3'] == iso3]
towers = towers['sites'].values[0]
if np.isnan(towers):
towers = 0
towers_per_pop = 0
else:
towers_per_pop = towers / population_covered
tower_backhaul_lut = estimate_backhaul_type(backhaul_lut)
data = sorted(data, key=lambda k: k['population_km2'], reverse=True)
covered_pop_so_far = 0
for region in data:
#first try to use actual data
if len(existing_site_data) > 0:
sites_estimated_total = existing_site_data[region['GID_id']]
if region['area_km2'] > 0:
sites_estimated_km2 = sites_estimated_total / region['area_km2']
else:
sites_estimated_km2 = 0
#or if we don't have data estimates of sites per area
else:
if covered_pop_so_far < population_covered:
sites_estimated_total = region['population'] * towers_per_pop
sites_estimated_km2 = region['population_km2'] * towers_per_pop
else:
sites_estimated_total = 0
sites_estimated_km2 = 0
backhaul_fiber = 0
backhaul_copper = 0
backhaul_wireless = 0
backhaul_satellite = 0
for i in range(1, int(round(sites_estimated_total)) + 1):
num = random.uniform(0, 1)
if num <= tower_backhaul_lut['fiber']:
backhaul_fiber += 1
elif tower_backhaul_lut['fiber'] < num <= tower_backhaul_lut['copper']:
backhaul_copper += 1
elif tower_backhaul_lut['copper'] < num <= tower_backhaul_lut['microwave']:
backhaul_wireless += 1
elif tower_backhaul_lut['microwave'] < num:
backhaul_satellite += 1
output.append({
'GID_0': region['GID_0'],
'GID_id': region['GID_id'],
'GID_level': region['GID_level'],
# 'mean_luminosity_km2': region['mean_luminosity_km2'],
'population': region['population'],
# 'pop_under_10_pop': region['pop_under_10_pop'],
'area_km2': region['area_km2'],
'population_km2': region['population_km2'],
# 'pop_adults_km2': region['pop_adults_km2'],
'coverage_GSM_percent': region['coverage_GSM_percent'],
'coverage_3G_percent': region['coverage_3G_percent'],
'coverage_4G_percent': region['coverage_4G_percent'],
'total_estimated_sites': sites_estimated_total,
'total_estimated_sites_km2': sites_estimated_km2,
'sites_3G': sites_estimated_total * (region['coverage_3G_percent'] /100),
'sites_4G': sites_estimated_total * (region['coverage_4G_percent'] /100),
'backhaul_fiber': backhaul_fiber,
'backhaul_copper': backhaul_copper,
'backhaul_wireless': backhaul_wireless,
'backhaul_satellite': backhaul_satellite,
})
if region['population'] is None:
continue
covered_pop_so_far += region['population']
return output
def estimate_backhaul(iso3, region, year):
"""
Get the correct backhaul composition for the region.
Parameters
----------
iso3 : string
ISO3 country code.
region : string
The continent the country is part of.
year : int
The year of the backhaul composition desired.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
path = os.path.join(BASE_PATH, 'raw', 'gsma', 'backhaul.csv')
backhaul_lut = pd.read_csv(path)
backhaul_lut = backhaul_lut.to_dict('records')
for item in backhaul_lut:
if region == item['Region'] and int(item['Year']) == int(year):
output.append({
'tech': item['Technology'],
'percentage': int(item['Value']),
})
return output
def estimate_backhaul_type(backhaul_lut):
"""
Process the tower backhaul lut.
Parameters
----------
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : dict
Tower backhaul lookup table.
"""
output = {}
preference = [
'fiber',
'copper',
'microwave',
'satellite'
]
perc_so_far = 0
for tech in preference:
for item in backhaul_lut:
if tech == item['tech'].lower():
perc = item['percentage']
output[tech] = (perc + perc_so_far) / 100
perc_so_far += perc
return output
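# Worked example of the cumulative thresholds built above (comment only):
#   input:  [{'tech': 'fiber', 'percentage': 20}, {'tech': 'copper', 'percentage': 40},
#            {'tech': 'microwave', 'percentage': 30}, {'tech': 'satellite', 'percentage': 10}]
#   output: {'fiber': 0.2, 'copper': 0.6, 'microwave': 0.9, 'satellite': 1.0}
# estimate_sites() then draws a uniform random number per site and bins it against
# these cumulative shares to assign a backhaul type.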
def area_of_polygon(geom):
"""
Returns the geodesic area of a polygon, assuming WGS84 coordinates.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
poly_area : int
Area of the polygon in square meters.
"""
geod = pyproj.Geod(ellps="WGS84")
poly_area, poly_perimeter = geod.geometry_area_perimeter(
geom
)
return abs(int(poly_area))
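# Illustrative check (comment only, approximate): a 1-degree square at the equator,
# e.g. Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]), yields roughly 1.2e10 m^2
# (~12,300 km^2), which is why callers above divide the result by 1e6.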
def length_of_line(geom):
"""
Returns the geodesic length of a linestring, assuming WGS84 coordinates.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
total_length : int
Length of the linestring in meters.
"""
geod = pyproj.Geod(ellps="WGS84")
total_length = geod.line_length(*geom.xy)
return abs(int(total_length))
def estimate_numers_of_sites(linear_regressor, x_value):
"""
Function to predict the y value from the stated x value.
Parameters
----------
linear_regressor : object
Linear regression object.
x_value : float
The stated x value we want to use to predict y.
Returns
-------
result : float
The predicted y value.
"""
if not x_value == 0:
result = linear_regressor.predict(x_value)
result = result[0,0]
else:
result = 0
return result
def exclude_small_shapes(x):
"""
Remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if it's a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
return x.geometry
# if it's a multipolygon, remove the constituent shapes
# that fall below an area threshold.
elif x.geometry.geom_type == 'MultiPolygon':
area1 = 0.01
area2 = 50
# don't remove shapes if total area is already very small
if x.geometry.area < area1:
return x.geometry
# remove bigger shapes if country is really big
if x['GID_0'] in ['CHL','IDN']:
threshold = 0.01
elif x['GID_0'] in ['RUS','GRL','CAN','USA']:
threshold = 0.01
elif x.geometry.area > area2:
threshold = 0.1
else:
threshold = 0.001
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def clean_coverage(x):
"""
Cleans the coverage polygons by remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if it's a single polygon, return it only if it exceeds the area threshold
if x.geometry.geom_type == 'Polygon':
if x.geometry.area > 1e7:
return x.geometry
# if it's a multipolygon, remove the constituent shapes
# that fall below the area threshold.
elif x.geometry.geom_type == 'MultiPolygon':
threshold = 1e7
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def estimate_core_nodes(iso3, pop_density_km2, settlement_size):
"""
This function identifies settlements which exceed a desired settlement
size. It is assumed fiber exists at settlements over, for example,
20,000 inhabitants.
Parameters
----------
iso3 : string
ISO 3 digit country code.
pop_density_km2 : int
Population density threshold for identifying built up areas.
settlement_size : int
Overall settlement size assumption, e.g. 20,000 inhabitants.
Returns
-------
output : list of dicts
Identified major settlements as Geojson objects.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
with rasterio.open(path) as src:
data = src.read()
threshold = pop_density_km2
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
nodes = nodes[nodes['sum'] >= settlement_size]
nodes['geometry'] = nodes['geometry'].centroid
nodes = get_points_inside_country(nodes, iso3)
output = []
for index, item in enumerate(nodes.to_dict('records')):
output.append({
'type': 'Feature',
'geometry': mapping(item['geometry']),
'properties': {
'network_layer': 'core',
'id': 'core_{}'.format(index),
'node_number': index,
}
})
return output
def get_points_inside_country(nodes, iso3):
"""
Check settlement locations lie inside target country.
Parameters
----------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
iso3 : string
ISO 3 digit country code.
Returns
-------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
"""
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
national_outline = gpd.read_file(path)
bool_list = nodes.intersects(national_outline.unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
return nodes
def generate_agglomeration_lut(country):
"""
Generate a lookup table of agglomerations.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
if not os.path.exists(folder):
os.makedirs(folder)
path_output = os.path.join(folder, 'agglomerations.shp')
if os.path.exists(path_output):
return print('Agglomeration processing has already completed')
print('Working on {} agglomeration lookup table'.format(iso3))
filename = 'regions_{}_{}.shp'.format(regional_level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path, crs="epsg:4326")
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
settlements = rasterio.open(path_settlements, 'r+')
settlements.nodata = 255
settlements.crs = 'epsg:4326'
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
if not os.path.exists(folder_tifs):
os.makedirs(folder_tifs)
for idx, region in regions.iterrows():
bbox = region['geometry'].envelope
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[idx])
coords = [json.loads(geo.to_json())['features'][0]['geometry']]
#chop on coords
out_img, out_transform = mask(settlements, coords, crop=True)
# Copy the metadata
out_meta = settlements.meta.copy()
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": 'epsg:4326'})
path_output = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path_output, "w", **out_meta) as dest:
dest.write(out_img)
print('Completed settlement.tif regional segmentation')
nodes, missing_nodes = find_nodes(country, regions)
missing_nodes = get_missing_nodes(country, regions, missing_nodes, 10, 10)
nodes = nodes + missing_nodes
nodes = gpd.GeoDataFrame.from_features(nodes, crs='epsg:4326')
bool_list = nodes.intersects(regions['geometry'].unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
agglomerations = []
print('Identifying agglomerations')
for idx1, region in regions.iterrows():
seen = set()
for idx2, node in nodes.iterrows():
if node['geometry'].intersects(region['geometry']):
agglomerations.append({
'type': 'Feature',
'geometry': mapping(node['geometry']),
'properties': {
'id': idx1,
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': node['sum'],
}
})
seen.add(region[GID_level])
if len(seen) == 0:
agglomerations.append({
'type': 'Feature',
'geometry': mapping(region['geometry'].centroid),
'properties': {
'id': 'regional_node',
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': 1,
}
})
agglomerations = gpd.GeoDataFrame.from_features(
[
{
'geometry': item['geometry'],
'properties': {
'id': item['properties']['id'],
'GID_0':item['properties']['GID_0'],
GID_level: item['properties'][GID_level],
'population': item['properties']['population'],
}
}
for item in agglomerations
],
crs='epsg:4326'
)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
path_output = os.path.join(folder, 'agglomerations' + '.shp')
agglomerations.to_file(path_output)
agglomerations['lon'] = agglomerations['geometry'].x
agglomerations['lat'] = agglomerations['geometry'].y
agglomerations = agglomerations[['lon', 'lat', GID_level, 'population']]
agglomerations.to_csv(os.path.join(folder, 'agglomerations.csv'), index=False)
return print('Agglomerations layer complete')
def process_existing_fiber(country):
"""
Load and process existing fiber data.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
iso2 = country['iso2'].lower()
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
if not os.path.exists(folder):
os.makedirs(folder)
filename = 'core_edges_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Existing fiber already processed')
path = os.path.join(DATA_RAW, 'afterfiber', 'afterfiber.shp')
shape = fiona.open(path)
data = []
for item in shape:
if item['properties']['iso2'].lower() == iso2.lower():
if item['geometry']['type'] == 'LineString':
if int(item['properties']['live']) == 1:
data.append({
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': item['geometry']['coordinates'],
},
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
if item['geometry']['type'] == 'MultiLineString':
if int(item['properties']['live']) == 1:
try:
geom = MultiLineString(item['geometry']['coordinates'])
for line in geom:
data.append({
'type': 'Feature',
'geometry': mapping(line),
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
except Exception:
# some geometries are incorrect from data source
# exclude to avoid issues
pass
if len(data) == 0:
return print('No existing infrastructure')
data = gpd.GeoDataFrame.from_features(data)
data.to_file(path_output, crs='epsg:4326')
return print('Existing fiber processed')
def find_nodes_on_existing_infrastructure(country):
"""
Find those agglomerations which are within a buffered zone of
existing fiber links.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
filename = 'core_nodes_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Already found nodes on existing infrastructure')
else:
if not os.path.exists(os.path.dirname(path_output)):
os.makedirs(os.path.dirname(path_output))
path = os.path.join(folder, 'core_edges_existing.shp')
if not os.path.exists(path):
return print('No existing infrastructure')
existing_infra = gpd.read_file(path, crs='epsg:4326')
existing_infra = existing_infra.to_crs(epsg=3857)
existing_infra['geometry'] = existing_infra['geometry'].buffer(5000)
existing_infra = existing_infra.to_crs(epsg=4326)
# shape_output = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'core_edges_buffered.shp')
# existing_infra.to_file(shape_output, crs='epsg:4326')
path = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'agglomerations.shp')
agglomerations = gpd.read_file(path, crs='epsg:4326')
bool_list = agglomerations.intersects(existing_infra.unary_union)
agglomerations = pd.concat([agglomerations, bool_list], axis=1)
agglomerations = agglomerations[agglomerations[0] == True].drop(columns=0)
agglomerations['source'] = 'existing'
agglomerations.to_file(path_output, crs='epsg:4326')
return print('Found nodes on existing infrastructure')
def find_nodes(country, regions):
"""
Find key nodes.
Parameters
----------
country : dict
Contains all country specific information.
regions : dataframe
All regions to be assessed.
Returns
-------
interim : list of dicts
Contains geojson dicts for nodes.
missing_nodes : list
Contains the id of regions with missing nodes.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
threshold = country['pop_density_km2']
settlement_size = country['settlement_size']
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
interim = []
missing_nodes = set()
print('Working on gathering data from regional rasters')
for idx, region in regions.iterrows():
path = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path) as src:
data = src.read()
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
geojson_region = [
{
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level]
}
}
]
gpd_region = gpd.GeoDataFrame.from_features(
[
{'geometry': poly['geometry'],
'properties':{
GID_level: poly['properties'][GID_level]
}}
for poly in geojson_region
], crs='epsg:4326'
)
if len(shapes_df) == 0:
continue
nodes = gpd.overlay(shapes_df, gpd_region, how='intersection')
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
nodes_subset = nodes[nodes['sum'] >= settlement_size]
if len(nodes_subset) == 0:
missing_nodes.add(region[GID_level])
for idx, item in nodes_subset.iterrows():
interim.append({
'geometry': item['geometry'].centroid,
'properties': {
GID_level: region[GID_level],
'count': item['count'],
'sum': item['sum']
}
})
return interim, missing_nodes
def get_missing_nodes(country, regions, missing_nodes, threshold, settlement_size):
"""
Find any missing nodes.
Parameters
----------
country : dict
Contains all country specific information.
regions : dataframe
All regions to be assessed.
missing_nodes : list
Contains the id of regions with missing nodes.
threshold : int
Population density threshold in persons per square kilometer.
settlement_size : int
Overall settlement size threshold.
Returns
-------
interim : list of dicts
Contains geojson dicts for nodes.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
interim = []
for idx, region in regions.iterrows():
if not region[GID_level] in list(missing_nodes):
continue
path = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path) as src:
data = src.read()
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
geojson_region = [
{
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level]
}
}
]
gpd_region = gpd.GeoDataFrame.from_features(
[
{'geometry': poly['geometry'],
'properties':{
GID_level: poly['properties'][GID_level]
}}
for poly in geojson_region
], crs='epsg:4326'
)
nodes = gpd.overlay(shapes_df, gpd_region, how='intersection')
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
max_sum = nodes['sum'].max()
nodes = nodes[nodes['sum'] > max_sum - 1]
for idx, item in nodes.iterrows():
interim.append({
'geometry': item['geometry'].centroid,
'properties': {
GID_level: region[GID_level],
'count': item['count'],
'sum': item['sum']
}
})
return interim
def find_regional_nodes(country):
"""
Find the nodes in each region.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
input_path = os.path.join(folder, 'agglomerations', 'agglomerations.shp')
existing_nodes_path = os.path.join(folder, 'network_existing', 'core_nodes_existing.shp')
output_path = os.path.join(folder, 'network', 'core_nodes.shp')
regional_output_path = os.path.join(folder, 'network', 'regional_nodes')
regions = gpd.read_file(input_path, crs="epsg:4326")
unique_regions = regions[GID_level].unique()
if os.path.exists(output_path):
return print('Regional nodes layer already generated')
folder = os.path.dirname(output_path)
if not os.path.exists(folder):
os.makedirs(folder)
if not os.path.exists(regional_output_path):
os.makedirs(regional_output_path)
interim = []
for unique_region in unique_regions:
if unique_region in country['regions_to_skip']:
continue
agglomerations = []
for idx, region in regions.iterrows():
if unique_region == region[GID_level]:
agglomerations.append({
'type': 'Feature',
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level],
'population': region['population'],
'source': 'existing',
}
})
regional_nodes = gpd.GeoDataFrame.from_features(agglomerations, crs='epsg:4326')
path = os.path.join(regional_output_path, unique_region + '.shp')
regional_nodes.to_file(path)
agglomerations = sorted(agglomerations, key=lambda k: k['properties']['population'], reverse=True)
interim.append(agglomerations[0])
if os.path.exists(existing_nodes_path):
output = []
new_nodes = []
seen = set()
existing_nodes = gpd.read_file(existing_nodes_path, crs='epsg:4326')
existing_nodes = existing_nodes.to_dict('records')
for item in existing_nodes:
seen.add(item[GID_level])
output.append({
'type': 'Point',
'geometry': mapping(item['geometry']),
'properties': {
GID_level: item[GID_level],
'population': item['population'],
'source': 'existing',
}
})
for item in interim:
if not item['properties'][GID_level] in seen:
new_node = {
'type': 'Point',
'geometry': mapping(item['geometry']),
'properties': {
GID_level: item['properties'][GID_level],
'population': item['properties']['population'],
'source': 'new',
}
}
output.append(new_node)
new_nodes.append(new_node)
output = gpd.GeoDataFrame.from_features(output)
output.to_file(output_path, crs='epsg:4326')#write core nodes
if len(new_nodes) > 0:
new_nodes = gpd.GeoDataFrame.from_features(new_nodes)
path = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'new_nodes.shp')
new_nodes.to_file(path, crs='epsg:4326')#write core nodes
if not os.path.exists(output_path):
output = gpd.GeoDataFrame.from_features(
[
{'geometry': item['geometry'], 'properties': item['properties']}
for item in interim
],
crs='epsg:4326'
)
output['source'] = 'new'
output.to_file(output_path)#write core nodes
output = []
for unique_region in unique_regions:
path = os.path.join(regional_output_path, unique_region + '.shp')
if os.path.exists(path):
regional_nodes = gpd.read_file(path, crs='epsg:4326')
for idx, regional_node in regional_nodes.iterrows():
output.append({
'geometry': regional_node['geometry'],
'properties': {
'value': regional_node['population'],
'source': 'new',
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
path = os.path.join(folder, 'regional_nodes.shp')
output.to_file(path)
return print('Completed regional node estimation')
def prepare_edge_fitting(country):
"""
Meta function for fitting edges to nodes using a minimum spanning tree.
Parameters
----------
country : dict
Contains all country specific information.
"""
folder = os.path.join(DATA_INTERMEDIATE, country['iso3'])
core_edges_path = os.path.join(folder, 'network_existing', 'core_edges_existing.shp')
if not os.path.exists(core_edges_path):
input_path = os.path.join(folder, 'network', 'core_nodes.shp')
output_path = os.path.join(folder, 'network', 'core_edges.shp')
fit_edges(input_path, output_path)
else:
core_nodes_path = os.path.join(folder, 'network_existing', 'core_nodes_existing.shp')
existing_nodes = gpd.read_file(core_nodes_path, crs='epsg:4326')
path = os.path.join(folder, 'network', 'new_nodes.shp')
output = []
if os.path.exists(path):
new_nodes = gpd.read_file(path, crs='epsg:4326')
for idx, new_node in new_nodes.iterrows():
nearest = nearest_points(new_node.geometry, existing_nodes.unary_union)[1]
geom = LineString([
(
new_node['geometry'].coords[0][0],
new_node['geometry'].coords[0][1]
),
(
nearest.coords[0][0],
nearest.coords[0][1]
),
])
output.append({
'type': 'LineString',
'geometry': mapping(geom),
'properties': {
'id': idx,
'source': 'new'
}
})
existing_edges = gpd.read_file(core_edges_path, crs='epsg:4326')
for idx, existing_edge in existing_edges.iterrows():
output.append({
'type': 'LineString',
'geometry': mapping(existing_edge['geometry']),
'properties': {
'id': idx,
'source': 'existing'
}
})
output = gpd.GeoDataFrame.from_features(output)
path = os.path.join(folder, 'network', 'core_edges.shp')
output.to_file(path, crs='epsg:4326')
def fit_edges(input_path, output_path):
"""
Fit edges to nodes using a minimum spanning tree.
Parameters
----------
input_path : string
Path to nodes shapefile.
output_path : string
Path to write edges to as shapefiles.
"""
folder = os.path.dirname(output_path)
if not os.path.exists(folder):
os.makedirs(folder)
nodes = gpd.read_file(input_path, crs='epsg:4326')
nodes = nodes.to_crs('epsg:3857')
all_possible_edges = []
for node1_id, node1 in nodes.iterrows():
for node2_id, node2 in nodes.iterrows():
if node1_id != node2_id:
geom1 = shape(node1['geometry'])
geom2 = shape(node2['geometry'])
line = LineString([geom1, geom2])
all_possible_edges.append({
'type': 'Feature',
'geometry': mapping(line),
'properties':{
'from': node1_id,
'to': node2_id,
'length': line.length,
'source': 'new',
}
})
if len(all_possible_edges) == 0:
return
G = nx.Graph()
for node_id, node in nodes.iterrows():
G.add_node(node_id, object=node)
for edge in all_possible_edges:
G.add_edge(edge['properties']['from'], edge['properties']['to'],
object=edge, weight=edge['properties']['length'])
tree = nx.minimum_spanning_edges(G)
edges = []
for branch in tree:
link = branch[2]['object']
if link['properties']['length'] > 0:
if 'geometry' in link:
edges.append(link)
if len(edges) > 0:
edges = gpd.GeoDataFrame.from_features(edges, crs='epsg:3857')
edges = edges.to_crs('epsg:4326')
edges.to_file(output_path)
return
def fit_regional_edges(country):
"""
Fit the regional network edges.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network')
path = os.path.join(folder, 'core_nodes.shp')
nodes = gpd.read_file(path, crs="epsg:4326")
unique_regions = nodes[GID_level].unique()
for unique_region in unique_regions:
input_path = os.path.join(folder, 'regional_nodes', unique_region + '.shp')
output_path = os.path.join(DATA_INTERMEDIATE, country['iso3'],
'network', 'regional_edges', unique_region + '.shp')
fit_edges(input_path, output_path)
output = []
for unique_region in unique_regions:
path = os.path.join(DATA_INTERMEDIATE, country['iso3'], 'network',
'regional_edges', unique_region + '.shp')
if os.path.exists(path):
regional_edges = gpd.read_file(path, crs='epsg:4326')
for idx, regional_edge in regional_edges.iterrows():
output.append({
'geometry': regional_edge['geometry'],
'properties': {
'value': regional_edge['length'],
'source': 'new',
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
path = os.path.join(folder, 'regional_edges.shp')
output.to_file(path)
return print('Regional edge fitting complete')
def generate_core_lut(country):
"""
Generate core lut.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
level = country['regional_level']
regional_level = 'GID_{}'.format(level)
filename = 'core_lut.csv'
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output_path = os.path.join(folder, filename)
# if os.path.exists(output_path):
# return print('Core LUT already generated')
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
regions.crs = 'epsg:4326'
output = []
path = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'core_edges.shp')
core_edges = gpd.read_file(path)
core_edges.crs = 'epsg:4326'
core_edges = gpd.GeoDataFrame(
{'geometry': core_edges['geometry'], 'source': core_edges['source']})
existing_edges = core_edges.loc[core_edges['source'] == 'existing']
existing_edges = gpd.clip(regions, existing_edges)
existing_edges = existing_edges.to_crs('epsg:3857')
existing_edges['length'] = existing_edges['geometry'].length
for idx, edge in existing_edges.iterrows():
output.append({
'GID_id': edge[regional_level],
'asset': 'core_edge',
'value': edge['length'],
'source': 'existing',
})
new_edges = core_edges.loc[core_edges['source'] == 'new']
new_edges = gpd.clip(regions, new_edges)
new_edges = new_edges.to_crs('epsg:3857')
new_edges['length'] = new_edges['geometry'].length
for idx, edge in new_edges.iterrows():
output.append({
'GID_id': edge[regional_level],
'asset': 'core_edge',
'value': edge['length'],
'source': 'new',
})
path = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'regional_edges.shp')
if os.path.exists(path):
regional_edges = gpd.read_file(path, crs='epsg:4326')
regional_edges = gpd.clip(regions, regional_edges)
regional_edges = regional_edges.to_crs('epsg:3857')
regional_edges['length'] = regional_edges['geometry'].length
for idx, edge in regional_edges.iterrows():
output.append({
'GID_id': edge[regional_level],
'asset': 'regional_edge',
'value': edge['length'],
'source': 'new', #all regional edges are assumed to be new
})
path = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'core_nodes.shp')
nodes = gpd.read_file(path, crs='epsg:4326')
existing_nodes = nodes.loc[nodes['source'] == 'existing']
f = lambda x:np.sum(existing_nodes.intersects(x))
regions['nodes'] = regions['geometry'].apply(f)
for idx, region in regions.iterrows():
output.append({
'GID_id': region[regional_level],
'asset': 'core_node',
'value': region['nodes'],
'source': 'existing',
})
new_nodes = nodes.loc[nodes['source'] == 'new']
f = lambda x:np.sum(new_nodes.intersects(x))
regions['nodes'] = regions['geometry'].apply(f)
for idx, region in regions.iterrows():
output.append({
'GID_id': region[regional_level],
'asset': 'core_node',
'value': region['nodes'],
'source': 'new',
})
path = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'regional_nodes.shp')
regional_nodes = gpd.read_file(path, crs='epsg:4326')
existing_nodes = regional_nodes.loc[regional_nodes['source'] == 'existing']
f = lambda x:np.sum(existing_nodes.intersects(x))
regions['regional_nodes'] = regions['geometry'].apply(f)
for idx, region in regions.iterrows():
output.append({
'GID_id': region[regional_level],
'asset': 'regional_node',
'value': region['regional_nodes'],
'source': 'existing',
})
new_nodes = regional_nodes.loc[regional_nodes['source'] == 'new']
f = lambda x:np.sum(new_nodes.intersects(x))
regions['regional_nodes'] = regions['geometry'].apply(f)
for idx, region in regions.iterrows():
output.append({
'GID_id': region[regional_level],
'asset': 'regional_node',
'value': region['regional_nodes'],
'source': 'new',
})
output = pd.DataFrame(output)
output = output.drop_duplicates()
output.to_csv(output_path, index=False)
return print('Completed core lut')
def forecast_subscriptions(country):
"""
Forecast the number of unique cellular subscriptions.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
path = os.path.join(DATA_RAW, 'gsma', 'gsma_unique_subscribers.csv')
historical_data = load_subscription_data(path, country['iso3'])
start_point = 2021
end_point = 2030
horizon = 4
forecast = forecast_linear(
country,
historical_data,
start_point,
end_point,
horizon
)
forecast_df = pd.DataFrame(historical_data + forecast)
path = os.path.join(DATA_INTERMEDIATE, iso3, 'subscriptions')
if not os.path.exists(path):
os.mkdir(path)
forecast_df.to_csv(os.path.join(path, 'subs_forecast.csv'), index=False)
path = os.path.join(BASE_PATH, '..', 'vis', 'subscriptions', 'data_inputs')
forecast_df.to_csv(os.path.join(path, '{}.csv'.format(iso3)), index=False)
return print('Completed subscription forecast')
def load_subscription_data(path, iso3):
"""
Load in itu cell phone subscription data.
Parameters
----------
path : string
Location of itu data as .csv.
iso3 : string
ISO3 digital country code.
Returns
-------
output : list of dicts
Time series data of cell phone subscriptions.
"""
output = []
historical_data = pd.read_csv(path, encoding = "ISO-8859-1")
historical_data = historical_data.to_dict('records')
scenarios = ['low', 'baseline', 'high']
for scenario in scenarios:
for year in range(2010, 2021):
year = str(year)
for item in historical_data:
if item['iso3'] == iso3:
output.append({
'scenario': scenario,
'country': iso3,
'penetration': float(item[year]) * 100,
'year': year,
})
return output
def forecast_linear(country, historical_data, start_point, end_point, horizon):
"""
Forecasts the subscription adoption rate.
Parameters
----------
country : dict
Contains all country specific information, including the scenario growth rates.
historical_data : list of dicts
Past penetration data.
start_point : int
Starting year of forecast period.
end_point : int
Final year of forecast period.
horizon : int
Number of years to use to estimate mean growth rate.
Returns
-------
output : list of dicts
Time series data of cell phone subscriptions.
"""
output = []
scenarios = ['low', 'baseline', 'high']
for scenario in scenarios:
scenario_data = []
subs_growth = country['subs_growth_{}'.format(scenario)]
year_0 = sorted(historical_data, key = lambda i: i['year'], reverse=True)[0]
for year in range(start_point, end_point + 1):
if year == start_point:
penetration = year_0['penetration'] * (1 + (subs_growth/100))
else:
penetration = penetration * (1 + (subs_growth/100))
if year not in [item['year'] for item in scenario_data]:
scenario_data.append({
'scenario': scenario,
'country': country['iso3'],
'year': year,
'penetration': round(penetration, 2),
})
output = output + scenario_data
return output
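# Worked example of the compound-growth step above (comment only): with a latest
# historical penetration of 60.0 and subs_growth of 2 (percent per year), the forecast is
#   2021: 60.0 * 1.02 = 61.2,   2022: 61.2 * 1.02 = 62.42, ...
# i.e. penetration_n = penetration_0 * (1 + subs_growth / 100) ** n.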
def forecast_smartphones(country):
"""
Forecast smartphone adoption.
Parameters
----------
country : dict
Contains all country specific information.
"""
iso3 = country['iso3']
filename = 'wb_smartphone_survey.csv'
path = os.path.join(DATA_RAW, 'wb_smartphone_survey', filename)
survey_data = load_smartphone_data(path, country)
start_point = 2020
end_point = 2030
forecast = forecast_smartphones_linear(
survey_data,
country,
start_point,
end_point
)
forecast_df =
|
pd.DataFrame(forecast)
|
pandas.DataFrame
|
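A hedged sketch of how forecast_smartphones might continue once the pd.DataFrame completion is in place, mirroring the save pattern used by forecast_subscriptions above; the folder and file names here are assumptions.
    forecast_df = pd.DataFrame(forecast)
    folder = os.path.join(DATA_INTERMEDIATE, iso3, 'smartphones')
    if not os.path.exists(folder):
        os.mkdir(folder)
    # 'sp_forecast.csv' is an assumed filename, chosen to mirror 'subs_forecast.csv' above.
    forecast_df.to_csv(os.path.join(folder, 'sp_forecast.csv'), index=False)
    return print('Completed smartphone forecast')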
# libraries
import numpy as np
import pandas as pd
import requests
import scipy.stats as ss
import matplotlib as mpl
import json
import plotly.graph_objects as go
from urllib.request import urlopen
with open('../data/census-key.txt') as key:
api_key = key.read().strip()
years = ['2017', '2018']
county = '*'
state = '*'
for year in years:
# pulls five of the components from the acs 5-year data api and creates a dataframe
dsource = 'acs'
dname = 'acs5'
dset = 'profile'
cols = 'NAME,DP02_0066PE,DP03_0128PE,DP03_0009PE,DP03_0062E'
base_url = f'https://api.census.gov/data/{year}/{dsource}/{dname}/{dset}'
data_url = f'{base_url}?get={cols}&for=county:{county}&in=state:{state}&key={api_key}'
response = requests.get(data_url)
state_url = f'{base_url}?get=DP03_0062E&for=state:{state}&key={api_key}'
state_response = requests.get(state_url)
data = response.json()
df = pd.DataFrame(data[1:], columns=data[0]).\
rename(columns={
'DP02_0066PE':'hs_higher', 'DP03_0128PE':'poverty_rate',
'DP03_0009PE':'unemployment_rate', 'DP03_0062E':'median_income'
})
df['fips'] = df['state']+df['county']
df.drop(columns=['county'], inplace=True)
df = df.astype(dtype={
'hs_higher':'float64', 'unemployment_rate':'float64',
'poverty_rate':'float64', 'median_income':'float64'
}).reset_index()
df['county'] = df['NAME'].str.split(' County').str.get(0)
df['state_name'] = df['NAME'].str.split(', ').str.get(-1)
df['hs_lower'] = 100 - df['hs_higher']
inc_data = state_response.json()
dfs = pd.DataFrame(inc_data[1:], columns=inc_data[0]).\
rename(columns={'DP03_0062E':'st_med_inc'})
dfs = dfs.astype(dtype={'st_med_inc':'float64'})
df = pd.merge(df, dfs, how='left', on='state')
df['med_inc_rate'] = (df['median_income']/df['st_med_inc']).round(3)*100
df.drop(df[df['state']=='72'].index, inplace=True)
df_acs = df[[
'fips', 'state', 'county', 'state_name', 'hs_lower',
'unemployment_rate', 'poverty_rate', 'median_income', 'st_med_inc', 'med_inc_rate'
]]
# pulls data from cbp api and calculates averages change over specified years
dfs = []
yrs = [str(year) for year in list(range(int(year)-1, int(year)+1))]
for yr in yrs:
dsource = 'cbp'
cols = 'ESTAB,EMP'
base_url = f'https://api.census.gov/data/{yr}/{dsource}'
data_url = f'{base_url}?get={cols}&for=county:{county}&in=state:{state}&key={api_key}'
response=requests.get(data_url)
# turns the json data into a dataframe object
data = response.json()
dfy = pd.DataFrame(data[1:], columns=data[0]).\
rename(columns={'ESTAB':'establishments', 'EMP':'employment'})
dfy['fips'] = dfy['state']+dfy['county']
dfy['year'] = yr
dfy.drop(columns=['state', 'county'], inplace=True)
dfy = dfy.astype(dtype={'establishments':'float64', 'employment':'float64'})
# appends the year dataframe to the list
dfs.append(dfy)
# concatenates the individual dataframes and returns a single dataframe
df = pd.concat(dfs, ignore_index=True)
df.sort_values(by=['fips', 'year'], inplace=True)
df['employment'] = df['employment'].replace(0, np.nan)
df['employment'] = df['employment'].fillna(df.groupby('fips')['employment'].transform('mean'))
df['est_chg'] = df.sort_values('year').groupby('fips')['establishments'].pct_change()*100
df['emp_chg'] = df.sort_values('year').groupby('fips')['employment'].pct_change()*100
df1 = df.groupby('fips')['est_chg'].mean().round(1).reset_index()
df2 = df.groupby('fips')['emp_chg'].mean().round(1).reset_index()
df_cbp = pd.merge(df1, df2, how='left', on='fips')
# pulls vacancy data from acs 5-year detailed dataset
dsource = 'acs'
dname = 'acs5'
cols = 'NAME,B25002_001E,B25002_003E,B25004_006E'
base_url = f'https://api.census.gov/data/{year}/{dsource}/{dname}'
with open('../data/census-key.txt') as key:
api_key = key.read().strip()
data_url = f'{base_url}?get={cols}&for=county:{county}&in=state:{state}&key={api_key}'
response = requests.get(data_url)
state_url = f'{base_url}?get=DP03_0062E&for=state:{state}&key={api_key}'
state_response = requests.get(state_url)
data = response.json()
df =
|
pd.DataFrame(data[1:], columns=data[0])
|
pandas.DataFrame
|
from .core import mofa_model
from .utils import *
import sys
from warnings import warn
from typing import Union, Optional, List, Iterable, Sequence
from functools import partial
import numpy as np
from scipy.stats import pearsonr
import pandas as pd
from pandas.api.types import is_numeric_dtype
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
from .utils import maybe_factor_indices_to_factors, _make_iterable, _is_iter
from .plot_utils import _plot_grid
### WEIGHTS ###
def plot_weights(
model: mofa_model,
factors=None,
views=None,
n_features: int = 5,
w_scaled: bool = False,
w_abs: bool = False,
size=2,
color="black",
label_size=5,
x_offset=0.01,
y_offset=0.15,
jitter=0.01,
line_width=0.5,
line_color="black",
line_alpha=0.2,
zero_line=True,
zero_line_width=1,
ncols=4,
sharex=True,
sharey=False,
**kwargs,
):
"""
Plot feature weights for the specified factors, one panel per view
Parameters
----------
model : mofa_model
An instance of the mofa_model class
factors : str or int or list of str or None
Factors to use (default is all)
views : str or int or list of str or None
The views to get the factors weights for (first view by default)
n_features : int
Number of features with the largest weights to label (in absolute values)
w_scaled : bool
Whether to scale weights to unit variance (False by default)
w_abs : bool
Whether to plot absolute weight values (False by default)
size : float
Dot size (2 by default)
color : str
Colour for the labelled dots (black by default)
label_size : int or float
Font size of feature labels (default is 5)
x_offset : int or float
Offset of the feature labels from the left/right side (0.01 by default)
y_offset : int or float
Parameter to repel feature labels along the y axis (0.15 by default)
jitter : float
Amount of jitter applied to the dots within each factor (0.01 by default)
line_width : int or float
Width of the lines connecting labels with dots (0.5 by default)
line_color : str
Color of the lines connecting labels with dots (black by default)
line_alpha : float
Alpha level for the lines connecting labels with dots (0.2 by default)
zero_line : bool
Whether to plot a dotted line at zero (True by default)
zero_line_width : int or float
Width of the line at 0 (1 by default)
ncols : int
Number of columns in the grid of multiple plots, one plot per view (4 by default)
sharex : bool
Whether to use the same X axis across panels (True by default)
sharey : bool
Whether to use the same Y axis across panels (False by default)
"""
w = model.get_weights(
views=views,
factors=factors,
df=True,
scale=w_scaled,
absolute_values=w_abs,
)
wm = (
w.join(model.features_metadata.loc[:, ["view"]])
.rename_axis("feature")
.reset_index()
.melt(id_vars=["feature", "view"], var_name="factor", value_name="value")
)
wm["abs_value"] = abs(wm.value)
# Assign ranks to features, per factor
wm["rank"] = wm.groupby("factor")["value"].rank(ascending=False)
wm["abs_rank"] = wm.groupby("factor")["abs_value"].rank(ascending=False)
wm = wm.sort_values(["factor", "abs_rank"], ascending=True)
# Sort factors
wm["factor"] = wm["factor"].astype("category")
wm["factor"] = wm["factor"].cat.reorder_categories(
sorted(wm["factor"].cat.categories, key=lambda x: int(x.split("Factor")[1]))
)
# Set default colour to black if none set
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = "black"
# Fetch top features to label
features_to_label = model.get_top_features(
factors=factors, views=views, n_features=n_features, df=True
)
features_to_label["to_label"] = True
wm = (
features_to_label.loc[:, ["feature", "view", "factor", "to_label"]]
.set_index(["feature", "view", "factor"])
.join(wm.set_index(["feature", "factor", "view"]), how="right")
.reset_index()
.fillna({"to_label": False})
.sort_values(["factor", "to_label"])
)
# Figure out rows & columns for the grid with plots (one plot per view)
view_vars = wm.view.unique()
ncols = min(ncols, len(view_vars))
nrows = int(np.ceil(len(view_vars) / ncols))
fig, axes = plt.subplots(
nrows,
ncols,
sharex=sharex,
sharey=sharey,
figsize=(
ncols * rcParams["figure.figsize"][0],
nrows * rcParams["figure.figsize"][1],
),
)
if ncols == 1:
axes = np.array(axes).reshape(-1, 1)
if nrows == 1:
axes = np.array(axes).reshape(1, -1)
for m, view in enumerate(view_vars):
ri = m // ncols
ci = m % ncols
wm_view = wm.query("view == @view")
# Construct the plot
g = sns.stripplot(
data=wm_view,
x="value",
y="factor",
jitter=jitter,
size=size,
hue="to_label",
palette=["lightgrey", color],
ax=axes[ri, ci],
)
sns.despine(offset=10, trim=True, ax=g)
g.legend().remove()
# Label some points
for fi, factor in enumerate(wm_view.factor.cat.categories):
for sign_i in [1, -1]:
to_label = features_to_label.query(
"factor == @factor & view == @view"
).feature.tolist()
w_set = wm_view.query(
"factor == @factor & value * @sign_i > 0 & feature == @to_label & view == @view"
).sort_values("abs_value", ascending=False)
x_start_pos = sign_i * (w_set.abs_value.max() + x_offset)
y_start_pos = fi - ((w_set.shape[0] - 1) // 2) * y_offset
y_prev = y_start_pos
for i, row in enumerate(w_set.iterrows()):
name, point = row
y_loc = y_prev + y_offset if i != 0 else y_start_pos
g.annotate(
point["feature"],
xy=(point.value, fi),
xytext=(x_start_pos, y_loc),
arrowprops=dict(
arrowstyle="-",
connectionstyle="arc3",
color=line_color,
alpha=line_alpha,
linewidth=line_width,
),
horizontalalignment="left" if sign_i > 0 else "right",
size=label_size,
color="black",
weight="regular",
alpha=0.9,
)
y_prev = y_loc
# Set plot axes labels
g.set(ylabel="", xlabel="Feature weight", title=view)
if zero_line:
axes[ri, ci].axvline(
0, ls="--", color="lightgrey", linewidth=zero_line_width, zorder=0
)
# Remove unused axes
for i in range(len(view_vars), ncols * nrows):
ri = i // ncols
ci = i % ncols
fig.delaxes(axes[ri, ci])
return g
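# A minimal usage sketch (illustrative arguments; assumes an already-trained
# mofa_model instance is available):
def _plot_weights_usage(model: mofa_model):
    # Label the 5 largest absolute weights per factor, one panel per view.
    return plot_weights(model, factors=["Factor1", "Factor2"], n_features=5)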
def plot_weights_ranked(
model: mofa_model,
factor="Factor1",
view=0,
n_features: int = 10,
size: int = 25,
label_size=5,
x_rank_offset=10,
x_rank_offset_neg=0,
y_repel_coef=0.03,
attract_to_points=True,
**kwargs,
):
"""
Plot weights for a specific factor
Parameters
----------
model : mofa_model
Factor model
factor : optional
Factor to use (default is Factor1)
view : optional
The view to get the factors weights for (first view by default)
n_features : optional
Number of features to label with most positive and most negative weights
size : int
Dot size for labelled features (default is 25)
label_size : optional
Font size of feature labels (default is 5)
x_rank_offset : optional
Offset the feature labels from the left/right side (by 10 points by default)
x_rank_offset_neg : optional
Offset but for the negative weights only (i.e. from the right side)
y_repel_coef : optional
Parameter to repel feature labels along the y axis (0.03 by default)
attract_to_points : optional
Whether to pull labels toward the Y coordinate of their points (True by default)
"""
w = model.get_weights(views=view, factors=factor, df=True)
w = pd.melt(
w.reset_index().rename(columns={"index": "feature"}),
id_vars="feature",
var_name="factor",
value_name="value",
)
w["abs_value"] = abs(w.value)
# Assign ranks to features, per factor
w["rank"] = w.groupby("factor")["value"].rank(ascending=False)
w["abs_rank"] = w.groupby("factor")["abs_value"].rank(ascending=False)
w = w.sort_values(["factor", "abs_rank"], ascending=True)
# Set default colour to black if none set
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = "black"
# Construct the plot
ax = sns.lineplot(
x="rank", y="value", data=w, markers=True, dashes=False, linewidth=0.5, **kwargs
)
sns.despine(offset=10, trim=True, ax=ax)
# Plot top features as dots
sns.scatterplot(
x="rank",
y="value",
data=w[w["abs_rank"] < n_features],
linewidth=0.2,
s=size,
alpha=0.75,
**kwargs,
)
# Label top features
# Positive weights
y_start_pos = w[w.value > 0].sort_values("abs_rank").iloc[0].value
y_prev = y_start_pos
for i, point in (
w[(w["abs_rank"] < n_features) & (w["value"] >= 0)].reset_index().iterrows()
):
y_loc = y_prev - y_repel_coef if i != 0 else y_start_pos
y_loc = min(point["value"], y_loc) if attract_to_points else y_loc
ax.text(
x_rank_offset,
y_loc,
point["feature"],
horizontalalignment="left",
size=label_size,
color="black",
weight="regular",
)
y_prev = y_loc
# Negative weights
y_start_neg = w[w.value < 0].sort_values("abs_rank").iloc[0].value
y_prev = y_start_neg
for i, point in (
w[(w["abs_rank"] < n_features) & (w["value"] < 0)].reset_index().iterrows()
):
y_loc = y_prev + y_repel_coef if i != 0 else y_start_neg
y_loc = max(point["value"], y_loc) if attract_to_points else y_loc
ax.text(
w.shape[0] - x_rank_offset_neg,
y_loc,
point["feature"],
horizontalalignment="left",
size=label_size,
color="black",
weight="regular",
)
y_prev = y_loc
# Set plot axes labels
factor_label = f"Factor{factor+1}" if isinstance(factor, int) else factor
ax.set(ylabel=f"{factor_label} weight", xlabel="Feature rank")
return ax
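# A minimal usage sketch (illustrative arguments; assumes a trained model):
def _plot_weights_ranked_usage(model: mofa_model):
    # Rank all Factor1 weights in the first view and label the 10 most extreme.
    return plot_weights_ranked(model, factor="Factor1", view=0, n_features=10)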
def plot_weights_scaled(
model: mofa_model,
x="Factor1",
y="Factor2",
view=0,
n_features: int = 10,
w_scaled: bool = True,
label_size=5,
y_repel_coef=0.05,
attract_to_points=True,
**kwargs,
):
"""
Scatterplot of feature weights for two factors
Parameters
----------
model : mofa_model
Factor model
x : optional
Factor whose weights are plotted along the X axis (Factor1 by default)
y : optional
Factor whose weights are plotted along the Y axis (Factor2 by default)
view : optional
The view to get the factor weights for (first view by default)
n_features : optional
Number of features to label with most positive and most negative weights
label_size : optional
Font size of feature labels (default is 5)
y_repel_coef : optional
Parameter to repel feature labels along the y axis (0.05 by default)
attract_to_points : optional
Whether to pull labels toward the Y coordinate of their points (True by default)
"""
w = model.get_weights(views=view, factors=[x, y], df=True)
w.columns = ["x", "y"]
if w_scaled:
w.x = w.x / abs(w.loc[abs(w.x).idxmax()].x)
w.y = w.y / abs(w.loc[abs(w.y).idxmax()].y)
wm = (
w.rename_axis("feature")
.reset_index()
.melt(var_name="factor", id_vars=["feature"])
.assign(
value_abs=lambda x: np.abs(x.value), value_sign=lambda x: np.sign(x.value)
)
.sort_values("value_abs", ascending=False)
.head(n_features)
.sort_values(["factor", "value_sign"], ascending=True)
.drop_duplicates("feature")
)
top_features = wm.sort_values("factor", ascending=True).feature.values
# Construct the plot
ax = sns.scatterplot("x", "y", data=w, linewidth=0, color="#CCCCCC", **kwargs)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_aspect(1)
for factor in wm.factor.unique():
for sign in wm[wm.factor == factor].value_sign.unique():
feature_set = wm[
(wm.factor == factor) & (wm.value_sign == sign)
].feature.values
w_set = w.loc[feature_set].sort_values("y", ascending=False)
y_start_pos = w_set.y.max()
y_prev = y_start_pos
for i, row in enumerate(w_set.iterrows()):
name, point = row
y_loc = y_prev - y_repel_coef if i != 0 else y_start_pos
y_loc = min(point.y, y_loc) if attract_to_points else y_loc
y_prev = y_loc
ax.text(point.x, y_loc, str(name), size=label_size)
ax.plot([0, point.x], [0, point.y], linewidth=0.5, color="#333333")
sns.despine(offset=10, trim=True, ax=ax)
ax.set_xticks(np.arange(-1, 2.0, step=1.0))
ax.set_yticks(np.arange(-1, 2.0, step=1.0))
# Set plot axes labels
x_factor_label = f"Factor{x+1}" if isinstance(x, int) else x
y_factor_label = f"Factor{y+1}" if isinstance(y, int) else y
ax.set(xlabel=f"{x_factor_label} weight", ylabel=f"{y_factor_label} weight")
return ax
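# A minimal usage sketch (illustrative arguments; assumes a trained model):
def _plot_weights_scaled_usage(model: mofa_model):
    # Compare the weights of two factors on a common, scaled axis.
    return plot_weights_scaled(model, x="Factor1", y="Factor2", n_features=10)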
def plot_weights_heatmap(
model: mofa_model,
factors: Union[int, List[int]] = None,
view=0,
n_features: int = None,
w_threshold: float = None,
w_abs: bool = False,
only_positive: bool = False,
only_negative: bool = False,
features_col: pd.DataFrame = None,
cmap=None,
xticklabels_size=10,
yticklabels_size=None,
cluster_factors=True,
cluster_features=True,
**kwargs,
):
"""
Plot weights for top features in a heatmap
Parameters
----------
model : mofa_model
Factor model
factors : optional
Factors to use (all factors in the model by default)
view : optional
The view to get the factors weights for (first view by default)
n_features : optional
Number of features for each factor by their absolute value (10 by default)
w_threshold : optional
Absolute weight threshold for a feature to plot (no threshold by default)
w_abs : optional
Whether to plot absolute weight values
only_positive : optional
Whether to plot only positive weights
only_negative : optional
Whether to plot only negative weights
features_col : optional
Pandas data frame indexed by feature name, with the first column
containing the colour for every feature
cmap : optional
Color map (blue-to-red divergent palette by default)
xticklabels_size : optional
Font size for feature labels (default is 10)
yticklabels_size : optional
Font size for factor labels (default is None)
cluster_factors : optional
Whether to cluster factors (in rows; default is True)
cluster_features : optional
Whether to cluster features (in columns; default is True)
"""
# Set defaults
n_features_default = 10
if factors is None:
factors = list(range(model.nfactors))
if cmap is None:
cmap = sns.diverging_palette(240, 10, n=9, as_cmap=True)
# Fetch weights for the relevant factors
w = (
model.get_weights(views=view, factors=factors, df=True, absolute_values=w_abs)
.rename_axis("feature")
.reset_index()
)
wm = w.melt(id_vars="feature", var_name="factor", value_name="value")
wm = wm.assign(value_abs=lambda x: x.value.abs())
wm["factor"] = wm["factor"].astype("category")
if only_positive and only_negative:
print("Please specify either only_positive or only_negative")
sys.exit(1)
elif only_positive:
wm = wm[wm.value > 0]
elif only_negative:
wm = wm[wm.value < 0]
if n_features is None and w_threshold is not None:
features = wm[wm.value_abs >= w_threshold].feature.unique()
else:
if n_features is None:
n_features = n_features_default
# Get a subset of features
wm = wm.sort_values(["factor", "value_abs"], ascending=False).groupby("factor")
if w_threshold is None:
features = wm.head(n_features).feature.unique()
else:
features = wm[wm.value_abs >= w_threshold].head(n_features).feature.unique()
w = w[w.feature.isin(features)].set_index("feature").T
col_colors = features_col.loc[features, :] if features_col is not None else None
if not isinstance(factors, Iterable) or len(factors) < 2:
cluster_factors = False
cg = sns.clustermap(
w,
cmap=cmap,
col_colors=col_colors,
xticklabels=True,
row_cluster=cluster_factors,
col_cluster=cluster_features,
**kwargs,
)
cg.ax_heatmap.set_xticklabels(
cg.ax_heatmap.xaxis.get_ticklabels(), rotation=90, size=xticklabels_size
)
cg.ax_heatmap.set_yticklabels(
cg.ax_heatmap.yaxis.get_ticklabels(), rotation=0, size=yticklabels_size
)
return cg
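# A minimal usage sketch (illustrative arguments; assumes a trained model):
def _plot_weights_heatmap_usage(model: mofa_model):
    # Cluster the 20 strongest features per factor of the first view.
    return plot_weights_heatmap(model, view=0, n_features=20)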
def plot_weights_dotplot(
model: mofa_model,
factors: Union[int, List[int]] = None,
view=0,
n_features: int = None,
w_threshold: float = None,
w_abs: bool = False,
only_positive: bool = False,
only_negative: bool = False,
palette=None,
size: int = 30,
linewidth: int = 1,
xticklabels_size=8,
yticklabels_size=5,
ncols=1,
sharex=True,
sharey=False,
**kwargs,
):
"""
Plot weights for top features in a dot plot
Parameters
----------
model : mofa_model
Factor model
factors : optional
Factors to use (all factors in the model by default)
view : optional
The view to get the factors weights for (first view by default)
n_features : optional
Number of features for each factor by their absolute value (5 by default)
w_threshold : optional
Absolute weight threshold for a feature to plot (no threshold by default)
w_abs : optional
Whether to plot absolute weight values
only_positive : optional
Whether to plot only positive weights
only_negative : optional
Whether to plot only negative weights
palette : optional
Color map (blue-to-red divergent palette by default)
size : optional
Dot size (default is 30)
linewidth : optional
Dot outline width (default is 1)
xticklabels_size : optional
Font size for the factor labels on the X axis (default is 8)
yticklabels_size : optional
Font size for the feature labels on the Y axis (default is 5)
ncols : optional
Number of columns when plotting multiple views (default is 1)
sharex : bool
Whether to use the same X axis across panels (True by default)
sharey : bool
Whether to use the same Y axis across panels (False by default)
"""
# Set defaults
n_features_default = 5
if factors is None:
factors = list(range(model.nfactors))
if palette is None:
palette = sns.diverging_palette(240, 10, n=9, as_cmap=True)
# Fetch weights for the relevant factors
w = (
model.get_weights(views=view, factors=factors, df=True, absolute_values=w_abs)
.rename_axis("feature")
.join(model.features_metadata.loc[:, ["view"]])
.reset_index()
)
wm = w.melt(id_vars=["feature", "view"], var_name="factor", value_name="value")
wm = wm.assign(value_abs=lambda x: x.value.abs())
wm["factor"] = wm["factor"].astype("category")
if only_positive and only_negative:
print("Please specify either only_positive or only_negative")
sys.exit(1)
elif only_positive:
wm = wm[wm.value > 0]
elif only_negative:
wm = wm[wm.value < 0]
# Fix factors order
wm.factor = wm.factor.astype("category")
wm.factor = wm.factor.cat.reorder_categories(
sorted(wm.factor.cat.categories, key=lambda x: int(x.split("Factor")[1]))
)
wm = wm.sort_values("factor")
if n_features is None and w_threshold is not None:
features = wm[wm.value_abs >= w_threshold].feature.unique()
else:
if n_features is None:
n_features = n_features_default
# Get a subset of features
wm_g = wm.sort_values(["factor", "value_abs"], ascending=False).groupby(
["factor", "view"]
)
if w_threshold is None:
features = wm_g.head(n_features).feature.unique()
else:
features = (
wm_g[wm_g.value_abs >= w_threshold].head(n_features).feature.unique()
)
wm = wm[wm.feature.isin(features)]
# Fix features order
wm.feature = wm.feature.astype("category")
wm.feature = wm.feature.cat.reorder_categories(features)
wm = wm.sort_values(["factor", "feature"])
# Figure out rows & columns for the grid with plots (one plot per view)
view_vars = wm.view.unique()
ncols = min(ncols, len(view_vars))
nrows = int(np.ceil(len(view_vars) / ncols))
fig, axes = plt.subplots(
nrows,
ncols,
sharex=sharex,
sharey=sharey,
figsize=(
ncols * rcParams["figure.figsize"][0],
nrows * rcParams["figure.figsize"][1],
),
)
if ncols == 1:
axes = np.array(axes).reshape(-1, 1)
if nrows == 1:
axes = np.array(axes).reshape(1, -1)
for m, view in enumerate(view_vars):
ri = m // ncols
ci = m % ncols
wm_view = wm.query("view == @view")
# Construct the plot
g = sns.scatterplot(
data=wm_view,
x="factor",
y="feature",
hue="value",
linewidth=linewidth,
s=size,
palette=palette,
ax=axes[ri, ci],
**kwargs,
)
sns.despine(offset=10, trim=True, ax=g)
g.legend().remove()
norm = plt.Normalize(wm_view.value.min(), wm_view.value.max())
cmap = (
palette
if palette is not None
else sns.diverging_palette(220, 20, as_cmap=True)
)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
try:
g.figure.colorbar(sm, ax=axes[ri, ci])
g.get_legend().remove()
except Exception:
warn("Cannot make a proper colorbar")
plt.draw()
g.set_title(view)
g.set_xticklabels(g.get_xticklabels(), rotation=90, size=xticklabels_size)
g.set_yticklabels(g.get_yticklabels(), size=yticklabels_size)
# Remove unused axes
for i in range(len(view_vars), ncols * nrows):
ri = i // ncols
ci = i % ncols
fig.delaxes(axes[ri, ci])
return g
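# A minimal usage sketch (illustrative arguments; assumes a trained model):
def _plot_weights_dotplot_usage(model: mofa_model):
    # Dot plot of the strongest positive weights, one panel per view.
    return plot_weights_dotplot(model, n_features=5, only_positive=True)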
def plot_weights_scatter(
model: mofa_model,
x="Factor1",
y="Factor2",
view=0,
hist=False,
n_features: int = 10,
label_size: int = 5,
**kwargs,
):
"""
Plot weights for two factors
Parameters
----------
model : mofa_model
Factor model
x : optional
Factor which weights to plot along X axis (Factor1 by default)
y : optional
Factor which weights to plot along Y axis (Factor2 by default)
view : optional
The view to get the factor weights for (first view by default)
hist : optional
Whether to add marginal histograms to the scatterplot, i.e. a jointplot (False by default)
n_features : optional
Number of features to label (default is 10)
label_size : optional
Font size of feature labels (default is 5)
"""
w = (
model.get_weights(views=view, factors=[x, y], df=True)
.rename_axis("feature")
.reset_index()
)
# Get features to label
wm = w.melt(id_vars="feature", var_name="factor", value_name="value")
wm = wm.assign(value_abs=lambda x: x.value.abs())
wm["factor"] = wm["factor"].astype("category")
# Set default colour to darkgrey if none set
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = "darkgrey"
sns_plot = sns.jointplot if hist else sns.scatterplot
plot = sns_plot(x=x, y=y, data=w, **kwargs)
sns.despine(offset=10, trim=True)
# Label some features
add_text = plot.ax_joint.text if hist else plot.text
if n_features is not None and n_features > 0:
# Get a subset of features
wm = wm.sort_values(["factor", "value_abs"], ascending=False).groupby("factor")
features = wm.head(n_features).feature.unique()
w_label = w[w.feature.isin(features)].set_index("feature")
del wm
# Add labels to the plot
for i, point in w_label.iterrows():
add_text(
point[x],
point[y],
point.name,
horizontalalignment="left",
size=label_size,
color="black",
weight="regular",
)
return plot
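# A minimal usage sketch (illustrative arguments; assumes a trained model):
def _plot_weights_scatter_usage(model: mofa_model):
    # Scatter Factor1 against Factor2 weights with marginal histograms.
    return plot_weights_scatter(model, x="Factor1", y="Factor2", hist=True)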
def plot_weights_correlation(
model: mofa_model,
factors: Optional[Union[int, List[int]]] = None,
views=None,
covariates=None,
linewidths=0,
diag=False,
full=True,
cmap=None,
square=True,
**kwargs,
):
"""
Plot correlation of weights and, if provided, covariates
Parameters
----------
model : mofa_model
Factor model
factors : optional
Index of a factor (or indices of factors) to use (all factors by default)
views : optional
Subset of views to consider
covariates : optional
A vector, a matrix, or a data frame with covariates (one per column)
linewidths : optional
Heatmap linewidths argument (default is 0)
diag : optional
Whether to plot only the lower triangle of the correlation matrix (False by default)
full : optional
If covariates are provided, also plot inter-factor and inter-covariates correlation coefficients (True by default)
square : optional
Heatmap square argument (True by default)
cmap : optional
Heatmap cmap argument
"""
w = model.get_weights(factors=factors, views=views)
if covariates is not None:
# Transform a vector to a matrix
if len(covariates.shape) == 1:
covariates =
|
pd.DataFrame(covariates)
|
pandas.DataFrame
|
#basics
from gensim import utils
import numpy as np
from numpy.lib.utils import deprecate
import pandas as pd
import re
from functools import wraps
from typing import Union
# pytorch
import torch
from torch import Tensor
from torch._C import dtype
from torch.nn.utils.rnn import pad_sequence
# segnlp
from .label_encoder import LabelEncoder
from .array import ensure_numpy
from .array import ensure_list
from .array import create_mask
from .array import np_cumsum_zero
from .overlap import find_overlap
from .misc import timer
class Batch:
def __init__(self,
df: pd.DataFrame,
label_encoder : LabelEncoder,
pretrained_features: dict = {},
device = None
):
self._df : pd.DataFrame = df
self._pred_df : pd.DataFrame = df.copy(deep=True)
self.label_encoder : LabelEncoder = label_encoder
self._task_regexp = re.compile("seg|link|label|link_label")
self._pretrained_features = pretrained_features
self.device = device
self.__ok_levels = set(["seg", "token", "span", "pair"])
self.use_target_segs : bool = False
self._size = self._df["sample_id"].nunique()
# cache
self.__cache = {}
if "am_id" in self._df.columns:
self.__ok_levels.update(["am", "adu"])
if "seg" in label_encoder.task_labels:
self._pred_df["seg_id"] = None
self._pred_df["target_id"] = None
for task in label_encoder.task_labels:
self._pred_df[task] = None
# #remove columns we dont need
# for c in list(self._pred_df.columns):
# if self._task_regexp.search(c) and c not in label_encoder.task_labels:
# del self._df[c]
# del self._pred_df[c]
def __len__(self):
return self._size
def __sampling_wrapper(func):
@wraps(func)
def wrapped_get(self, *args, **kwargs):
if self.use_target_segs:
kwargs["pred"] = False
return func(self, *args, **kwargs)
return wrapped_get
def __get_column_values(self, df: pd.DataFrame, level: str, key:str):
if level == "token":
flat_values = df.loc[:, key].to_numpy()
else:
flat_values = df.groupby(f"{level}_id", sort = False).first().loc[:, key].to_numpy()
if isinstance(flat_values[0], str):
return flat_values
else:
return torch.LongTensor(flat_values)
def __get_span_idxs(self, df: pd.DataFrame, level:str ):
if level == "am":
ADU_start = df.groupby("adu_id", sort=False).first()["sample_token_id"].to_numpy()
ADU_end = df.groupby("adu_id", sort=False).last()["sample_token_id"].to_numpy() + 1
AC_lens = df.groupby("seg_id", sort=False).size().to_numpy()
AM_start = ADU_start
AM_end = ADU_end - AC_lens
return torch.LongTensor(np.column_stack((AM_start, AM_end)))
else:
start_tok_ids = df.groupby(f"{level}_id", sort=False).first()["sample_token_id"].to_numpy()
end_tok_ids = df.groupby(f"{level}_id", sort=False).last()["sample_token_id"].to_numpy() + 1
return torch.LongTensor(np.column_stack((start_tok_ids, end_tok_ids)))
def __get_mask(self, level:str, pred : bool = False):
return create_mask(self.get(level, "lengths", pred = pred), as_bool = True)
# def __seg_tok_lengths(self, df: pd.DataFrame, level:str):
# return df.groupby(level, sort=False).size().to_numpy()
def __get_lengths(self, df: pd.DataFrame, level:str):
if level == "token":
return torch.LongTensor(df.groupby(level=0, sort = False).size().to_numpy())
else:
return torch.LongTensor(df.groupby(level=0, sort=False)[f"{level}_id"].nunique().to_numpy())
def __get_pretrained_embeddings(self, df:pd.DataFrame, level:str, flat:bool):
if level == "token":
embs = self._pretrained_features["word_embs"]
else:
embs = self._pretrained_features["seg_embs"]
embs = embs[:, :max(self.__get_lengths(df, level)), :]
if flat:
embs = embs[self.__get_mask(level)]
return torch.tensor(embs, dtype = torch.float)
def __add_link_matching_info(self, pair_df:pd.DataFrame, j2i:dict):
def check_true_pair(row, mapping):
p1 = row["p1"]
p2 = row["p2"]
dir = row["direction"]
source = p2 if dir == 2 else p1
target = p1 if dir == 2 else p2
if source not in mapping:
return False
else:
correct_target = mapping[source]
return correct_target == target
j_jt = self._df.loc[:, ["seg_id", "target_id"]].dropna()
# maps a true source to the correct target using the ids of predicted pairs
source2target = {
j2i.get(j, "NONE"): j2i.get(jt, "NONE")
for j,jt in zip(j_jt["seg_id"], j_jt["target_id"])
}
if "NONE" in source2target:
source2target.pop("NONE")
if not source2target:
pair_df["true_link"] = False
return
pair_df["true_link"] = pair_df.apply(check_true_pair, axis = 1, args = (source2target, ))
def __create_pair_df(self, df: pd.DataFrame, pred :bool):
def set_id_fn():
pair_dict = dict()
def set_id(row):
p = tuple(sorted((row["p1"], row["p2"])))
if p not in pair_dict:
pair_dict[p] = len(pair_dict)
return pair_dict[p]
return set_id
# we also have information about whether the seg_id is a true segment
# and if so, which TRUE segment id it overlaps with, and how much
i2ratio, j2ratio, i2j, j2i = find_overlap(
target_df = self._df,
pred_df = self._pred_df
)
first_df = df.groupby("seg_id", sort=False).first()
first_df.reset_index(inplace=True)
last_df = df.groupby("seg_id", sort=False).last()
last_df.reset_index(inplace=True)
if pred:
first_target_df = self._df.groupby("seg_id", sort=False).first()
j2link_label = {j:row["link_label"] for j, row in first_target_df.iterrows()}
link_labels = [-1 if i not in i2j else j2link_label.get(i2j[i], -1) for i in first_df.index.to_numpy()]
first_df["link_label"] = link_labels
# we create ids for each member of the pairs
# the segments in the batch will have unique ids starting from 0 to
# the total number of segments
p1, p2 = [], []
j = 0
for _, gdf in df.groupby("sample_id", sort = False):
n = len(gdf.loc[:, "seg_id"].dropna().unique())
sample_seg_ids = np.arange(
start= j,
stop = j+n
)
p1.extend(np.repeat(sample_seg_ids, n).astype(int))
p2.extend(np.tile(sample_seg_ids, n))
j += n
# setup pairs
pair_df = pd.DataFrame({
"p1": p1,
"p2": p2,
})
if not len(pair_df.index):
return
|
pd.DataFrame()
|
pandas.DataFrame
|
import kgx
import os, sys, click, logging, itertools, pickle, json, yaml
import pandas as pd
from typing import List
from urllib.parse import urlparse
from kgx import Transformer, map_graph, Filter, FilterLocation
from kgx.validator import Validator
from kgx.cli.error_logging import append_errors_to_file, append_errors_to_files
from kgx.cli.decorators import handle_exception
from kgx.cli.utils import get_file_types, get_type, get_transformer, is_writable
from kgx.cli.utils import Config
from kgx.utils import file_write
from neo4j.v1 import GraphDatabase
from neo4j.v1.types import Node, Record
from collections import Counter, defaultdict, OrderedDict
from terminaltables import AsciiTable
from datetime import datetime
pass_config = click.make_pass_decorator(Config, ensure=True)
def error(msg):
click.echo(msg)
quit()
@click.group()
@click.option('--debug', is_flag=True, help='Prints the stack trace if error occurs')
@click.version_option(version=kgx.__version__, prog_name=kgx.__name__)
@pass_config
def cli(config, debug):
"""
Knowledge Graph Exchange
"""
config.debug = debug
if debug:
logging.basicConfig(level=logging.DEBUG)
def get_prefix(curie:str) -> str:
if ':' in curie:
prefix, _ = curie.split(':', 1)
return prefix
else:
return None
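# A small self-check of get_prefix (the CURIEs below are illustrative only):
def _get_prefix_example():
    assert get_prefix('HGNC:11603') == 'HGNC'
    assert get_prefix('no-colon-here') is None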
@cli.command('node-summary')
@click.argument('filepath', type=click.Path(exists=True), required=True)
@click.option('--input-type', type=click.Choice(get_file_types()))
@click.option('--max-rows', '-m', type=int, help='The maximum number of rows to return')
@click.option('--output', '-o', type=click.Path(exists=False))
def node_summary(filepath, input_type, max_rows, output):
"""
Loads and summarizes a knowledge graph node set
"""
t = build_transformer(filepath, input_type)
t.parse(filepath)
g = t.graph
tuples = []
xrefs = set()
with click.progressbar(g.nodes(data=True), label='Reading knowledge graph') as bar:
for n, data in bar:
if 'same_as' in data:
for xref in data['same_as']:
xrefs.add(get_prefix(xref))
category = data.get('category')
prefix = get_prefix(n)
if category is not None and len(category) > 1 and 'named thing' in category:
category.remove('named thing')
if isinstance(category, (list, set)):
category = ", ".join("'{}'".format(c) for c in category)
if prefix is not None:
prefix = "'{}'".format(prefix)
tuples.append((prefix, category))
click.echo('|nodes|: {}'.format(len(g.nodes())))
click.echo('|edges|: {}'.format(len(g.edges())))
xrefs = [x for x in xrefs if x is not None]
if len(xrefs) != 0:
line = 'xref prefixes: {}'.format(', '.join(xrefs))
if output is not None:
file_write(output, '|nodes|: {}'.format(len(g.nodes())))
file_write(output, '|edges|: {}'.format(len(g.edges())))
file_write(output, line)
else:
click.echo(line)
tuple_count = OrderedDict(Counter(tuples).most_common(max_rows))
headers = [['Prefix', 'Category', 'Frequency']]
rows = [[*k, v] for k, v in tuple_count.items()]
if output is not None:
file_write(output, AsciiTable(headers + rows).table, mode='a')
else:
click.echo(AsciiTable(headers + rows).table)
category_count = defaultdict(lambda: 0)
prefix_count = defaultdict(lambda: 0)
for (prefix, category), frequency in tuple_count.items():
category_count[category] += frequency
prefix_count[prefix] += frequency
headers = [['Category', 'Frequency']]
rows = [[k, v] for k, v in category_count.items()]
if output is not None:
file_write(output, AsciiTable(headers + rows).table, mode='a')
else:
click.echo(AsciiTable(headers + rows).table)
headers = [['Prefixes', 'Frequency']]
rows = [[k, v] for k, v in prefix_count.items()]
if output is not None:
file_write(output, AsciiTable(headers + rows).table, mode='a')
else:
click.echo(AsciiTable(headers + rows).table)
def stringify(s):
if isinstance(s, list):
if s is not None and len(s) > 1 and 'named thing' in s:
s.remove('named thing')
return ", ".join("'{}'".format(c) for c in s)
elif isinstance(s, str):
return "'{}'".format(s)
else:
return str(s)
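# A small self-check of stringify (illustrative values), showing that the
# generic 'named thing' category is dropped and values are quoted:
def _stringify_example():
    assert stringify(['named thing', 'gene']) == "'gene'"
    assert stringify('gene') == "'gene'"
    assert stringify(None) == 'None'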
@cli.command('edge-summary')
@click.argument('filepath', type=click.Path(exists=True), required=True)
@click.option('--input-type', type=click.Choice(get_file_types()))
@click.option('--max_rows', '-m', type=int, help='The maximum number of rows to return')
@click.option('--output', '-o', type=click.Path(exists=False))
def edge_summary(filepath, input_type, max_rows, output):
"""
Loads and summarizes a knowledge graph edge set
"""
t = build_transformer(filepath, input_type)
t.parse(filepath)
g = t.graph
tuples = []
with click.progressbar(g.edges(data=True), label='Reading knowledge graph') as bar:
for s, o, edge_attr in bar:
subject_attr = g.node[s]
object_attr = g.node[o]
subject_prefix = stringify(get_prefix(s))
object_prefix = stringify(get_prefix(o))
subject_category = stringify(subject_attr.get('category'))
object_category = stringify(object_attr.get('category'))
edge_label = stringify(edge_attr.get('edge_label'))
relation = stringify(edge_attr.get('relation'))
tuples.append((subject_prefix, subject_category, edge_label, relation, object_prefix, object_category))
tuple_count = OrderedDict(Counter(tuples).most_common(max_rows))
headers = [['Subject Prefix', 'Subject Category', 'Edge Label', 'Relation', 'Object Prefix', 'Object Category', 'Frequency']]
rows = [[*k, v] for k, v in tuple_count.items()]
if output is not None:
file_write(output, AsciiTable(headers + rows).table)
else:
click.echo(AsciiTable(headers + rows).table)
@cli.command(name='neo4j-node-summary')
@click.option('-a', '--address', type=str, required=True)
@click.option('-u', '--username', type=str, required=True)
@click.option('-p', '--password', type=str, required=True)
@click.option('-o', '--output', type=click.Path(exists=False))
@pass_config
def neo4j_node_summary(config, address, username, password, output=None):
if output is not None and not is_writable(output):
error(f'Cannot write to {output}')
bolt_driver = GraphDatabase.driver(address, auth=(username, password))
query = """
MATCH (x) RETURN DISTINCT x.category AS category
"""
with bolt_driver.session() as session:
records = session.run(query)
categories = set()
for record in records:
category = record['category']
if isinstance(category, str):
categories.add(category)
elif isinstance(category, (list, set, tuple)):
categories.update(category)
elif category is None:
continue
else:
error('Unrecognized value for node.category: {}'.format(category))
rows = []
with click.progressbar(categories, length=len(categories)) as bar:
for category in bar:
query = """
MATCH (x) WHERE x.category = {category} OR {category} IN x.category
RETURN DISTINCT
{category} AS category,
split(x.id, ':')[0] AS prefix,
COUNT(*) AS frequency
ORDER BY category, frequency DESC;
"""
with bolt_driver.session() as session:
records = session.run(query, category=category)
for record in records:
rows.append({
'category' : record['category'],
'prefix' : record['prefix'],
'frequency' : record['frequency']
})
df = pd.DataFrame(rows)
df = df[['category', 'prefix', 'frequency']]
if output is None:
click.echo(df)
else:
df.to_csv(output, sep='|', header=True)
click.echo('Saved report to {}'.format(output))
@cli.command(name='neo4j-edge-summary')
@click.option('-a', '--address', type=str, required=True)
@click.option('-u', '--username', type=str, required=True)
@click.option('-p', '--password', type=str, required=True)
@click.option('-o', '--output', type=click.Path(exists=False))
@pass_config
def neo4j_edge_summary(config, address, username, password, output=None):
if output is not None and not is_writable(output):
error(f'Cannot write to {output}')
bolt_driver = GraphDatabase.driver(address, auth=(username, password))
query = """
MATCH (x) RETURN DISTINCT x.category AS category
"""
with bolt_driver.session() as session:
records = session.run(query)
categories = set()
for record in records:
category = record['category']
if isinstance(category, str):
categories.add(category)
elif isinstance(category, (list, set, tuple)):
categories.update(category)
elif category is None:
continue
else:
error('Unrecognized value for node.category: {}'.format(category))
categories = list(categories)
query = """
MATCH (n)-[r]-(m)
WHERE
(n.category = {category1} OR {category1} IN n.category) AND
(m.category = {category2} OR {category2} IN m.category)
RETURN DISTINCT
{category1} AS subject_category,
{category2} AS object_category,
type(r) AS edge_type,
split(n.id, ':')[0] AS subject_prefix,
split(m.id, ':')[0] AS object_prefix,
COUNT(*) AS frequency
ORDER BY subject_category, object_category, frequency DESC;
"""
combinations = [(c1, c2) for c1 in categories for c2 in categories]
rows = []
with click.progressbar(combinations, length=len(combinations)) as bar:
for category1, category2 in bar:
with bolt_driver.session() as session:
records = session.run(query, category1=category1, category2=category2)
for r in records:
rows.append({
'subject_category' : r['subject_category'],
'object_category' : r['object_category'],
'subject_prefix' : r['subject_prefix'],
'object_prefix' : r['object_prefix'],
'frequency' : r['frequency']
})
df =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import datetime, time, json
from string import punctuation
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Convolution1D, GRU, MaxPooling1D
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras import initializers
from keras import backend as K
from keras.optimizers import SGD
from collections import defaultdict
# In[6]:
train = pd.read_csv("../data/train.csv")
test = pd.read_csv("../data/test.csv")
# In[7]:
#train.head(6)
# In[8]:
#test.head()
# In[9]:
print(train.shape)
print(test.shape)
# In[10]:
# Check for any null values
print(train.isnull().sum())
print(test.isnull().sum())
# In[11]:
# Replace missing values with the string 'empty'
train = train.fillna('empty')
test = test.fillna('empty')
# In[12]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[13]:
# Preview some of the pairs of questions
for i in range(6):
print(train.question1[i])
print(train.question2[i])
print()
# In[14]:
stop_words = ['the','a','an','and','but','if','or','because','as','what','which','this','that','these','those','then',
'just','so','than','such','both','through','about','for','is','of','while','during','to','What','Which',
'Is','If','While','This']
# In[191]:
def text_to_wordlist(text, remove_stop_words=True, stem_words=False):
# Clean the text, with the option to remove stop_words and to stem words.
# Convert words to lower case and split them
#text = text.lower()
# Clean the text
text = re.sub(r"[^A-Za-z0-9]", " ", text)
text = re.sub(r"what's", "", text)
text = re.sub(r"What's", "", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"I'm", "I am", text)
text = re.sub(r" m ", " am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\0k ", "0000 ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e-mail", "email", text)
text = re.sub(r"\s{2,}", " ", text)
text = re.sub(r"quikly", "quickly", text)
text = re.sub(r" usa ", " America ", text)
text = re.sub(r" USA ", " America ", text)
text = re.sub(r" u s ", " America ", text)
text = re.sub(r" uk ", " England ", text)
text = re.sub(r" UK ", " England ", text)
text = re.sub(r"india", "India", text)
text = re.sub(r"china", "China", text)
text = re.sub(r"chinese", "Chinese", text)
text = re.sub(r"imrovement", "improvement", text)
text = re.sub(r"intially", "initially", text)
text = re.sub(r"quora", "Quora", text)
text = re.sub(r" dms ", "direct messages ", text)
text = re.sub(r"demonitization", "demonetization", text)
text = re.sub(r"actived", "active", text)
text = re.sub(r"kms", " kilometers ", text)
text = re.sub(r"KMs", " kilometers ", text)
text = re.sub(r" cs ", " computer science ", text)
text = re.sub(r" upvotes ", " up votes ", text)
text = re.sub(r" iPhone ", " phone ", text)
text = re.sub(r"\0rs ", " rs ", text)
text = re.sub(r"calender", "calendar", text)
text = re.sub(r"ios", "operating system", text)
text = re.sub(r"gps", "GPS", text)
text = re.sub(r"gst", "GST", text)
text = re.sub(r"programing", "programming", text)
text = re.sub(r"bestfriend", "best friend", text)
text = re.sub(r"dna", "DNA", text)
text = re.sub(r"III", "3", text)
text = re.sub(r"the US", "America", text)
text = re.sub(r"Astrology", "astrology", text)
text = re.sub(r"Method", "method", text)
text = re.sub(r"Find", "find", text)
text = re.sub(r"banglore", "Banglore", text)
text = re.sub(r" J K ", " JK ", text)
# Remove punctuation from text
text = ''.join([c for c in text if c not in punctuation])
# Optionally, remove stop words
if remove_stop_words:
text = text.split()
text = [w for w in text if not w in stop_words]
text = " ".join(text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return(text)
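# A minimal usage sketch (the exact output depends on the stop_words list above):
def _text_to_wordlist_example():
    # Stop words such as 'What', 'the' and 'to' are removed by default.
    return text_to_wordlist("What is the best way to learn programming?")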
# In[192]:
def process_questions(question_list, questions, question_list_name, dataframe):
'''transform questions and display progress'''
for question in questions:
question_list.append(text_to_wordlist(question))
if len(question_list) % 100000 == 0:
progress = len(question_list)/len(dataframe) * 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
# In[193]:
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
# In[194]:
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
# In[165]:
test_question1 = []
process_questions(test_question1, test.question1, 'test_question1', test)
# In[166]:
test_question2 = []
process_questions(test_question2, test.question2, 'test_question2', test)
# In[195]:
# Preview some transformed pairs of questions
i = 0
for i in range(i,i+10):
print(train_question1[i])
print(train_question2[i])
print()
# In[168]:
# Find the length of questions
lengths = []
for question in train_question1:
lengths.append(len(question.split()))
for question in train_question2:
lengths.append(len(question.split()))
# Create a dataframe so that the values can be inspected
lengths =
|
pd.DataFrame(lengths, columns=['counts'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 13:30:31 2020
@author: User
"""
import sys
import datetime as dt
from collections import Counter
import pprint
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import os
from platform import system
import glob
import cycler
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from bs4 import BeautifulSoup
import re
from scipy.stats import linregress
# from sklearn import linear_model
import scipy.signal
import itertools
from itertools import chain, repeat
import logging
from pathlib import Path
# import h5py
from multiprocessing import Pool, cpu_count
# import timeit
# import time
matplotlib.rcParams.update({"font.size": 16})
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
try:
import statsmodels.formula.api as smf
import statsmodels.api as sm
import seaborn as sns
except Exception as e:
print("No modules: %s" % e)
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.experiments.EIS.models import Model_Collection
import post_helper
import merger
# import EC
# sys.path.append(list(FH_path.rglob('*.py')))
# import FH_path.joinpath('FindExpFolder.py')
# import FindExpFolder.py
# from FileHelper import FindExpFolder
# from FindExpFolder import *
# from .experiments import EIS
# from .runEC import run_PAR_DW
from elchempy.runEC.EC_logging_config import start_logging
# logger = start_logging(__name__)
else:
# print('\n\n***** run_PAR_DW *****')
print(f"File: {__file__}, Name:{__name__}, Package:{__package__}")
# FH_path = Path(__file__).parent.parent.parent
# sys.path.append(str(FH_path))
# import FileHelper
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.runEC.EC_logging_config import start_logging
from elchempy.PostEC import post_helper, merger
from elchempy.experiments.EIS.models import Model_Collection
# logger = start_logging(__name__)
_logger = logging.getLogger(__name__)
_logger.setLevel(20)
EvRHE = "E_AppV_RHE"
class PostEC:
AllColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"E_Applied_VRHE",
"j A/cm2",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
DropColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
KeepColls = [
"E_AppV_RHE",
"jmAcm-2",
"Jcorr",
"J_N2_scan",
"Jkin_max",
"Jkin_min",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
# SampleCodes = FindExpFolder.LoadSampleCode()
# FindExpFolder('VERSASTAT').SampleCodeLst
# PostDestDir.mkdir(parents=True,exist_ok=True)
# ExpPARovv = EC_loadOVV()
# OnlyRecentMissingOVV = runEC.MainPrepareList()
# ExpPARovv = ExpPARovv.iloc[100:120]
OutParsID = pd.DataFrame()
# Go1, Go2, Go3 = True, False, False
# Go1, Go2, Go3 = False, True, False
Go1, Go2, Go3 = False, False, True
# KL_coeff = KL_coefficients()
EvRHE_List = [
0,
0.1,
0.2,
0.3,
0.4,
0.45,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.9,
1,
]
def __init__(self):
self.DestDir = FindExpFolder("VERSASTAT").PostDir
@staticmethod
def StartLogging(level_log="INFO"):
# level_log = kwargs['level']
log_fn = FindExpFolder("VERSASTAT").PostDir.joinpath("PostEC_logger.log")
logging.basicConfig(
filename=log_fn,
filemode="w",
level=level_log,
format="%(asctime)s %(levelname)s, %(lineno)d: %(message)s",
)
logging.warning("Started logging for PostEC script...")
def applyParallel(dfGrouped, func):
with Pool(cpu_count() - 1) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return ret_list
def check_status(file, verbose=False):
"""Check status will return (status,extra) of filename"""
PAR_file_test = Path(str(file)).stem
match = [
re.search("(?<!VERS|Vers)(AST|postAST|pAST)", str(a))
for a in PAR_file_test.split("_")
]
if any(match):
status = "EoL"
extra = [
a
for a in PAR_file_test.split("_")
if [i for i in match if i][0][0] in a
]
if verbose:
print(file, status, *extra)
return status, extra[0]
# if any([re.search(r'', i) for i in str(Path(str(file)).stem.split('_'))]):
else:
return "BoL", 0
# status =
# extra = [0]
# return status,extra
def postEC_Status(files, verbose=False):
# files = ['N2_HER_1500rpm_JOS6_pAST-sHA_285_#3_Disc_Parstat']
if len(files) > 1:
status_lst, extra_lst = [], []
for file in files:
status, extra = PostEC.check_status(file)
status_lst.append(status)
extra_lst.append(extra)
return status_lst, extra_lst
else:
return PostEC.check_status(files)
def OLD_PostOrganizeFolders(TakeRecentList=True):
postOVV = []
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PAR_version = FileOperations.version
RunOVV_fn_opts = list(
FindExpFolder("VERSASTAT").DestDir.rglob(
"RunOVV_v{0}.xlsx".format(PAR_version)
)
)
RunOVV_fn = [i for i in RunOVV_fn_opts if not "_Conflict" in i.stem][0]
if RunOVV_fn.is_file() and TakeRecentList == True:
OvvFromFile = pd.read_excel(RunOVV_fn, index_col=[0])
status, extra = PostEC.postEC_Status(OvvFromFile.PAR_file.values)
OvvFromFile = OvvFromFile.assign(
**{
"Date_PAR_EXP": OvvFromFile.PAR_date - OvvFromFile.EXP_date,
"Status": status,
"Extra": extra,
}
)
OnlyRecentMissingOVV = OvvFromFile
# OvvFromFile['Date_PAR_EXP'] = OvvFromFile.PAR_date-OvvFromFile.EXP_date
# OvvFromFile['Status'] = OvvFromFile.PAR_file.values
print("EC OVV loaded from file:{0}".format(RunOVV_fn))
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, ["Dest_dir", "EXP_dir", "PAR_file"]
)
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
# CS_parts_pOVV = FileOperations.find_CS_parts(OnlyRecentMissingOVV.Dest_dir.iloc[0])
# chLst =[]
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in OnlyRecentMissingOVV.Dest_dir.values]
# OnlyRecentMissingOVV['Dest_dir'] = chLst
# else:
# pass
postOVVlst, outLst = [], []
postOVVcols = [
"DestFilename",
"SampleID",
"Status",
"Status_extra",
"Electrolyte",
"Gas",
"RPM",
"Scanrate",
"EXP_date",
"Type_Exp",
"SourceFilename",
"Exp_dir",
]
# postOVVout = PostEC.FromListgrp(group)
# postOVVlst = PostEC.applyParallel(OnlyRecentMissingOVV.groupby('Dest_dir'),PostEC.FromListgrp)
# postOVVlst = [outLst.append(PostEC.FromListgrp(i)) for i in OnlyRecentMissingOVV.groupby('Dest_dir')]
# for i in OnlyRecentMissingOVV.groupby('Dest_dir'):
# PostEC.FromListgrp(i)
# try:
# postOVVout = pd.DataFrame(postOVVlst,columns=)
# except Exception as e:
# postOVVout = pd.DataFrame(postOVVlst)
# for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir']):
# PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])
# pass
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir'])]
postOVVout = pd.concat(
[pd.DataFrame(i, columns=postOVVcols) for i in outLst],
sort=False,
ignore_index=True,
)
postOVVout.to_excel(PostDestDir.joinpath("postEC_Organized.xlsx"))
return postOVVout
class EnterExitLog:
def __init__(self, funcName):
self.funcName = funcName
def __enter__(self):
_logger.info(f"Started: {self.funcName}")
self.init_time = dt.datetime.now()
return self
def __exit__(self, type, value, tb):
self.end_time = dt.datetime.now()
self.duration = self.end_time - self.init_time
_logger.info(f"Finished: {self.funcName} in {self.duration} seconds")
def func_timer_decorator(func):
def func_wrapper(*args, **kwargs):
with EnterExitLog(func.__name__):
return func(*args, **kwargs)
return func_wrapper
def get_daily_pickle(exp_type=""):
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(daily_pkl_options, key=lambda x: x.stat().st_ctime)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
}
)
if "EIS" in exp_type:
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_BRUTE_{system()}.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_RAW_WB_{system()}.pkl.compress"
),
}
)
return _result
def _collect_test():
tt = CollectLoadPars(load_type="fast")
class CollectLoadPars:
def __init__(self, load_type="fast"):
self.load_type = load_type
self.load_pars()
self.collect_dict()
def load_pars(self):
_BaseLoad = BaseLoadPars()
_kws = {"EC_index": _BaseLoad.EC_index, "SampleCodes": _BaseLoad.SampleCodes}
if "fast" in self.load_type:
_kws.update(**{"reload": False, "reload_raw": False})
self.EIS_load = EIS_LoadPars(**_kws)
self.ORR_load = ORR_LoadPars(**_kws)
self.N2_load = N2_LoadPars(**_kws)
def collect_dict(self):
_load_attrs = [i for i in self.__dict__.keys() if i.endswith("_load")]
_collect = {}
for _load_pars in _load_attrs:
_pars_name = f'{_load_pars.split("_")[0]}_pars'
if hasattr(getattr(self, _load_pars), _pars_name):
_pars = getattr(getattr(self, _load_pars), _pars_name)
_collect.update({_pars_name: _pars})
self.pars_collection = _collect
class BaseLoadPars:
_required_funcs = [
"make_raw_pars_from_scratch",
"edit_raw_columns",
"search_pars_files",
"read_in_pars_files",
"extra_stuff_delegator",
]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
reload=False,
reload_raw=False,
):
self.exp_type = exp_type
self._auto_set_exp_type()
self.EC_index = EC_index
self.SampleCodes = SampleCodes
self._check_class_req_functions()
self.check_EC_index()
self.set_OVV_exp_type()
self._reload = reload
self._reload_raw = reload_raw
self.get_daily_pickle()
if self.exp_type:
self.load_delegator()
def _auto_set_exp_type(self):
_cls_name = self.__class__.__name__
if "_" in _cls_name:
_cls_exp_type = _cls_name.split("_")[0]
_exp_type = f"{_cls_exp_type}_pars"
self.exp_type = _exp_type
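            # e.g. class "EIS_LoadPars" -> exp_type "EIS_pars",
            # "ORR_LoadPars" -> "ORR_pars", "N2_LoadPars" -> "N2_pars".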
def check_EC_index(self):
if self.EC_index.empty:
EC_index = ECRunOVV(load=1).EC_index
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
self.EC_index = EC_index
if self.SampleCodes.empty:
SampleCodes = FindExpFolder().LoadSampleCode()
self.SampleCodes = SampleCodes
# SampleCodesChar().load
def set_OVV_exp_type(self):
if not self.EC_index.empty and self.exp_type:
PAR_exp_uniq = self.EC_index.PAR_exp.unique()
PAR_match = [
parexp
for parexp in PAR_exp_uniq
if self.exp_type.split("_")[0] in parexp
]
self.exp_type_match = PAR_match
# if PAR_match:
EC_index_exp = self.EC_index.loc[self.EC_index.PAR_exp.isin(PAR_match)]
self.EC_index_exp = EC_index_exp
if EC_index_exp.empty:
_logger.error(f'set_OVV_exp_type "{self.__class__.__name__}" empty')
self.EC_index_exp_destdirs = EC_index_exp.Dest_dir.unique()
def get_daily_pickle(self):
exp_type = self.exp_type
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(
daily_pkl_options, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
if not daily_pkl_options and not self._reload_raw:
self._reload_raw = True
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
_pickle_path_RAW_read_in = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{exp_type}_{system()}_RAW_read_in.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
"pkl_path_RAW_read_in": _pickle_path_RAW_read_in,
}
)
if "EIS" in exp_type:
daily_pkl_options_RAW_WB = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW_WB.pkl.compress"
)
)
daily_pkl_options_RAW_WB = sorted(
daily_pkl_options_RAW_WB, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_BRUTE.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder(
"VERSASTAT"
).PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW_WB.pkl.compress"
),
"daily_options_RAW_WB": daily_pkl_options_RAW_WB,
}
)
self.daily_pickle_path = _result
def load_delegator(self):
setattr(self, self.exp_type, pd.DataFrame())
if self._reload:
if self._reload_raw:
self.make_raw_pars_from_scratch()
else:
self.read_in_daily_raw()
if hasattr(self, "edit_raw_columns"):
try:
self.edit_raw_columns()
except Exception as e:
_logger.warning(
f'edit_raw_columns in load_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
self.save_daily_pars()
else:
self.read_in_daily_pars()
try:
self.extra_stuff_delegator()
except Exception as e:
_logger.warning(
f'extra_stuff_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
def _check_class_req_functions(self):
for _f in self._required_funcs:
if not hasattr(self, _f) and "BaseLoadPars" not in self.__class__.__name__:
_logger.warning(
f'Class "{self.__class__.__name__}" is missing required func: "{_f}"'
)
def save_daily_pars(self):
pars = getattr(self, self.exp_type)
pars.to_pickle(self.daily_pickle_path["daily_path"])
_logger.info(
f'{self.exp_type} len({len(pars)}) OVV to daily pickle: {self.daily_pickle_path.get("daily_path")}'
)
def read_in_daily_pars(self):
if self.daily_pickle_path.get("daily_options"):
_pars_fp = self.daily_pickle_path.get("daily_options")[-1]
_logger.info(
f"start read_in_daily_pars {self.exp_type} pars OVV from daily {_pars_fp} "
)
_pars = pd.read_pickle(_pars_fp)
try:
_pars = FileOperations.ChangeRoot_DF(_pars, [], coltype="string")
setattr(self, self.exp_type, _pars)
_logger.info(f"Loaded {self.exp_type} pars OVV from daily {_pars_fp} ")
except Exception as e:
_pars = pd.DataFrame()
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp} {e} "
)
else:
_pars = pd.DataFrame()
_pars_fp = "options empty list"
if _pars.empty:
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp}: empty "
)
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", _pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW_read_in.to_pickle(_raw_read_fp)
def read_in_daily_raw(self):
_raw_fp = self.daily_pickle_path.get("daily_options_RAW")[-1]
_pars_RAW = pd.read_pickle(_raw_fp)
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
if not "level_0" in _pars_RAW.columns:
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(f"Loaded raw df {self.exp_type} from daily {_raw_fp} ")
def save_daily_raw(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.to_pickle(self.daily_pickle_path.get("daily_path_RAW"))
_logger.info(
f'{self.exp_type} OVV to daily pickle: {self.daily_pickle_path.get("daily_path_RAW")}'
)
def set_gen_raw_fls(self):
_par_files = [
list(self.search_pars_files(d)) for d in self.EC_index_exp_destdirs
]
self._par_files = _par_files
if not _par_files:
_logger.warning(f"{self.exp_type} set_gen_raw_fls: list empty ")
self._par_fls_gen = (a for i in self._par_files for a in i)
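        # NB: _par_fls_gen is a one-shot generator; generate_raw_df() below consumes it.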
@func_timer_decorator
def generate_raw_df(self):
if not hasattr(self, "_par_fls_gen"):
self.set_gen_raw_fls()
_pars_lst = list(self.read_in_pars_files(self._par_fls_gen))
try:
_pars_RAW = pd.concat(_pars_lst, sort=False)
except Exception as e:
_pars_RAW = pd.DataFrame()
_logger.warning(f"{self.exp_type} generate_raw_df: {e}")
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
@staticmethod
def get_source_meta(filepath):
i = filepath
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_meta_res = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
return _meta_res
def extra_stuff_delegator(self):
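        # NB: scanning self.__dict__ only sees instance attributes, so "_extra_*"
        # methods defined on subclasses are not auto-discovered here.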
_extra_funcs = [i for i in self.__dict__.keys() if i.startswith("_extra")]
for _func in _extra_funcs:
try:
func = getattr(self, _func)
func()
# self._extra_plotting()
except Exception as e:
_logger.info(
f"{self.__class__.__name__} Extra stuff failed because {e}"
)
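def _baseloadpars_subclass_sketch():
    # Minimal sketch (not used by the pipeline) of the hooks BaseLoadPars expects
    # from a subclass; the "DEMO" exp_type and the file pattern are hypothetical.
    class DEMO_LoadPars(BaseLoadPars):
        def search_pars_files(self, dest_dir):
            return Path(dest_dir).rglob("*_DEMO_pars*.xlsx")

        def read_in_pars_files(self, _genlist):
            for i in _genlist:
                yield pd.read_excel(i, index_col=[0]).assign(**self.get_source_meta(i))

        def make_raw_pars_from_scratch(self):
            self.generate_raw_df()
            self.save_daily_raw()

        def edit_raw_columns(self):
            setattr(self, self.exp_type, getattr(self, f"{self.exp_type}_RAW"))

        def extra_stuff_delegator(self):
            pass

    return DEMO_LoadPars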
def _testing():
tt = EIS_LoadPars(reload=False, reload_raw=False)
tt._reload_raw
self = tt
self.load_delegator()
self.make_raw_pars_from_scratch()
class EIS_LoadPars(BaseLoadPars):
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="EIS_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
    def read_in_pars_files(self, _genlist):
        # Yield one pars DataFrame per EIS pars file from the generator of file paths.
        # _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
        while True:
            try:
                i = next(_genlist)
                if i.name.endswith("xlsx"):
                    _pp = pd.read_excel(i, index_col=[0])
                elif i.name.endswith("pkl"):
                    _pp = pd.read_pickle(i)
                else:
                    continue  # skip files with unexpected extensions
                _pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
                _meta = self.get_source_meta(i)
                _pp = _pp.assign(**_meta)
                yield _pp
            except StopIteration:
                return "all done"
def search_pars_files(self, _dest_dir):
return Path(_dest_dir.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
self._load_WB_delegator()
self._merge_WB_pars_raw()
self._raw_finish_edit_columns()
self.save_daily_raw()
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
EIS_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
EIS_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_RAW_read_in.to_pickle(_raw_read_fp)
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def _raw_extra_steps(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
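        # Collect the lmfit variable columns, cast them to float, and repair cells that
        # still contain raw "Parameter(... value=...)" string representations.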
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[a for i in float_cols for a in EIS_pars_all.columns if a.startswith(i)]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(0)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
wrong_fls = [
EIS_pars_all.loc[EIS_pars_all[i].astype(str).str.contains("Parameter")]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _load_WB_delegator(self):
daily_options_WB = self.daily_pickle_path.get("daily_options_RAW_WB")
if daily_options_WB:
_WB_RAW_daily_path = daily_options_WB[-1]
if _WB_RAW_daily_path.exists() and not (self._reload or self._reload_raw):
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
else:
self.reload_raw_WB_df()
else:
self.reload_raw_WB_df()
def reload_raw_WB_df(self):
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type} WB')
_EIS_WB_files = [
list(Path(d.joinpath("EIS/lin_Warburg")).rglob(f"lin_Warburg*.pkl"))
for d in self.EC_index_exp_destdirs
]
self._EIS_WB_files = _EIS_WB_files
self._EIS_WB_fls = (a for i in _EIS_WB_files for a in i)
_WB_lst = list(self.read_in_pars_files(self._EIS_WB_fls))
_EIS_WB_pars_all = pd.concat(_WB_lst, sort=False, ignore_index=True)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
_EIS_WB_pars_all.to_pickle(self.daily_pickle_path.get("daily_path_RAW_WB"))
def _merge_WB_pars_raw(self):
_EIS_WB_pars_all = getattr(self, f"{self.exp_type}_WB")
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
_diffcols = set(EIS_pars_all.columns).difference(_EIS_WB_pars_all.columns)
_mcols = [
i
for i in set(EIS_pars_all.columns).intersection(_EIS_WB_pars_all.columns)
if i
not in [
"sourceFilename",
"source_mtime",
"source_delta_mtime",
"sourcebasename",
]
]
_dtype_mismatch = [
(i, EIS_pars_all[i].dtype, _EIS_WB_pars_all[i].dtype)
for i in _mcols
if EIS_pars_all[i].dtype != _EIS_WB_pars_all[i].dtype
]
if _dtype_mismatch:
_excl = []
for i in _dtype_mismatch:
try:
_EIS_WB_pars_all[i[0]] = _EIS_WB_pars_all[i[0]].astype(i[1])
except Exception as e:
_excl.append(i[0])
print(i, "\n", e)
_mcols = [i for i in _mcols if i not in _excl]
# EIS_pars_all[i[0]] = EIS_pars_all[i[0]].astype(i[2])
        _merge = pd.merge(
            EIS_pars_all, _EIS_WB_pars_all, on=_mcols, how="left", suffixes=("", "_WB")
        )
        if not _merge.empty:
            EIS_pars_all = _merge
        else:
            print("WB merge was empty")
        setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _raw_finish_edit_columns(self):
# EIS_pars_all = self._merge_WB_pars_raw(EIS_pars_all)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
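        # Flag spectra fitted on simulated ("fakeZmean") data and drop pars whose
        # PAR_file is no longer present in the EC index (their source files get cleaned up).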
EIS_pars_all = EIS_pars_all.assign(
**{
"EIS_fake": [
"fakeZmean" in Path(i).name
for i in EIS_pars_all.PAR_file.to_numpy()
]
}
)
        _not_in_index = EIS_pars_all.loc[
            (
                ~(EIS_pars_all.PAR_file.isin(self.EC_index.PAR_file.values))
                & (EIS_pars_all.EIS_fake == False)
            )
        ]
CleanUpCrew(list_of_files=_not_in_index.sourceFilename.unique(), delete=True)
EIS_pars_all = EIS_pars_all.iloc[
~(EIS_pars_all.index.isin(_not_in_index.index))
]
EIS_pars_all = Load_from_Indexes.test_update_from_index(
EIS_pars_all, self.EC_index
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def edit_raw_columns(self):
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
# EIS_pars_RAW = self._raw_extra_steps(EIS_pars_RAW)
E_dc_RHE_cols = [
(np.round(i, 3), np.round(i, 3) * 1e3) for i in EIS_pars_all[EvRHE].values
]
EIS_pars_all = EIS_pars_all.assign(
**{
"E_dc_RHE": [i[0] for i in E_dc_RHE_cols],
"E_dc_RHE_mV": [i[1] for i in E_dc_RHE_cols],
}
)
EIS_pars_recent = EIS_pars_all.loc[
(EIS_pars_all.source_mtime > pd.Timestamp(dt.date(2020, 11, 25)))
& (EIS_pars_all.PAR_file.str.contains("None") == False)
]
EIS_pars_undup = EIS_pars_recent.dropna(subset=self.col_names).drop_duplicates(
keep="first"
)
# === POST EDITING OF LOADED PARS ===
EIS_pars_undup = EIS_pars_undup.assign(
**{"Loading_cm2": EIS_pars_undup["Loading_cm2"].round(3)}
)
EIS_pars_undup = post_helper.make_uniform_EvRHE(EIS_pars_undup)
EIS_pars_undup = CollectPostOVV.MatchECconditions(EIS_pars_undup)
# EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(EC_index, EIS_pars_undup)
_oc_OVV = list(EIS_pars_undup.columns.intersection(self.EC_index_exp.columns))
if not set(self.EC_index_exp.groupby(_oc_OVV).groups.keys()).intersection(
EIS_pars_undup.groupby(_oc_OVV).groups.keys()
):
            _drpcols = [
                a
                for a in EIS_pars_undup.columns
                if (
                    a in [i for i in _oc_OVV if i != "PAR_file"]
                    or "_".join(a.split("_")[0:-1])
                    in [i for i in _oc_OVV if i != "PAR_file"]
                )
            ]
# EIS_pars_undup.drop(columns =_drpcols)
EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(
self.EC_index, EIS_pars_undup.drop(columns=_drpcols)
)
# EIS_pars_undup = pd.merge(EIS_pars_undup,EIS_OVV,on=_oc_OVV, how='left')
_oc_SC = list(EIS_pars_undup.columns.intersection(self.SampleCodes.columns))
EIS_pars_undup = pd.merge(
EIS_pars_undup, self.SampleCodes, how="left", on=_oc_SC
)
EIS_pars_BRUTE = EIS_pars_undup.loc[
(EIS_pars_undup.BRUTE_FIT == 1) | (EIS_pars_undup.FINAL_FIT == 0)
]
        if self.BRUTE_out:
            EIS_pars_BRUTE.to_pickle(self.daily_pickle_path["daily_path_BRUTE"])
EIS_pars = EIS_pars_undup.loc[(EIS_pars_undup.FINAL_FIT == 1)]
EIS_pars = EIS_extra_methods.add_best_model_per_spectrum(EIS_pars)
setattr(self, self.exp_type, EIS_pars)
# def extra_stuff_delegator(self):
# try:
# self._extra_best_models()
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_best_models(self):
_err_type = "lmfit_MSE"
_filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-2) \
& (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
& (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
& (EIS_pars.Qad < 1) & (EIS_pars.tau < 1E3)"
_filter += '& (EIS_pars.SampleID.str.contains("JOS1|JOS2|JOS3|JOS4|JOS5"))'
_filter += "& (EIS_pars.EIS_fake == False)"
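        # NB: _filter is a pandas-style boolean expression evaluated with eval()
        # against the local name EIS_pars defined just below.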
_grps = ["Model_EEC", "Gas", "lmfit_var_names"][0:2]
EIS_pars = self.EIS_pars
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values(["Gas", "mean"], ascending=True)
)
print(best_models)
keep_models = (
best_models.loc[(best_models["count"] > 5) & (best_models["std"] > 0)]
.index.get_level_values(0)
.unique()
)
EIS_pars = EIS_pars.loc[EIS_pars.Model_EEC.isin(keep_models)]
if hasattr(EIS_pars, "best_mod_name"):
# EIS_best_mods = EIS_pars.loc[EIS_pars.Model_EEC_name.isin([i for i in EIS_pars.best_mod_name.unique() if not pd.isna(i)])]
EIS_best_mods = EIS_pars.loc[
EIS_pars.index.isin(
[i for i in EIS_pars.best_mod_n.unique() if not pd.isna(i)]
)
]
self.EIS_pars_best_mods = EIS_best_mods
_agg = (
EIS_best_mods.dropna(subset=[_err_type])
.groupby(_grps + ["E_RHE"])[_err_type]
.agg(["count", "mean", "std"])
)
_agg_best = _agg.loc[_agg["count"] > 3].sort_values(
["Gas", "E_RHE", "mean"], ascending=True
)
def _extra_plotting(self):
if hasattr(self, "EIS_pars_best_mods"):
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Qad",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 0.05),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="R_ion",
c="E_RHE",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
xlim=(0.1, 2e3),
logx=True,
)
def _testing():
t2 = ORR_LoadPars(reload=True, reload_raw=True)
tf2 = ORR_LoadPars(reload=False, reload_raw=False)
t2._reload_raw
self = tf2
self.load_delegator()
self.make_raw_pars_from_scratch()
class ORR_LoadPars(BaseLoadPars):
read_types = ["ORR_pars", "KL_pars"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="ORR_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
# _source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
# _delta_mtime = dt.datetime.now() - _source_mtime
_i_stem = i.stem
_pparts = i.parent.parts
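                # Classify the file by its parent folder (KL / RingDisk / TAFEL) and by
                # its filename prefix; only types listed in self.read_types are read fully.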
if "KL" == _pparts[-1]:
if _i_stem.startswith("KL_"):
_type = "KL_data"
else:
_type = "KL_unknown"
elif "RingDisk" == _pparts[-1]:
_type = "ORR_ringdisk"
elif "TAFEL" == _pparts[-1]:
_type = "Tafel"
else:
if _i_stem.startswith("ORR_pars"):
_type = "ORR_pars"
elif _i_stem.startswith("KL_pars"):
_type = "KL_pars"
elif _i_stem.startswith("O2_ORR") and _i_stem.endswith(
f"_RRDE_v{FileOperations.version}"
):
_type = "ORR_RRDE"
else:
_type = "O2_ORR_unknown"
_meta = self.get_source_meta(i)
_meta.update({"source_type": _type})
if _type in self.read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
            except StopIteration:
                return "all done"
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def search_pars_files(self, dest_dir):
return Path(dest_dir.joinpath(f"ORR_v{FileOperations.version}")).rglob("*xlsx")
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
# self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def edit_raw_columns(self):
### Fixing the pars after loading...
# TODO : taking out duplicates based on time_since_run....
ORR_pars_char = getattr(self, f"{self.exp_type}_RAW")
# Load_na = ORR_pars_char.loc[(ORR_pars_char.Loading_cm2.isna()) & (ORR_pars_char.PAR_file.isna() == False)]
# if not Load_na.empty:
# Load_na_missingvalues =[(n,*GetSampleID.ink_loading_from_filename(i.PAR_file)) for n,i in Load_na.iterrows()]
# Load_na_vals = pd.DataFrame(Load_na_missingvalues).rename(columns={1 : 'Loading_name',2 : 'Loading_cm2'}).set_index([0])
# ORR_pars_char.Loading_cm2.fillna(value=Load_na_vals.Loading_cm2,inplace=True)
# # ORR_char_merge_cols = [i for i in ORR_pars.columns if i in SampleCodes.columns]
ORR_pars_char = ORR_pars_char.drop(
columns=[i for i in ORR_pars_char.columns if "Unnamed" in i]
)
if not ORR_pars_char.loc[ORR_pars_char.Loading_cm2.isna()].empty:
_loading_cols = ["Loading_cm2", "Loading_name", "Loading_date"]
ORR_pars_char = ORR_pars_char.drop(columns=_loading_cols)
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file"] + _loading_cols],
on="PAR_file",
how="left",
)
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.fillna(
value=0.379
) # fillna for Loading_cm2
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.round(3)
if ORR_pars_char.postAST.dropna().empty:
ORR_pars_char = ORR_pars_char.drop(columns="postAST")
# _int = list(set(ORR_pars_char.columns).intersection(set(EC_index.columns)))
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
ORR_pars_char = make_uniform_RPM_DAC(ORR_pars_char)
setattr(self, f"{self.exp_type}", ORR_pars_char)
# def extra_stuff_delegator(self):
# try:
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_plotting(self):
ORR_pars_char = getattr(self, f"{self.exp_type}")
for swp, swgrp in ORR_pars_char.query("(pH < 14) & (RPM_DAC > 900)").groupby(
"Sweep_Type"
):
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
# plt.figure()
swgrp.plot(
y="ORR_Jkin_min_750",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 50],
xlim=(0.5, 1),
ax=ax1,
)
            ax1.set_xlabel("E onset / V_RHE")
swgrp.plot(
y="ORR_Frac_H2O2_600",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 100],
xlim=(0.5, 1),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with E_onset")
plt.show()
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
swgrp.plot(
y="ORR_E_onset",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.5, 1),
ax=ax1,
)
swgrp.plot(
y="ORR_Jkin_min_750",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.001, 50),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with N2_BG lin slope")
plt.show()
plt.close()
def _N2_testing():
n2 = N2_LoadPars(reload=True, reload_raw=True)
n2r = N2_LoadPars(reload=True, reload_raw=False)
class N2_LoadPars(BaseLoadPars):
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def _old(self):
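        # Legacy N2 loading path kept for reference only; `reload` below is not defined
        # in this scope, so this path would need updating before reuse.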
IndexOVV_N2_pars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"N2Cdl_pars_IndexOVV_v{0}.pkl.compress".format(FileOperations.version)
)
n2_daily = get_daily_pickle(exp_type="N2_all")
if n2_daily.get("_exists", False) and reload != True:
# Cdl_pars_char = pd.read_excel(IndexOVV_N2_pars_fn,index_col=[0])
Cdl_pars_char = pd.read_pickle(n2_daily.get("daily_path"))
Cdl_pars_char = FileOperations.ChangeRoot_DF(
Cdl_pars_char, [], coltype="string"
)
else:
# @@ Check POST_AST status from OVV and PRM
_logger.info(
f'START reloading N2_pars OVV from daily {n2_daily["today"]:%Y-%m-%d}'
)
# EC_index = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
# EC_index = FileOperations.ChangeRoot_DF(OnlyRecentMissingOVV,[])
# OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# OnlyRecentMissingOVV['Loading_cm2'] = OnlyRecentMissingOVV['Loading_cm2'].round(3)
# SampleCodes = SampleCodesChar().load
# EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
# def read_df(_par_fls, ):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
def search_pars_files(self, destdir):
return Path(destdir.joinpath(f"N2_scans_v{FileOperations.version}")).rglob(
"*.xlsx"
)
def read_in_pars_files(self, _genlist, read_types=["Cdl_data", "Cdl_pars"]):
while True:
try:
i = next(_genlist)
_i_stem = i.stem
_meta = self.get_source_meta(i)
if _i_stem.endswith("_BG"):
_N2_type = "BG"
else:
if _i_stem.startswith("CV_"):
_N2_type = "CV"
if _i_stem.endswith(f"_first_v{FileOperations.version}"):
_N2_type = "CV_first"
# if not 'Scan Rate' in _pp.columns:
# 'N2_CV_raw = N2_CV_raw.assign(**{'ScanRate' : [i.split(f'_v{FileOperations.version}')[0].split('_')[-1] for i in N2_CV_raw.basename.to_numpy()]})
elif _i_stem.startswith("Cdl_data_"):
_N2_type = "Cdl_data"
elif _i_stem.startswith("Cdl_pars"):
_N2_type = "Cdl_pars"
else:
_N2_type = "N2_unknown"
_meta.update({"N2_type": _N2_type})
if _N2_type in read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
            except StopIteration:
                return "all done"
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
if not _pars_RAW.empty:
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(
f'Reloading "{self.__class__.__name__}" {self.exp_type} len({len(_pars_RAW)}'
)
def _old_stuff():
if n2_daily.get("_raw_exists", False) and use_daily is True:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_path_RAW"))
elif n2_daily.get("daily_options_RAW", False) and use_daily is True:
if n2_daily.get("daily_options_RAW")[-1]:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_options_RAW")[-1])
else: # Construct new N2 pars ovv from reading in files
N2_OVV = EC_index.loc[EC_index.PAR_exp == "N2_act"]
_par_files = [
list(Path(d.joinpath("N2_scans_v30")).rglob("*.xlsx"))
for d in N2_OVV.Dest_dir.unique()
]
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
_par_reads = read_df(_par_fls, read_types=["Cdl_data", "Cdl_pars"])
N2_pars_all = pd.concat([i["DF"] for i in _par_reads], sort=False)
for n, gr in N2_pars_all.groupby("PAR_file"):
print(
n,
f'\nSamples: {", ".join([str(i) for i in gr.SampleID.unique()])}',
",".join(gr.N2_type.unique()),
)
N2_pars_all, _missing_index = Load_from_Indexes.check_missing_ECindex(
EC_index, N2_pars_all, clean_up=True
)
N2_pars_all.to_pickle(n2_daily["daily_path_RAW"])
    def _extra_pivot_CV(self):
        # Pivot the raw N2 CV data per PAR_file and sweep so each scan rate becomes a column.
        N2_pars_all = getattr(self, f"{self.exp_type}_RAW", pd.DataFrame())
        N2_type_grps = N2_pars_all.groupby("N2_type")
if "CV" in N2_type_grps.groups.keys():
# N2 CVs TODO add Scan Rate column
N2_CV_raw = N2_type_grps.get_group("CV").dropna(axis=1, how="all")
# N2_CV_raw.plot(x=EvRHE,y='jmAcm-2')
N2_CV_pivot_SR_lst = []
for PF, PFgr in N2_CV_raw.groupby("PAR_file"):
# PF ,PFgr
for swp, swgrp in PFgr.groupby("Sweep_Type"):
# swp, swgrp
# swgrp.plot(x=EvRHE,y='jmAcm-2')
# E_T_idx = pd.MultiIndex.from_tuples(zip(swgrp['Elapsed Time(s)'].to_numpy(),swgrp[EvRHE].to_numpy()),names=['Elapsed_Time_s',EvRHE])
# swgrp.index = E_T_idx
# {n : len(gr) for n,gr in swgrp.groupby('Segment #')}
pvt = swgrp.pivot(
index="Elapsed Time(s)",
columns="ScanRate_mVs",
values=[EvRHE, "jmAcm-2", "Segment #"],
)
# pvt = swgrp.pivot(index=EvRHE,columns='ScanRate_mVs',values='jmAcm-2')
pvt.columns = pd.MultiIndex.from_tuples(
[(f"{i[0]}_{int(i[1])}", i[1]) for i in pvt.columns]
)
# pvt.rename(columns=pd.MultiIndex.from_tuples([(f'{i[0]}_{int(i[1])}', i[1]) for i in pvt.columns],names=['data','ScanRate_mVs']),inplace=True)
indx = pd.MultiIndex.from_tuples(
zip(repeat(PF), repeat(swp), pvt.index),
names=["PAR_file", "Sweep_Type", EvRHE],
)
pvt.index = indx
N2_CV_pivot_SR_lst.append(pvt)
# for sr, srgrp in PFgr.groupby('ScanRate_mVs'):
# SR = int(sr)
N2_CV_pivot_SR = pd.concat(N2_CV_pivot_SR_lst, sort=False)
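            # N2_CV_pivot_SR now carries a (PAR_file, Sweep_Type, EvRHE) row MultiIndex
            # and one column group per scan rate, e.g. ("jmAcm-2_10", 10.0).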
# N2Cdl_pars_index = N2_grps.groupby('N2_type').get_group('Cdl_pars')
# N2Cdl_pars_files = [Path(i) for i in N2Cdl_pars_index['SourceFilename'].unique() if re.search('(?i)(_pars|_v20)',Path(i).stem) and Path(i).exists()]
# cdl = pd.read_excel(N2Cdl_pars_files[0],index_col=[0])
# N2Cdl_pars.rename(columns={'Filename' : 'PAR_file'})
# EPtest = N2Cdl_pars_index.loc[no_match] # a slice for testing purpose
# pd.merge(N2Cdl_pars_raw,N2_CV_index[['PAR_file','DestFile']],on='PAR_file',how='left')
# N2Cdl_pars_raw = N2_type_grps.get_group('Cdl_pars').dropna(axis=1,how='all')
# N2Cdl_data_index = postOVVout.groupby('Type_output').get_group('N2_Cdl_data')
# N2_CV_index = postOVVout.groupby('Type_output').get_group('N2_CV')
# lst, no_match, non_exist = [],[],[]
# for n,r in N2Cdl_pars_raw.iterrows():
# Cdl_data_file = N2Cdl_data_index.loc[N2Cdl_data_index.PAR_file == r.PAR_file].DestFile.unique()
# CV_files = N2_CV_index.loc[N2_CV_index.PAR_file == r.PAR_file].DestFile.unique()
# lst.append([set(Cdl_data_file),set(CV_files)])
# if len(N2Cdl_pars_raw) == len(lst):
# N2Cdl_pars_raw = N2Cdl_pars_raw.assign(**{'Cdl_data_file' : [i[0] for i in lst], 'Cdl_CV_data_files' : [i[1] for i in lst]})
# Cdl_pars = pd.concat([i for i in lst],sort=False,ignore_index=True)
def edit_raw_columns(self):
N2Cdl_pars_raw = getattr(self, f"{self.exp_type}_RAW")
N2_type_grps = N2Cdl_pars_raw.groupby("N2_type")
N2Cdl_pars_raw = N2_type_grps.get_group("Cdl_pars").dropna(axis=1, how="all")
N2Cdl_pars_raw.drop_duplicates(
subset=N2Cdl_pars_raw.columns[0:19], keep="first", inplace=True
)
N2Cdl_pars_raw = FileOperations.ChangeRoot_DF(
N2Cdl_pars_raw, [], coltype="string"
)
Cdl_pars = post_helper.make_uniform_EvRHE(N2Cdl_pars_raw)
Cdl_pars.drop_duplicates(subset=Cdl_pars.columns[0:19], inplace=True)
# Cdl_pars_merge_cols = [i for i in Cdl_pars.columns if i in SampleCodes.columns and not 'Unnamed' in i]
# Cdl_pars_char = pd.merge(Cdl_pars,SampleCodes,on=Cdl_pars_merge_cols,how='left')
# Cdl_pars_char.drop_duplicates(subset=Cdl_pars_char.columns[0:19],inplace=True)
_int = list(set(Cdl_pars.columns).intersection(set(self.EC_index.columns)))
if Cdl_pars.postAST.dropna().empty and len(self.EC_index.columns) != len(_int):
Cdl_pars = Cdl_pars.drop(columns="postAST")
# _int = list(set(Cdl_pars_char.columns).intersection(set(EC_index.columns)))
Cdl_pars = pd.merge(
Cdl_pars,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
Cdl_pars = Load_from_Indexes.add_missing_ECindex_cols(self.EC_index, Cdl_pars)
setattr(self, f"{self.exp_type}", Cdl_pars)
    def _extra_xls_out(self, xls_out=False):
        # Optional export of the collected Cdl pars; the target filename follows the
        # convention used in the legacy _old() path above.
        Cdl_pars_char = getattr(self, f"{self.exp_type}", pd.DataFrame())
        IndexOVV_N2_pars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
            "N2Cdl_pars_IndexOVV_v{0}.pkl.compress".format(FileOperations.version)
        )
        if xls_out:
            new_N2_pars_char_target = FileOperations.CompareHashDFexport(
                Cdl_pars_char, IndexOVV_N2_pars_fn
            )
            _logger.info(
                "PostEC Cdl N2 CVs re-indexed and saved: {0}".format(
                    new_N2_pars_char_target
                )
            )
            Cdl_pars_char.to_pickle(IndexOVV_N2_pars_fn)
    def _extra_plotting(self, extra_plotting=False):
        # Cdl_pars_char used to come from an outer scope; fetch the collected pars
        # from the instance so these check-plots can run stand-alone.
        Cdl_pars_char = getattr(self, f"{self.exp_type}", pd.DataFrame())
        try:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').plot(
y="Cdl",
x="E_RHE",
kind="scatter",
ylim=(0, 0.08),
title="checking plot: Cdl in acid",
)
# Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').groupby('BET_cat_agg').plot(y='Cdl',x='E_RHE',colormap='viridis',kind='scatter',ylim=(0,0.08),title='Cdl in acid')
if extra_plotting:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)').plot(
y="Cdl",
x="E_RHE",
c="BET_cat_agg",
colormap="viridis",
kind="scatter",
ylim=(0, 0.03),
title="Cdl in alkaline",
)
alkCdl = Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)')
acidCdl = Cdl_pars_char.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot_trisurf(alkCdl.E_RHE,alkCdl.Cdl,alkCdl.BET_cat_agg,cmap=cm.viridis)
Cdl_atE = Cdl_pars_char.loc[
(Cdl_pars_char.Sweep_Type_N2 == "cathodic")
& (np.isclose(Cdl_pars_char["E_RHE"], 0.5, atol=0.02))
]
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in acid",
ax=ax,
ylim=(0, 50e-3),
)
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH > 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in alk",
ax=ax,
ylim=(0, 50e-3),
)
except Exception as e:
_logger.warning(f"PostEC Cdl N2 CVs extra plotting fail:\n{e}")
class CollectPostOVV:
"""Loops over all index files and merges them with the RunOVV"""
    def __init__(self):
        pass
@staticmethod
def LoadPostOVV(reload=False):
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
SampleCodes = FindExpFolder().LoadSampleCode()
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
if reload == True:
postOVVout = CollectPostOVV.LoadIndexes(reload=True)
else:
try:
postOVVout = CollectPostOVV.LoadIndexes(reload=False)
except Exception as e:
logging.warning(
"CollectPostOVV no Indexes available: {0}. Using postEC_Organized".format(
e
)
)
postOVVout = pd.read_excel(
PostDestDir.joinpath("postEC_Organized.xlsx"), index_col=[0]
)
# pd.read_excel(PostDestDir.joinpath('SampleCodeLst.xlsx'))
# CS_parts_pOVV = FileOperations.find_CS_parts(postOVVout.Exp_dir.iloc[0])
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in postOVVout.SourceFilename.values]
# postOVVout['SourceFilename'] = chLst
# else:
# pass
postSample = pd.merge(postOVVout, SampleCodes, on="SampleID", how="left")
print("Types:", " , ".join([str(i) for i in postSample.Type_output.unique()]))
postSample.PAR_file = postSample.PAR_file.astype(str)
postSample = FileOperations.ChangeRoot_DF(
postSample,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
return postSample
# def RunFolderCopy(serie):
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in serie.groupby(by=['Dest_dir'])]
# return postOVVlst
@staticmethod
def LoadIndexes(reload=False):
IndexOVV_fn = FindExpFolder("VERSASTAT").DestDir.joinpath(
"IndexOVV_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_fn.exists() and not reload:
Index_merged = pd.read_excel(IndexOVV_fn, index_col=[0])
Index_merged = FileOperations.ChangeRoot_DF(
Index_merged,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
_logger.info("PostEC loaded IndexOVV from recent: {0}".format(IndexOVV_fn))
else:
_logger.info(
"PostEC reloading IndexOVV from Index files and Exp dir files!!"
)
OnlyRecentMissingOVV = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, []
)
OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# if index_source == 'ExpDirs':
idx_files = [
list(Path(i).rglob("**/*index*.xlsx"))
for i in OnlyRecentMissingOVV.Dest_dir.unique()
if list(Path(i).rglob("**/*index.xlsx"))
]
# for i in OnlyRecentMissingOVV.Dest_dir.unique():
# [idx_files.append([a for a in a if a]) for a in [(Path(i).rglob('index.xlsx')) for i in OnlyRecentMissingOVV.Dest_dir.unique()]]
# idx_dir = FindExpFolder('VERSASTAT').IndexDir
# idx_files = idx_dir.rglob('*.xlsx')
# subset=['PAR_file','DestFile','Type_output','Script_run_date']
idx_lst = set([a for i in idx_files for a in i])
idx_mtime = [
(i, (dt.datetime.now() - dt.datetime.fromtimestamp(i.stat().st_mtime)))
for i in idx_lst
]
# print(f'len {len(idx_lst)} and set {len(set(idx_lst))}')
alst = (
[]
) # Alternative = pd.concat([[pd.read_excel(c,index_col=[0]) for c in a ] for b in idx_files],sort=False,ignore_index=True)
for idxfp in idx_lst:
df = pd.read_excel(idxfp, index_col=[0])
df["IndexSource"] = idxfp
alst.append(df)
Index_from_expdirs_all = pd.concat(
[i for i in alst], sort=False, ignore_index=True
)
Index_from_expdirs_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_expdirs = Index_from_expdirs_all.drop_duplicates(keep="first")
Index_from_expdirs = FileOperations.ChangeRoot_DF(Index_from_expdirs, [])
idx_exp_tDelta = [
(n, pd.to_datetime(dt.datetime.now()) - i["Script_run_date"])
for n, i in Index_from_expdirs.iterrows()
]
Index_from_expdirs = Index_from_expdirs.assign(
**{
"Source": "ExpDirs",
"Time_since_run": [pd.to_timedelta(i[1]) for i in idx_exp_tDelta],
}
)
# Index_from_expdirs['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index_from_expdirs['Script_run_date'].values]
# limit = pd.to_timedelta('7h')
# ['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index['Script_run_date'].values]
# Index = Index.loc[Index['Time_since_run'] < limit]
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
# else:
# dups.append(gr.Time_since_run.idxmin())
# 1elif index_source == 'IndexDir':
IndexDir_idxfiles = list(
FindExpFolder("VERSASTAT").IndexDir.rglob("*.xlsx")
)
Index_from_idxdir_all = pd.concat(
[
pd.read_excel(i, index_col=[0]).assign(IndexSource=i)
for i in IndexDir_idxfiles
],
sort=False,
ignore_index=True,
)
Index_from_idxdir_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_idxdir = Index_from_idxdir_all.drop_duplicates(keep="first")
Index_from_idxdir = FileOperations.ChangeRoot_DF(Index_from_idxdir, [])
Index_from_idxdir = Index_from_idxdir.assign(**{"Source": "IndexDir"})
Index_from_idxdir["Time_since_run"] = [
pd.to_timedelta(pd.to_datetime(dt.datetime.now()) - i)
for i in Index_from_idxdir["Script_run_date"].values
]
# dup_idxdir = Index_from_idxdir.loc[Index_from_idxdir.DestFile.duplicated() == True]
dups_date, singles, others, unused_dups = [], [], [], []
for n, gr in Index_from_idxdir.groupby(
["PAR_file", "DestFile", "Type_output"]
):
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
# print(n,gr.Time_since_run.unique())
dups_date.append(gr.Time_since_run.idxmin())
unused_dups.append(
list(set(gr.index) - {gr.Time_since_run.idxmin()})
)
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
dup_fltr_idxdir = Index_from_idxdir.loc[singles + dups_date]
# Indexes = pd.merge(Index_from_expdirs,Index_from_idxdir, on=['PAR_file','DestFile','Type_output','ScanRate','Segment','Sweep_Type','Source'])
Indexes = pd.concat([Index_from_expdirs, dup_fltr_idxdir], sort=False)
# Indexes['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Indexes['Script_run_date'].values]
Indexes = Indexes.dropna(
subset=["PAR_file", "DestFile", "Type_output"]
).reset_index()
dups_date, singles, others = [], [], []
Idxgr = Indexes.groupby(["PAR_file", "DestFile", "Type_output"])
for n, gr in Idxgr:
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
idxmin = gr.Time_since_run.idxmin()
# print(n,gr.Time_since_run.unique())
dups_date.append([idxmin, gr.loc[idxmin, "Source"]])
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
# for n2,gr2 in OnlyRecentMissingOVV.groupby('PAR_file'):
# if len(gr2) > 1:
# dgr2 = gr2
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
Index = Indexes.loc[singles + [i[0] for i in dups_date]].dropna(
subset=["DestFile"]
)
# for a in Index.DestFile.values:
# try: Path(a).is_file()
# except: print(a)
# if not any([Path(i).exists() for i in Index.DestFile.values]):
# Index = FileOperations.ChangeRoot_DF(Index,['PAR_file','DestFile']) 'EXP_dir','Dest_dir','PAR_file','PAR_file_Ring','ORR_act_N2_bg','DestFile','SourceFilename'
Index = FileOperations.ChangeRoot_DF(Index, [])
Index = Index.assign(
**{
"Type_Exp": Index["Type_output"],
"SourceFilename": [Path(str(i)) for i in Index["DestFile"].values],
}
)
# Index['Type_Exp'] = Index['Type_output']
# Index['SourceFilename'] = [Path(str(i)) for i in Index['DestFile'].values]
Index.PAR_file = Index.PAR_file.astype(str)
Index_undup = Index.loc[
(
Index.duplicated(
subset=[
"PAR_file",
"DestFile",
"Type_output",
"Time_since_run",
"Source",
]
)
== False
)
]
idx_merge_cols = [
i
for i in Index_undup.columns
if i in OnlyRecentMissingOVV.columns and not "Segment" in i
]
Index_merged = pd.merge(
Index_undup, OnlyRecentMissingOVV, on="PAR_file", how="left"
)
Index_merged.PAR_file = [
Path(str(i)) for i in Index_merged["PAR_file"].values
]
new_IndexOVV_target = FileOperations.CompareHashDFexport(
Index_merged, IndexOVV_fn
)
try:
_logger.info(
"PostEC re-indexed and saved: {0}".format(new_IndexOVV_target)
)
except:
print("no log")
return Index_merged
@staticmethod
def MatchPostASTs(postOVVout):
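        # For every post-AST measurement, look up the pre-AST PAR_file(s) recorded under
        # the same EC conditions (preferably within the preceding day) and attach them
        # as a "preAST" column.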
# postOVVout.postAST.unique()
# [(n,len(gr)) for n,gr in postOVVout.groupby('postAST')]
faillst, fail_index_gr = [], []
matchAST_lst, non_uniq_lst = [], []
for nAST, ASTgr in postOVVout.query(
'(postAST != "no") & (postAST != "postORR")'
).groupby(["postAST", "PAR_date", "PAR_file"]):
nAST, ASTgr
# for nDT,grDT in ASTgr.groupby(')
if ASTgr.PAR_file.nunique() == 1 and ASTgr.Source.nunique() > 1:
ASTgr_grSource = ASTgr.groupby("Source")
ASTgr_info = [
(n, len(gr), gr.Time_since_run.mean()) for n, gr in ASTgr_grSource
]
if len(set([i[1] for i in ASTgr_info])) == 1:
take_source = ASTgr_info[np.argmin([i[2] for i in ASTgr_info])][0]
ASTgr = ASTgr_grSource.get_group(take_source)
fail_index_source_gr = ASTgr_grSource.get_group(
ASTgr_info[np.argmax([i[2] for i in ASTgr_info])][0]
)
fail_index_gr.append(fail_index_source_gr)
EC_exp_uniq = [
(i, ASTgr[i].unique(), ASTgr[i].nunique())
for i in [
c
for c in SampleSelection.EC_exp_cols
+ ["SampleID", "Type_exp", "PAR_file"]
if c in ASTgr.columns
]
]
EC_exp_non_uniq = [i for i in EC_exp_uniq if i[2] != 1]
if EC_exp_non_uniq:
print(
"Not unique PAR_date {0},multiple: {1}".format(
nAST[1], EC_exp_non_uniq
)
)
non_uniq_lst.append([nAST, EC_exp_non_uniq, EC_exp_uniq])
faillst.append(ASTgr)
EC_exp_query = " & ".join(
[
'({0} == "{1}")'.format(i[0], i[1][0])
for i in EC_exp_uniq[1:-1] + [("postAST", ["no"])]
if not "Loading" in i[0]
]
)
past = nAST[1] - pd.to_timedelta(1, unit="D")
past_slice = postOVVout.query("(PAR_date > @past) & (PAR_date < @nAST[1])")
past_query = past_slice.query(EC_exp_query)
if past_query.query(EC_exp_query).empty:
# expand search to all OVV for similar conditions
all_query = postOVVout.query(EC_exp_query)
if not all_query.empty:
preAST = tuple(all_query.PAR_file.unique())
else:
preAST = "no-preAST"
else:
# find previous preAST measurments
preAST = tuple(past_query.PAR_file.unique())
matchAST_lst.append(list(nAST) + [preAST])
if fail_index_gr:
fail_index_filter = pd.concat(fail_index_gr)
postOVVout = postOVVout.loc[
~postOVVout.index.isin(fail_index_filter.index), :
]
non_uniq = pd.DataFrame(non_uniq_lst)
if faillst:
fails = pd.concat(faillst)
matchAST = pd.DataFrame(
matchAST_lst, columns=["postAST", "PAR_date", "PAR_file", "preAST"]
)
postOVVout = pd.merge(
postOVVout, matchAST[["PAR_file", "preAST"]], on="PAR_file", how="left"
)
return postOVVout
# ASTgr.SampleID.unique()
@staticmethod
def MatchECconditions(OVV_df):
# postOVVout.postAST.unique()
# [(n,len(gr)) for n,gr in postOVVout.groupby('postAST')]
matchAST_lst = []
# 'DW16_2018-03-06 00:00:00_no_0.1MHClO4+10mMH2O2_1.0_0.379'
OVV_df["PAR_date_day"] = [
dt.datetime.strftime(i, format="%Y-%m-%d")
for i in OVV_df.PAR_date.fillna(dt.date(1970, 12, 12)).to_list()
]
# [pd.datetime.strftime(pd.to_datetime(i),format='%Y-%m-%d') for i in postOVVout.PAR_date.fillna(0).to_list()]
EC_label_cols = [
"SampleID",
"pH",
"Electrolyte",
"Loading_cm2",
"postAST",
"PAR_date_day",
]
post_prev_cols = OVV_df.columns
# +[i for i in SampleSelection.EC_exp_cols if i not in ['RPM','Gas']]
for nAST, ASTgr in OVV_df.groupby(EC_label_cols):
nAST, ASTgr
# for nDT,grDT in ASTgr.groupby(')
minDT, maxDT = ASTgr.PAR_date.min(), ASTgr.PAR_date.max()
deltaDT = maxDT - minDT
# par_Day = pd.datetime.strftime(nAST[-1],format='%Y-%m-%d')
EC_exp_query = "_".join([str(i) for i in list(nAST)])
EC_exp_nodate = "_".join([str(i) for i in list(nAST)[0:-1]])
matchAST_lst.append(
pd.DataFrame(
[
(i, EC_exp_query, EC_exp_nodate, deltaDT)
for i in ASTgr.PAR_file.unique()
],
columns=["PAR_file", "ECexp", "ECuniq", "EC_deltaDT"],
)
)
EC_exp_match = pd.concat(
[i for i in matchAST_lst], ignore_index=True, sort=False
)
OVV_df = pd.merge(OVV_df, EC_exp_match, on=["PAR_file"], how="left")
        print(
            'Added columns: "{0}" to postOVV with len({1})'.format(
                ", ".join(list(set(OVV_df.columns) - set(post_prev_cols))), len(OVV_df)
            )
        )
return OVV_df
# ASTgr.SampleID.unique()
# merge_cols = [i for i in Index.columns if i in OnlyRecentMissingOVV.columns and not 'Segment' in i]
# p2,ovv2 = Index.set_index(merge_cols), OnlyRecentMissingOVV.set_index(merge_cols)
# merge = p2.update(ovv2)
# merge = p2.combine_first(ovv2)
# else:
# AllEIS_BoL = pd.concat([pd.read_excel(i) for i in list(PostDestDir.joinpath('EIS','0.1MH2SO4').rglob('*BoL*'))])
# AllEIS_EoL = pd.concat([pd.read_excel(i) for i in list(PostDestDir.joinpath('EIS','0.1MH2SO4').rglob('*EoL*'))])
# AllEIS_BoL = AllEIS_BoL.loc[(AllEIS_BoL['Unnamed: 0'] > 0.2901) & (AllEIS_BoL['Unnamed: 0'] < 0.301) & (AllEIS_BoL.SampleID != 'O2'),:]
# AllEIS300_EoL = AllEIS_EoL.loc[(AllEIS_EoL['Unnamed: 0'] > 0.2901) & (AllEIS_EoL['Unnamed: 0'] < 0.301) & (AllEIS_EoL.SampleID != 'O2'),:]
# .query('(EXP_date > 20181001)')
# refl = []
# for a in postOVVout.SampleID.values:
# ScodeRef = SampleCodes.loc[SampleCodes.SampleID == a,:]
# if ScodeRef.empty:
# Scode = EISovv['SampleID'].unique()[0]
# else:
# Scode = ScodeRef.Sample.values[0]
# refl.append(Scode)
# postOVVout['SampleLabel'] = refl
# return postOVVout
# for a in postOVVout.SampleID.values:
# ScodeRef = SampleCodes.loc[SampleCodes.SampleID == a,:]
# if ScodeRef.empty:
# Scode = EISovv['SampleID'].unique()[0]
# else:
# Scode = ScodeRef.Sample.values[0]
# refl.append(Scode)
# postOVVout['SampleLabel'] = refl
# postOVVout.loc[postOVVout.Type_Exp == 'EIS_Combined']
# def recently_modified(file,20):
# file_mtime = pd.to_datetime(DestFile.stat().st_mtime,unit='s')
class Load_from_Indexes:
"""This class loads the parameters of Electrochemical Data files and merge it with the Overview"""
SampleCodes = FindExpFolder().LoadSampleCode()
# EC_label_cols = ['SampleID','pH','Electrolyte','Loading_cm2','postAST','PAR_date_day']
EC_label_cols = [
"PAR_file",
"SampleID",
"postAST",
"Loading_cm2",
"Electrolyte",
"pH",
"Gas",
"RPM_DAC",
"E_RHE",
]
PostDestDir = FindExpFolder("VERSASTAT").PostDir
def __init__(self, **kwargs):
if "reload" in kwargs:
# self.postOVVout = CollectPostOVV.LoadPostOVV(kwargs['reload'])
print(
"Exp types found in overview: {0}".format(
", ".join([str(i) for i in self.postOVVout.Type_Exp.unique()])
)
)
pass
@staticmethod
def PreparePostOVV(fastload=False):
postOVV_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
"PostOVVout_v20_{0}.pkl.compress".format(system())
)
if postOVV_pickle_path.is_file():
tdelta = dt.datetime.now() - dt.datetime.fromtimestamp(
postOVV_pickle_path.stat().st_mtime
)
if tdelta.seconds > 600:
fastload = False
print(f"Fastload overwrite to False, {tdelta}")
if fastload == True:
try:
postOVVout = pd.read_pickle(postOVV_pickle_path, compression="xz")
return postOVVout
except Exception as e:
print("Load postOVVout from pickle error: ", e)
LoadOVV = Load_from_Indexes(reload=True)
else:
LoadOVV = Load_from_Indexes(reload=True)
postOVVout = LoadOVV.postOVVout
print("Types:", " , ".join([str(i) for i in postOVVout.Type_output.unique()]))
postOVVout.Loading_cm2 = np.round(postOVVout.Loading_cm2, 3)
postOVVout = CollectPostOVV.MatchPostASTs(postOVVout)
postOVVout = CollectPostOVV.MatchECconditions(postOVVout)
postOVVout.PAR_file = postOVVout.PAR_file.astype(str)
postOVVout["PAR_date_day"] = [
pd.datetime.strftime(pd.to_datetime(i), format="%Y-%m-%d")
for i in postOVVout.PAR_date.fillna(0).values
]
postOVVout = FileOperations.ChangeRoot_DF(postOVVout, [], coltype="string")
postOVVout.to_pickle(postOVV_pickle_path, compression="xz")
return postOVVout
def CollectAllExpTypeOVV():
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
        today = dt.datetime.today()
postOVVout = Load_from_Indexes.PreparePostOVV(fastload=False) # len(22965)
# postOVVout.PAR_file = postOVVout.PAR_file.astype(str)
# === Loading preparation overview of Samples and merging with the data from Characterization techniques === #
SampleCodes = PostChar.SampleCodeChar()
#
Reload_set = True
logger = start_logger()
EIS_pars = Load_from_Indexes.EIS_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # EIS_Pars2 6745, 22813
HPRR_pars = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # HPRR 1668
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=Reload_set) # Cdl runs 20322
Cdl_pars_catan = MergeEISandCdl.splitcol_Sweep_Cdl(Cdl_pars) # 10342
HER_pars = Load_from_Indexes.HER_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # 2539
OER_pars = Load_from_Indexes.OER_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # run 1347
if list(
PostDestDir.rglob(
f"{today.year}-{today.month}-*_ORR_pars_{system()}.pkl.compress"
)
)[-1].is_file():
ORR_pars = Load_from_Indexes.ORR_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # ORR 1908
ORR_pars.to_pickle(
PostDestDir.joinpath(
f"{today.year}-{today.month}-{today.day}_ORR_pars_{system()}.pkl.compress"
)
)
EIS_pars.to_pickle(
PostDestDir.joinpath(
f"{today.year}-{today.month}-{today.day}_EIS_pars_{system()}.pkl.compress"
)
)
# FindExpFolder().LoadSampleCode()
# SampleCodes = ExportECfromCV.SampleCodes
# SampleSelect_all = SampleSelection('*','*')
# SampleCodesChar = SampleSelect_all.Prep_EA_BET
# SampleCodes = pd.merge(SampleCodes,SampleCodesChar,how='left',on='SampleID',suffixes=('','_char')).drop_duplicates(subset=['SampleID','N_content'])
# === Start preparing pars OVV from index per Experimental type === #
# postOVVout,SampleCodes = pd.DataFrame(),pd.DataFrame()
def extraPostOVV():
OnlyRecentMissingOVV = run_PAR_DW.ECRunOVV(load=1).index
# === Checking expirements from index to analyzed data=== #
[
(i)
for i, gr in OnlyRecentMissingOVV.query('PAR_exp == "EIS"').groupby(
"SampleID"
)
if gr.Loading_cm2.nunique() > 1
]
[
(i)
for i, gr in postOVVout.query('PAR_exp == "EIS"').groupby("SampleID")
if gr.Loading_cm2.nunique() > 1
]
eismiss = OnlyRecentMissingOVV.loc[
OnlyRecentMissingOVV.PAR_file.isin(
[
i
for i in OnlyRecentMissingOVV.query(
'PAR_exp == "EIS"'
).PAR_file.values
if i not in postOVVout.PAR_file.values
]
)
].sort_values(
by="PAR_date",
) # 40
eismiss.to_excel(
FindExpFolder("VERSASTAT").PostDir.joinpath("OVV_EIS_missing.xlsx")
)
orrmiss = OnlyRecentMissingOVV.loc[
OnlyRecentMissingOVV.PAR_file.isin(
[
i
for i in OnlyRecentMissingOVV.query(
'PAR_exp == "ORR" & Electrode != "Pt_ring"'
).PAR_file.values
if i not in ORR_pars.PAR_file.values
]
)
].sort_values(
by="PAR_date",
) # 279
# orrmiss = OnlyRecentMissingOVV.loc[OnlyRecentMissingOVV.PAR_file.isin([i for i in OnlyRecentMissingOVV.query('PAR_exp == "ORR"').PAR_file.values if i not in ORR_pars.PAR_file.values])].sort_values(by='PAR_date',)
orrmiss.to_pickle(PostDestDir.joinpath("ORR_missing.pkl.compress"))
SampleSelection.EC_exp_cols + "SampleID" + EvRHE
for n, gr in Cdl_pars.groupby(
[i for i in SampleSelection.EC_exp_cols if i in Cdl_pars.columns]
):
fig, ax = plt.subplots()
for sID, sgr in gr.groupby("SampleID"):
sgr.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
ax=ax,
)
EIS_pars.query(SampleSelection.acid1500).query('Gas == "O2" & pH == 1 ').plot(
x="BET_cat_agg", y="Rct", kind="scatter", c="N_content", colormap="viridis"
)
mcls = [i for i in EIS_pars.columns if i in Cdl_pars.dropna(axis=1).columns]
mcls2 = [
i
for i in SampleSelection.EC_exp_cols + ["SampleID", "E_RHE"]
if i in EIS_pars.columns and i in Cdl_pars.dropna(axis=1).columns
]
mcls3 = [
i
for i in SampleSelection.EC_exp_cols + ["SampleID", "E_RHE"]
if i in EIS_pars.columns
and i in Cdl_pars.dropna(axis=1).columns
and i in ORR_pars_char.columns
]
[
(i, EIS_pars[i].dtypes, Cdl_pars[i].dtypes)
for i in mcls
if EIS_pars[i].dtypes != Cdl_pars[i].dtypes
]
EIS_Cdl = pd.merge(EIS_pars, Cdl_pars, on=mcls2, how="outer")
EIS_Cdl_ORR = pd.merge(EIS_Cdl, ORR_pars_char, on=mcls3, how="outer")
# [['E_RHE','Cdl','Cdlp']]
ECdl = EIS_Cdl.dropna(how="any", axis=0, subset=["Cdl", "Cdlp"])
ECdl_ORR = EIS_Cdl.dropna(how="any", axis=0, subset=["Cdl", "Cdlp"])
test1_alk = ECdl.query(
'(pH > 7) & (pH < 15) & (E_RHE > 0.494) & (E_RHE < 0.516) & (Sweep_Type_N2 == "cathodic")'
)
test1_acid = ECdl.query(
'(pH < 7) & (E_RHE > 0.494) & (E_RHE < 0.516) & (Sweep_Type_N2 == "cathodic")'
)
test1_alk.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in alkaline",
)
test1_alk.plot(
y="Cdl_corr",
x="Rct",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in alkaline",
)
test1_acid.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
)
test1_acid.plot(
y="Cdl",
x="Rct_kin",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
)
# HPRR_pars = pd.merge(HPRR_pars,postOVVout,on='PAR_file',how='left')
# print('Leftover SampleIDs: {0}'.format(set(HPRR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
# HPRR_pars = pd.merge(HPRR_pars,SampleCodes,on='SampleID',how='left')
# @@ Check POST_AST status from OVV and PRM...
print(
"Leftover SampleIDs: {0}".format(
set(ORR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())
)
)
ORR_pars = pd.merge(ORR_pars, SampleCodes, on="SampleID", how="left")
return HPRR_pars_ovv, EIS_pars_ovv
def get_EC_index():
EC_index = ECRunOVV(load=1).EC_index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
SampleCodes = FindExpFolder().LoadSampleCode()
# SampleCodesChar().load
return EC_index, SampleCodes
@staticmethod
def check_missing_ECindex(OnlyRecentMissingOVV, DF_pars, clean_up=False):
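        # Split DF_pars into rows whose PAR_file is known to the EC index and the leftovers;
        # source files of the leftovers are handed to CleanUpCrew (deleted when clean_up=True).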
not_in_index = DF_pars.loc[
~DF_pars.PAR_file.isin(OnlyRecentMissingOVV.PAR_file.values)
]
CleanUpCrew(list_of_files=not_in_index.sourceFilename.unique(), delete=clean_up)
return (
DF_pars.loc[DF_pars.PAR_file.isin(OnlyRecentMissingOVV.PAR_file.values)],
not_in_index,
)
@staticmethod
def add_missing_ECindex_cols(EC_index, DF):
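        # Merge in any EC_index columns that DF is missing (matched on PAR_file),
        # e.g. pH, Electrolyte or Loading_cm2.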
if list(EC_index.columns.difference(DF.columns)):
DF = pd.merge(
DF,
EC_index[["PAR_file"] + list(EC_index.columns.difference(DF.columns))],
on="PAR_file",
how="left",
)
return DF
@staticmethod
def IndexPars_CB_paper():
postOVVout, SampleCodes = pd.DataFrame(), pd.DataFrame()
PostECddSeries = FindExpFolder("VERSASTAT").DestDir.joinpath(
"PostEC/{0}".format(SampleSelection.Series_CB_paper["name"])
)
PostECddSeries.mkdir(exist_ok=True, parents=True)
EIS_pars = Load_from_Indexes.EIS_pars_OVV(
postOVVout, SampleCodes, reload=False
) # EIS_Pars2
HPRR_pars = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # HPRR
ORR_pars = Load_from_Indexes.ORR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # ORR
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=False)
HER_pars = Load_from_Indexes.HER_pars_OVV(postOVVout, SampleCodes, reload=False)
OER_pars = Load_from_Indexes.OER_pars_OVV(postOVVout, SampleCodes, reload=False)
CBsamples = SampleSelection.Series_CB_paper["sIDs"]
EIS_CB_paper = EIS_pars.loc[EIS_pars.SampleID.isin(CBsamples)] # 7644
HPRR_CB_paper = HPRR_pars.loc[HPRR_pars.SampleID.isin(CBsamples)]
HPRR_CB_paper.to_excel(PostECddSeries.joinpath("HPRR_CB_paper.xlsx"))
ORR_CB_paper = ORR_pars.loc[ORR_pars.SampleID.isin(CBsamples)]
ORR_CB_paper.to_excel(PostECddSeries.joinpath("ORR_CB_paper.xlsx"))
Cdl_CB_paper = Cdl_pars.loc[Cdl_pars.SampleID.isin(CBsamples)]
Cdl_CB_paper.to_excel(PostECddSeries.joinpath("Cdl_CB_paper.xlsx"))
HER_CB_paper = HER_pars.loc[HER_pars.SampleID.isin(CBsamples)]
OER_CB_paper = OER_pars.loc[OER_pars.SampleID.isin(CBsamples)]
Cdl_CB_cath, Cdl_CB_an = Cdl_CB_paper.query(
'Sweep_Type_N2 == "cathodic"'
), Cdl_CB_paper.query('Sweep_Type_N2 == "anodic"')
merge_cols_catan = [i for i in Cdl_CB_cath.columns if i in Cdl_CB_an.columns]
Cdl_CB_catan = pd.merge(
Cdl_CB_cath,
Cdl_CB_an,
on=[i for i in merge_cols_catan if i not in SampleSelection.EC_N2Cdl_cols],
how="left",
suffixes=["_cat", "_an"],
)
Cdl_CB_catan["Cdl_sum"] = Cdl_CB_catan["Cdl_an"] + Cdl_CB_catan["Cdl_cat"]
return (
EIS_CB_paper,
HPRR_CB_paper,
ORR_CB_paper,
Cdl_CB_paper,
HER_CB_paper,
OER_CB_paper,
)
@staticmethod
def IndexPars_Porph_SiO2():
postOVVout, SampleCodes = pd.DataFrame(), pd.DataFrame()
serie = SampleSelection.Series_Porhp_SiO2["sIDslice"]
EIS_pars = Load_from_Indexes.EIS_pars_OVV(
postOVVout, SampleCodes, reload=False
) # EIS_Pars2
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=False)
EIS_Porph_SiO2 = EIS_pars.loc[EIS_pars.SampleID.isin(serie)]
Cdl_Porph_SiO2 = Cdl_pars.loc[Cdl_pars.SampleID.isin(serie)]
Cdl_Porph_SiO2_cath, Cdl_Porph_SiO2_an = Cdl_Porph_SiO2.query(
'Sweep_Type_N2 == "cathodic"'
), Cdl_Porph_SiO2.query('Sweep_Type_N2 == "anodic"')
HPRR_pars_char = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # HPRR
ORR_pars_char = Load_from_Indexes.ORR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # ORR
HER_pars = Load_from_Indexes.HER_pars_OVV(postOVVout, SampleCodes, reload=False)
OER_pars = Load_from_Indexes.OER_pars_OVV(postOVVout, SampleCodes, reload=False)
HPRR_Porph_SiO2 = HPRR_pars_char.loc[HPRR_pars_char.SampleID.isin(serie)]
ORR_Porph_SiO2 = ORR_pars_char.loc[ORR_pars_char.SampleID.isin(serie)]
HER_Porph_SiO2 = HER_pars.loc[HER_pars.SampleID.isin(serie)]
OER_Porph_SiO2 = OER_pars.loc[OER_pars.SampleID.isin(serie)]
return ORR_Porph_SiO2
def test_update_from_index(pars, EC_index):
_olap = pars.columns.intersection(EC_index.columns)
_olap_minus = [i for i in _olap if not "PAR_file" == i]
_mtime = [i for i in pars.columns if i.endswith("delta_mtime")]
if _mtime:
_idx = pars[_mtime[0]].idxmin()
else:
_idx = 0
_ECidx = (
EC_index.loc[EC_index.PAR_file == pars.iloc[_idx].PAR_file][_olap]
.iloc[0]
.to_dict()
)
_prsx = pars.iloc[_idx][_olap].to_dict()
_check = {
key: {"pars": val, "EC_index": _ECidx.get(key, "xx")}
for key, val in _prsx.items()
if _ECidx.get(key, "xx") != val
}
_pars_bad = False
if _check:
_pars_bad = any(
"error" in str(i) for i in [i["pars"] for i in _check.values()]
)
if _pars_bad:
_logger.info(f"Overwriting columns in Pars from EC_index")
_new_pars = pd.merge(
pars[[i for i in pars.columns if i not in _olap_minus]],
EC_index[_olap],
on="PAR_file",
how="left",
)
else:
_new_pars = pars
return _new_pars
@staticmethod
def EIS_pars_OVV(
reload=False,
extra_plotting=False,
xls_out=False,
BRUTE_out=False,
use_daily=True,
use_latest=False,
**kwargs,
):
# IndexOVV_EISpars_fn_xls = PostDestDir.joinpath('EIS_pars_IndexOVV_v{0}.xlsx'.format(FileOperations.version))
# IndexOVV_EISpars_fn = PostDestDir.joinpath('EIS_pars_IndexOVV_v{0}.pkl.compress'.format(FileOperations.version))
# PostDestDir = Load_from_Indexes.PostDestDir
# FindExpFolder('VERSASTAT').PostDir
eis_daily = get_daily_pickle(exp_type="EIS_pars")
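# get_daily_pickle is assumed to return a dict describing today's cached pickle paths
# (keys used below include 'daily_path', '_exists', 'daily_options', 'daily_path_RAW',
# '_raw_exists', 'daily_options_RAW' and 'today'); the branches below prefer the daily
# cache and only rebuild from the per-experiment Excel/pkl files when reload is requested.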
# today = dt.datetime.now().date()
# eis_daily_pickle_path = PostDestDir.joinpath(f'{today.year}-{today.month}-{today.day}_EIS_pars_{system()}.pkl.compress')
# eis_daily_pickle_path_RAW = PostDestDir.joinpath(f'{today.year}-{today.month}-{today.day}_EIS_pars_{system()}_RAW.pkl.compress')
if eis_daily.get("_exists", False) and not reload and use_daily:
EIS_pars = pd.read_pickle(eis_daily.get("daily_path"))
EIS_pars = FileOperations.ChangeRoot_DF(EIS_pars, [], coltype="string")
_logger.info(
f'Loaded EIS_pars OVV from daily {eis_daily["today"]} pickle: {eis_daily.get("daily_path","")}'
)
elif (
eis_daily.get("daily_options", [])
and not reload
and (use_latest or use_daily)
):
EIS_pars = pd.read_pickle(eis_daily.get("daily_options")[-1])
EIS_pars = FileOperations.ChangeRoot_DF(EIS_pars, [], coltype="string")
_logger.info(
f'Loaded EIS_pars OVV from daily {eis_daily.get("daily_options")[-1]} '
)
else:
# @@ Read EIS pars files and extend with columns from Samples
# try other way:: idx_files_EIS = [list(Path(i).rglob('**/EIS/*pars_v20.xlsx')) for i in OnlyRecentMissingOVV.Dest_dir.unique() if list(Path(i).rglob('**/EIS/*pars_v20.xlsx'))]
_logger.info(
f'START reloading EIS_pars OVV from daily {eis_daily["today"]}'
)
# OnlyRecentMissingOVV = ECRunOVV(load=1).index
## ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
# OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(OnlyRecentMissingOVV,[])
# OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# OnlyRecentMissingOVV['Loading_cm2'] = OnlyRecentMissingOVV['Loading_cm2'].round(3)
# SampleCodes = SampleCodesChar().load
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
def read_df(_par_fls):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_par_fls)
if i.name.endswith("xlsx"):
_pp = pd.read_excel(i, index_col=[0])
elif i.name.endswith("pkl"):
_pp = pd.read_pickle(i)
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_pp = _pp.assign(
**{
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
)
yield _pp
except StopIteration:
return "all done"
print("gen empty")
# finally:
# yield _pp
# _pf = _pp.PAR_file.unique()[0]
# _pfstem = Path(_pf).stem
# _spectraf = list(Path(Path(i).parent).rglob(f'{_pfstem}_v{FileOperations.version}.xlsx' ))[0]
# _spectradf = pd.read_excel(_spectraf )
# yield _pp
# bn = 'O2_EIS-range_1500rpm_JOS1_285_5mV_1500rpm_pars_v20.xlsx'
EIS_OVV = EC_index.loc[EC_index.PAR_exp == "EIS"]
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
# +['PAR_file','Segment',EvRHE, 'RPM_DAC']
# [ Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' ) for d in EIS_OVV.Dest_dir.unique()]
_par_files = [
list(
Path(d.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
)
for d in EIS_OVV.Dest_dir.unique()
]
_EIS_WB_files = [
list(Path(d.joinpath("EIS/lin_Warburg")).rglob(f"lin_Warburg*.pkl"))
for d in EIS_OVV.Dest_dir.unique()
]
_EIS_WB_fls = (a for i in _EIS_WB_files for a in i)
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
# tt = (i for i in _par_fls if bn in i.name)
# __ttp = list(read_df(tt, col_names))
if eis_daily.get("_raw_exists", False) and use_daily == True:
EIS_pars_all = pd.read_pickle(eis_daily.get("daily_path_RAW"))
elif (
not eis_daily.get("_raw_exists", False)
and use_daily == True
and eis_daily.get("daily_options_RAW")
):
EIS_pars_all = pd.read_pickle(eis_daily.get("daily_options_RAW")[-1])
else:
_pars_lst = list(read_df(_par_fls))
EIS_pars_RAW = pd.concat(_pars_lst, sort=False)
EIS_pars_RAW.sort_values("source_delta_mtime", inplace=True)
EIS_pars_RAW = EIS_pars_RAW.reset_index()
EIS_pars_all = EIS_pars_RAW
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[
a
for i in float_cols
for a in EIS_pars_all.columns
if a.startswith(i)
]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(
0
)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(
float
)
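# Repair step: some exported cells hold an lmfit Parameter repr string (containing
# "value=...") instead of a plain float; the block below parses out the numeric
# value and re-inserts the fixed rows into EIS_pars_all.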
wrong_fls = [
EIS_pars_all.loc[
EIS_pars_all[i].astype(str).str.contains("Parameter")
]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
def _add_WB_pars(EIS_pars_all):
_WB_RAW_daily_path = eis_daily.get("daily_path_RAW_WB")
if _WB_RAW_daily_path.exists():
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
else:
_WB_lst = list(read_df(_EIS_WB_fls))
_EIS_WB_pars_all = pd.concat(
_WB_lst, sort=False, ignore_index=True
)
_EIS_WB_pars_all.to_pickle(_WB_RAW_daily_path)
_diffcols = set(EIS_pars_all.columns).difference(
_EIS_WB_pars_all.columns
)
_mcols = [
i
for i in set(EIS_pars_all.columns).intersection(
_EIS_WB_pars_all.columns
)
if i
not in [
"sourceFilename",
"source_mtime",
"source_delta_mtime",
"sourcebasename",
]
]
_dtype_mismatch = [
(i, EIS_pars_all[i].dtype, _EIS_WB_pars_all[i].dtype)
for i in _mcols
if EIS_pars_all[i].dtype != _EIS_WB_pars_all[i].dtype
]
if _dtype_mismatch:
_excl = []
for i in _dtype_mismatch:
try:
_EIS_WB_pars_all[i[0]] = _EIS_WB_pars_all[i[0]].astype(
i[1]
)
except Exception as e:
_excl.append(i[0])
print(i, "\n", e)
_mcols = [i for i in _mcols if i not in _excl]
# EIS_pars_all[i[0]] = EIS_pars_all[i[0]].astype(i[2])
_merge = pd.merge(
EIS_pars_all,
_EIS_WB_pars_all,
on=_mcols,
how="left",
suffixes=("", "_WB"),
)
if not _merge.empty:
return _merge
else:
print("WB merge was empty")
return EIS_pars_all
EIS_pars_all = _add_WB_pars(EIS_pars_all)
EIS_pars_all = EIS_pars_all.assign(
**{
"EIS_fake": [
"fakeZmean" in Path(i).name
for i in EIS_pars_all.PAR_file.to_numpy()
]
}
)
_not_in_index = EIS_pars_all.loc[
(
~(EIS_pars_all.PAR_file.isin(EC_index.PAR_file.values))
& (~EIS_pars_all.EIS_fake == True)
)
]
CleanUpCrew(
list_of_files=_not_in_index.sourceFilename.unique(), delete=True
)
EIS_pars_all = EIS_pars_all.loc[
~(EIS_pars_all.index.isin(_not_in_index.index))
]
EIS_pars_all = Load_from_Indexes.test_update_from_index(
EIS_pars_all, EC_index
)
EIS_pars_all.to_pickle(eis_daily.get("daily_path_RAW"))
# EIS_pars_all = pd.read_pickle(eis_daily.get('daily_path_RAW'))
# === TAKING ONLY NEWEST FITTING PARS ===
#
# for n ,gr in EIS_pars_all.groupby(by=col_names):
# n,gr
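# Add rounded DC potential columns derived from the applied potential (EvRHE):
# E_dc_RHE in V and E_dc_RHE_mV in mV.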
E_dc_RHE_cols = [
(np.round(i, 3), np.round(i, 3) * 1e3)
for i in EIS_pars_all[EvRHE].values
]
EIS_pars_all = EIS_pars_all.assign(
**{
"E_dc_RHE": [i[0] for i in E_dc_RHE_cols],
"E_dc_RHE_mV": [i[1] for i in E_dc_RHE_cols],
}
)
EIS_pars_recent = EIS_pars_all.loc[
(EIS_pars_all.source_mtime > pd.Timestamp(dt.date(2020, 11, 25)))
& (EIS_pars_all.PAR_file.str.contains("None") == False)
]
EIS_pars_undup = EIS_pars_recent.dropna(subset=col_names).drop_duplicates(
keep="first"
)
# EIS_pars = EIS_pars.loc[EIS_pars.lmfit_var_names.str.contains('/(')]
# set([a for i in EIS_pars_all.lmfit_var_names.unique() if not '(' in i for a in i.split(', ')])
# === POST EDITING OF LOADED PARS ===
EIS_pars_undup = EIS_pars_undup.assign(
**{"Loading_cm2": EIS_pars_undup["Loading_cm2"].round(3)}
)
EIS_pars_undup = post_helper.make_uniform_EvRHE(EIS_pars_undup)
EIS_pars_undup = CollectPostOVV.MatchECconditions(EIS_pars_undup)
# EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(EC_index, EIS_pars_undup)
_oc_OVV = list(EIS_pars_undup.columns.intersection(EIS_OVV.columns))
if not set(EIS_OVV.groupby(_oc_OVV).groups.keys()).intersection(
EIS_pars_undup.groupby(_oc_OVV).groups.keys()
):
_drpcols = [
a
for a in EIS_pars_undup.columns
if (
a in [i for i in _oc_OVV if i not in "PAR_file"]
or "_".join(a.split("_")[0:-1])
in [i for i in _oc_OVV if i not in "PAR_file"]
)
]
# EIS_pars_undup.drop(columns =_drpcols)
EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(
EC_index, EIS_pars_undup.drop(columns=_drpcols)
)
# EIS_pars_undup = pd.merge(EIS_pars_undup,EIS_OVV,on=_oc_OVV, how='left')
_oc_SC = list(EIS_pars_undup.columns.intersection(SampleCodes.columns))
EIS_pars_undup = pd.merge(
EIS_pars_undup, SampleCodes, how="left", on=_oc_SC
)
EIS_pars_BRUTE = EIS_pars_undup.loc[
(EIS_pars_undup.BRUTE_FIT == 1) | (EIS_pars_undup.FINAL_FIT == 0)
]
if BRUTE_out:
EIS_pars_BRUTE.to_pickle(eis_daily["daily_path_BRUTE"])
EIS_pars = EIS_pars_undup.loc[(EIS_pars_undup.FINAL_FIT == 1)]
EIS_pars = EIS_extra_methods.add_best_model_per_spectrum(EIS_pars)
EIS_pars.to_pickle(eis_daily["daily_path"])
_logger.info(f'EIS_pars OVV to daily pickle: {eis_daily.get("daily_path")}')
_err_type = "lmfit_MSE"
_filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-2) \
& (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
& (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
& (EIS_pars.Qad < 1) & (EIS_pars.tau < 1E3)"
_filter += '& (EIS_pars.SampleID.str.contains("JOS1|JOS2|JOS3|JOS4|JOS5"))'
_filter += "& (EIS_pars.EIS_fake == False)"
_grps = ["Model_EEC", "Gas", "lmfit_var_names"][0:2]
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values("mean", ascending=True)
)
print(best_models)
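# Keep only equivalent-circuit models with enough successful fits to be meaningful:
# groups with more than 5 filtered fits and a non-zero error spread; EIS_pars is
# restricted to those Model_EEC names below.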
keep_models = (
best_models.loc[(best_models["count"] > 5) & (best_models["std"] > 0)]
.index.get_level_values(0)
.unique()
)
EIS_pars = EIS_pars.loc[EIS_pars.Model_EEC.isin(keep_models)]
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values(["Gas", "mean"], ascending=True)
)
print(best_models)
if hasattr(EIS_pars, "best_mod_name"):
# EIS_best_mods = EIS_pars.loc[EIS_pars.Model_EEC_name.isin([i for i in EIS_pars.best_mod_name.unique() if not pd.isna(i)])]
EIS_best_mods = EIS_pars.loc[
EIS_pars.index.isin(
[i for i in EIS_pars.best_mod_n.unique() if not pd.isna(i)]
)
]
_agg = (
EIS_best_mods.dropna(subset=[_err_type])
.groupby(_grps + ["E_RHE"])[_err_type]
.agg(["count", "mean", "std"])
)
_agg_best = _agg.loc[_agg["count"] > 3].sort_values(
["Gas", "E_RHE", "mean"], ascending=True
)
# fast_checking_EEC_models =[(2, 'EEC_2CPEpRW',50),
# (3, 'EEC_2CPEpW',120),(4,'EEC_2CPE_W',100),
# (5, 'EEC_2CPE',100), (6,'EEC_Randles_RWpCPE_CPE',60)]
# # ['Model(Singh2015_RQRQR)', 'Model(Singh2015_RQRWR)', 'Model(Singh2015_R3RQ)', 'Model(Bandarenka_2011_RQRQR)' ]
if extra_plotting == "blocked":
for n, r in best_models.head(1).iterrows():
modname = r.name[0]
varnames = [
a
for i in EIS_pars.loc[
EIS_pars["Model_EEC"] == modname
].lmfit_var_names.unique()
for a in i.split(", ")
]
# [1]]+[fast_checking_EEC_models[4]]:
# modname = f'Model({_modname})'
EIS_pars_fltr = EIS_pars.loc[
(EIS_pars["Model_EEC"] == modname) & eval(_filter)
]
for var in varnames:
EIS_pars_fltr.query("pH < 7 & Rct < 2E3").plot(
y=var,
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
title=modname,
logy=0,
)
# .query('pH < 15').plot(y='Rs',x='E_RHE',c='pH',colormap='rainbow_r',kind='scatter',ylim=(0,100),title=modname)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 15").plot(
y="Qad",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 0.05),
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="R_ion",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="tau",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 100),
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="Rct",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0.1, 1e4),
logy=True,
title=modname,
)
if (
not EIS_pars.loc[EIS_pars["Model_EEC"] == modname]
.query("pH > 7")
.empty
):
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH > 7").plot(
y="Qad+Cdlp",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0.1, 1e-4),
logy=True,
title=modname,
)
plt.close()
# EIS_pars.query('pH < 17').groupby('Model_EEC').plot(y='RedChisqr',x='E_RHE',colormap='viridis',kind='scatter',ax=ax)
_porph = EIS_pars.loc[EIS_pars.PAR_file.str.contains("06.05")]
fig, ax = plt.subplots()
for n, Hgr in _porph.query("pH < 7").groupby("postAST"):
c_set = "g" if n == "no" else "r"
Hgr.plot(
x="E_RHE",
y="Rct_kin",
s=50,
c=c_set,
kind="scatter",
label=n,
title="EIS, E vs Qad at",
ax=ax,
ylim=(1e-6, 1),
logy=True,
)
plt.show()
plt.close()
if "update_index" in kwargs.keys():
pass
return EIS_pars
# dest_files.append({'index' : n, 'PAR_file' : str(r.PAR_file),'EIS_dest_dir' : EIS_dest_dir,
# 'EIS_dest_Pars' : EIS_dest_dir.joinpath( Path(r.PAR_file).stem + '_pars.xlsx'),
# 'EIS_dest_spectra' :EIS_dest_dir.joinpath( Path(r.PAR_file).stem + '_Combined.xlsx')
# })
# EIS_pars_index_p1 = postOVVout.query('Type_output == "EIS_Pars1"')
## EIS_pars_index_p2 = postOVVout.query('Type_output == "EIS_Pars2"')
# EIS_pars_indexes = postOVVout.query('Type_output == "EIS_Pars"')
# if 'source' in kwargs.keys():
# EIS_pars_indexes = EIS_pars_indexes.loc[EIS_pars_indexes.Source == kwargs.get('source','ExpDirs')]
## pars_index_from_read = EIS_get_index_column_names()
## EIS_pars_index = pd.concat([EIS_pars_index_p1,EIS_pars_index_p2])
## EIS_pars_index = postOVVout.groupby('Type_output').get_group('EIS_Pars1')
# EIS_pars_spectra = postOVVout.groupby('Type_output').get_group('EIS_AllData_combined').drop_duplicates(subset=['PAR_file','DestFile','Time_since_run'])
## EPtest = EIS_pars_indexes.loc[no_match] # a slice for testing purpose
## test_load_nm = no_matches.loc[no_matches[2].str.contains('Columns not matching! "Loading_cm2" values:'),0].values
## EPtest = EIS_pars_indexes.loc[EIS_pars_indexes.index.isin(test_load_nm)]
# EISlst,no_match,faillst = [],[],[]
@staticmethod
def HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False, extra_plotting=False, xls_out=False
):
# exp_type = 'H
IndexOVV_HPRRpars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"Pars_IndexOVV_HPRR_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_HPRRpars_fn.exists() and reload != True:
HPRR_pars_char = pd.read_excel(IndexOVV_HPRRpars_fn, index_col=[0])
HPRR_pars_char = FileOperations.ChangeRoot_DF(
HPRR_pars_char, [], coltype="string"
)
else:
# === Making destination directories === #
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PPD_HPRR = PostDestDir.joinpath("HPRR")
PPD_HPRR.mkdir(parents=True, exist_ok=True)
PPD_HPRR_data = PPD_HPRR.joinpath("DataFiles")
PPD_HPRR_data.mkdir(parents=True, exist_ok=True)
# # === Loading Index files for HPRR and reading the Parameters files into one DataFrame === #
HPRR_pars_index = postOVVout.groupby("Type_output").get_group("HPRR")
HP_Pars_files = [
Path(i)
for i in HPRR_pars_index["SourceFilename"].unique()
if "_Pars" in Path(i).stem
]
HPRR_pars_raw = pd.concat(
[pd.read_excel(i, index_col=[0]) for i in HP_Pars_files], sort=False
)
HPRR_pars_raw = FileOperations.ChangeRoot_DF(
HPRR_pars_raw, [], coltype="string"
)
HPRR_merge_cols = [
i
for i in HPRR_pars_raw.columns
if i in HPRR_pars_index.columns and not "Segment" in i
]
HPRR_p2, HPRR_ovv2 = HPRR_pars_raw.set_index(
HPRR_merge_cols
), HPRR_pars_index.set_index(HPRR_merge_cols)
HPRR_pars_ovv = HPRR_p2.join(HPRR_ovv2, rsuffix="_ovv").reset_index()
HPRR_pars_merge_cols = [
i
for i in HPRR_pars_ovv.columns
if i in postOVVout.columns and not "Segment" in i and not "Unnamed" in i
]
HPRR_pars = pd.merge(
HPRR_pars_ovv, postOVVout, on=HPRR_pars_merge_cols, how="left"
)
# HPRR_pars = pd.merge(HPRR_pars_ovv,postOVVout,on='PAR_file',how='left')
print(
"Leftover SampleIDs: {0}".format(
set(HPRR_pars.SampleID.unique())
- set(SampleCodes.SampleID.unique())
)
)
HPRR_char_merge_cols = [
i
for i in HPRR_pars_ovv.columns
if i in SampleCodes.columns
if not "Unnamed" in i
]
HPRR_pars_char = pd.merge(
HPRR_pars_ovv, SampleCodes, on=HPRR_char_merge_cols, how="left"
)
HPRR_pars_char = HPRR_pars_char.drop(
columns=[i for i in HPRR_pars_char.columns if "Unnamed" in i]
)
new_IndexOVV_HPRRpars_target = FileOperations.CompareHashDFexport(
HPRR_pars_char, IndexOVV_HPRRpars_fn
)
_logger.info(
"PostEC HPRR re-indexed and saved: {0}".format(
new_IndexOVV_HPRRpars_target
)
)
if extra_plotting:
try:
HPRR_pars_char.query(
'(RPM_HPRR > 700) & (Loading_cm2 > 0.1) & (E_name == "E_j0")'
).plot(x="AD/AG", y="fit_slope_HPRR", kind="scatter")
except Exception as e:
print("HPRR plot fail:", e)
try:
HPRR_pars_char.query(
'(RPM_HPRR > 700) & (Loading_cm2 > 0.1) & (E_name == "E_j0")'
).plot(x="N_content", y="fit_slope_HPRR", kind="scatter")
except Exception as e:
print("HPRR plot fail:", e)
return HPRR_pars_char
@staticmethod
def HER_pars_OVV(reload=False, use_daily=True, extra_plotting=False, xls_out=False):
# exp_type = 'H
# PostDestDir = Load_from_Indexes.PostDestDir
her_daily = get_daily_pickle(exp_type="HER_pars")
# IndexOVV_HER_pars_fn = FindExpFolder('VERSASTAT').PostDir.joinpath('Pars_IndexOVV_HER_v{0}.pkl.compress'.format(FileOperations.version))
if her_daily.get("_exists", False) and reload != True:
# Cdl_pars_char = pd.read_excel(IndexOVV_N2_pars_fn,index_col=[0])
HER_pars_char = pd.read_pickle(her_daily.get("daily_path"))
HER_pars_char = FileOperations.ChangeRoot_DF(
HER_pars_char, [], coltype="string"
)
else:
# @@ Check POST_AST status from OVV and PRM
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
def read_df(_par_fls, read_types=["HER_pars"]):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_par_fls)
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_i_stem = i.stem
_pparts = i.parent.parts
if f"HER_v{FileOperations.version}" in _pparts[-2]:
if _i_stem.startswith("HER") or "HER" in _i_stem.split("_"):
# any([_i_stem.startswith(_p) for _p in ['N2_HER|N2_EIS']]):
_type = "HER_pars"
else:
_type = "HER_unknown"
else:
_type = "_unknown"
_meta = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"source_basename": _i_stem,
"source_type": _type,
}
if _type in read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(
_pp, [], coltype="string"
)
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
if not "Analysis_date" in _pp.columns:
_pp = _pp.assign(
**{
"Analysis_date": dt.datetime.fromtimestamp(
i.stat().st_ctime
)
}
)
_meta.update({"DF": _pp})
yield _meta
except StopIteration:
return "all done"
print("gen empty")
if her_daily.get("_raw_exists", False) and use_daily:
HER_pars_all = pd.read_pickle(her_daily.get("daily_path_RAW"))
elif her_daily.get("daily_options_RAW", False) and use_daily:
HER_pars_all = pd.read_pickle(her_daily.get("daily_options_RAW")[-1])
else: # Construct new N2 pars ovv from reading in files
HER_OVV = EC_index.loc[EC_index.PAR_exp.str.contains("HER")]
_par_files = [
list(
Path(d.joinpath(f"HER_v{FileOperations.version}")).rglob(
"*xlsx"
)
)
for d in HER_OVV.Dest_dir.unique()
]
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
_par_reads = read_df(_par_fls, read_types=["HER_pars"])
_reads_out = [i for i in _par_reads]
HER_pars_all = pd.concat(
[i["DF"] for i in _reads_out], sort=False, ignore_index=True
)
not_in_index = HER_pars_all.loc[
~HER_pars_all.PAR_file.isin(EC_index.PAR_file.values)
]
if not_in_index.empty:
print("HER pars, not-in-index is empty... success!")
else:
print("HER pars, not-in-index is NOT empty... delete wrong pars??")
# CleanUpCrew(list_of_files = not_in_index.SourceFilename.unique(), delete = True)
HER_pars_all = HER_pars_all.loc[
HER_pars_all.PAR_file.isin(EC_index.PAR_file.values)
]
HER_pars_recent = HER_pars_all.loc[
HER_pars_all.Analysis_date > dt.datetime.fromisoformat("2020-07-15")
]
for n, gr in HER_pars_recent.groupby("_type"):
print(
n,
f" len {len(gr)}",
f'\nSamples: {", ".join([str(i) for i in gr.SampleID.unique()])}',
)
HER_pars_recent.to_pickle(her_daily["daily_path_RAW"])
# ORR_merge_cols = [i for i in ORR_pars.columns if i in ORR_pars_index.columns and not 'Segment' in i]
# p2,ovv2 = ORR_pars.dropna(subset=ORR_merge_cols).set_index(ORR_merge_cols), ORR_pars_index.dropna(subset=ORR_merge_cols).set_index(ORR_merge_cols)
# ORR_pars_ovv = p2.join(ovv2,rsuffix='_ovv').reset_index()
# ORR_pars_ovv.query('(pH < 7)').plot(y='E_onset',x='Loading_cm2',kind='scatter',logy=False)
# ORR_pars_ovv = pd.merge(ORR_pars,ORR_pars_index,on=ORR_merge_cols,suffixes=('','_ovv'),how='left')
# ORR_pars = pd.merge(ORR_pars,postOVVout,on=['PAR_file','SampleID','Electrolyte','pH','postAST'],how='left',suffixes=('','_ovv'))
# print('Leftover SampleIDs: {0}'.format(set(ORR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
HER_pars_char = pd.merge(
HER_pars_recent, SampleCodes, on="SampleID", how="left"
)
HER_pars_char = pd.merge(
HER_pars_char, EC_index, on="PAR_file", suffixes=("", "_index")
)
### Fixing the pars after loading...
# TODO : taking out duplicates based on time_since_run....
Load_na = HER_pars_char.loc[HER_pars_char.Loading_cm2.isna()]
if not Load_na.empty:
Load_na_missingvalues = [
(n, *GetSampleID.ink_loading_from_filename(i.PAR_file))
for n, i in Load_na.iterrows()
]
Load_na_vals = (
pd.DataFrame(Load_na_missingvalues)
.rename(columns={1: "Loading_name", 2: "Loading_cm2"})
.set_index([0])
)
HER_pars_char.Loading_cm2.fillna(
value=Load_na_vals.Loading_cm2, inplace=True
)
# ORR_char_merge_cols = [i for i in ORR_pars.columns if i in SampleCodes.columns]
# ORR_pars_char = pd.merge(ORR_pars,SampleCodes,on=ORR_char_merge_cols,how='left')
HER_pars_char = HER_pars_char.drop(
columns=[i for i in HER_pars_char.columns if "Unnamed" in i]
)
if not HER_pars_char.loc[HER_pars_char.Loading_cm2.isna()].empty:
HER_pars_char.Loading_cm2 = HER_pars_char.Loading_cm2.fillna(
value=0.379
) # fillna for Loading_cm2
HER_pars_char.Loading_cm2 = HER_pars_char.Loading_cm2.round(3)
HER_pars_char.HER_at_E_slice = HER_pars_char.HER_at_E_slice.round(3)
if HER_pars_char.postAST.dropna().empty:
HER_pars_char = HER_pars_char.drop(columns="postAST")
# _int = list(set(ORR_pars_char.columns).intersection(set(EC_index.columns)))
HER_pars_char = pd.merge(
HER_pars_char, EC_index, on="PAR_file", suffixes=("", "_index")
)
HER_pars_char = make_uniform_RPM_DAC(HER_pars_char)
# ORR_pars_char = pd.merge(ORR_pars_char, EC_index[['PAR_file', 'postAST']], on = 'PAR_file')
_sgdct = []
for pf, pfgrp in HER_pars_char.groupby("PAR_file"):
_segs = pfgrp["Segment #"].unique()
for _n, _seg in enumerate(_segs):
_sgdct.append({"PAR_file": pf, "Segment #": _seg, "HER_Segnum": _n})
_HER_segnums = pd.DataFrame(_sgdct)
HER_pars_char = pd.merge(
HER_pars_char, _HER_segnums, on=["PAR_file", "Segment #"]
)
# ORR_pars_char.loc[ORR_pars_char.Loading_cm2.isna() == True]
# if xls_out:
# IndexOVV_HER_pars_fn = FileOperations.CompareHashDFexport(HER_pars_char,IndexOVV_HER_pars_fn)
HER_pars_char.to_pickle(her_daily["daily_path"])
if extra_plotting:
jmA2_slice = HER_pars_char.loc[(HER_pars_char["Segment #"] > 1)].query(
'(HER_type == "j_slice_onset") & (HER_at_J_slice == -2)'
)
jmA2_slice.plot(
x="Metal_wt", y="HER_Tafel_slope", kind="scatter", ylim=(0, 1e3)
)
jmA2_slice.plot(
x="N_content",
y="HER_Tafel_slope",
s=50,
c="g",
kind="scatter",
ylim=(0, 1e3),
)
# HER_atE = HER_pars_char.loc[(HER_pars_char['Segment #'] > 1) & np.isclose(HER_pars_char[EvRHE+'_upper'],-0.3,atol=0.02)].query('(E_type == "E_slice")')
if extra_plotting:
E_350mV_slice = HER_pars_char.loc[
(HER_pars_char["Segment #"] > 1)
].query(
'(HER_type == "E_slice") & (HER_at_E_slice < -0.29) & (HER_at_E_slice > -0.33)'
)
fig, ax = plt.subplots()
for n, Hgr in E_350mV_slice.groupby(["postAST", "RPM"]):
c_set = "g" if "no" in n else "r"
_ms_set = "o" if n[-1] < 100 else "*"
Hgr.plot(
x="N_content",
y="HER_J_upper",
s=50,
c=c_set,
kind="scatter",
label=n,
title="HER at -0.3 Vrhe, j vs N_content",
ax=ax,
**{"marker": _ms_set},
)
E_350mV_slice.plot(
x="N_content",
y="HER_J_upper",
kind="bar",
title="HER, j vs N_content at",
)
E_350mV_slice.plot(
x="BET_cat_agg",
y="HER_J_upper",
s=50,
c="g",
kind="scatter",
title="HER, j vs N_content at",
)
return HER_pars_char
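# Older loading path (apparently kept for reference): reads the previous
# single-file HER index pickle directly instead of the daily pickles above.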
def old_HER():
if IndexOVV_HER_pars_fn.exists() and reload is not True:
HER_pars_char = pd.read_pickle(IndexOVV_HER_pars_fn)
import pandas as pd
intervention_df = pd.read_csv("intervention_data.csv", dtype=str)
import os
import getpass
import logging # For warning
from datetime import datetime
import time
import re
import pandas as pd
import numpy as np
import shutil # Notably for copyfile
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
import jenkspy # Jenks clustering
import smtplib
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email import encoders
# Set / fix styles issues
sns.set_style()
pd.DataFrame._repr_latex_ = lambda self: r"""\centering{}""".format(self.to_latex())
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# Constants
_FAIL_LETTER = 'f'
_ALL_LETTERS = ['f', 'd', 'd+', 'c', 'c+', 'b', 'b+', 'a', 'a+']
_ALL_PASSING_LETTERS = ['d', 'd+', 'c', 'c+', 'b', 'b+', 'a', 'a+']
_ALL_PASSING_NORMAL_LETTERS = ['d', 'd+', 'c', 'c+', 'b', 'b+', 'a']
# Text strings for automated messages
_txt_salutation = """\
<html>
<body>
<p> Bonjour {} {}, <br><br>
"""
_txt_end = """ </p>
</body>
</html>
"""
_txt_score_overview = """
La moyenne du groupe est de {:.1f} points, et sa note médiane est de {:.1f} points. Vous avez obtenu une note de {:.1f} points.
<hr>
Voici le détail de vos points:
""".replace('\n', '<br>')
_txt_score_details = """
{} : {} points sur {}
""".replace('\n', '<br>')
_txt_mistakes_details = """
<hr>
Et voici le détail des points perdus:
{}
<hr> """.replace('\n', '<br>')
def correction_parser(filename, exam_name):
""" Reads in correction template and generates `Grader` object
Parameters
----------
filename : str
exam_name : str
Returns
-------
Grader object
"""
with pd.ExcelFile(filename) as f:
raw = pd.read_excel(f, sheet_name='Corrections', header=0, index_col=0)
raw_codes = pd.read_excel(f, sheet_name='codes', header=0, index_col=[0, 1])
universal_codes = pd.read_excel(f, sheet_name='codes_universels', header=0, index_col=0)
totals = pd.read_excel(f, sheet_name='totaux', header=0, index_col=0).squeeze()
init = pd.read_excel(f, sheet_name='init', header=0, index_col=0).squeeze()
versions = pd.read_excel(f, sheet_name='versions', header=0, index_col=0).squeeze()
return Grader(exam_name, raw, raw_codes, universal_codes, totals, init, versions)
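# Hypothetical usage sketch (file name, exam label and extra columns are placeholders):
# grader = correction_parser("corrections_template.xlsx", "Examen intra")
# grader.calc_grades(cols_to_drop=["commentaire"])
# grader.grades # expected to hold per-student results after calc_grades()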
class Grader:
def __init__(self, exam_name, raw_corr, raw_codes, universal_codes, totals, init, versions=None):
"""
Grader class to contain raw correction data and processed grades
Parameters
----------
exam_name : str
Name of exam, used for documentation
raw_corr: DataFrame
Correction codes for each student (row) and each question (columns) read from template
raw_codes: DataFrame
Definition and weight of each correction/error code; A negative "penalty" means that points are being added.
universal_codes : DataFrame
Definition and weight (relative and/or absolute penalty) of error codes that can apply to any question
totals : Data Series
Total points for each question
versions: DataFrame (optional)
For each question (row) and each version of the exam (columns, 'A', 'B', etc.) the name/number of the
question as seen by the student on the exam.
"""
# Data from tremplate
self.exam_name = exam_name
self.raw_corr = raw_corr
self.raw_codes = raw_codes
self.universal_codes = universal_codes
self.totals = totals
self.init = init
self.versions = versions
# Constants - hardcoded
self._OK_TERMS = ['ok', '0', '', 'OK']
self._CONTACT_COLS = ['prénom', 'nom', 'courriel']
self._COLS_TO_ALWAYS_DROP = ['version', 'Exemple/explication']
# Refined data
self.contacts = raw_corr[self._CONTACT_COLS]
self.corr = raw_corr.drop(self._CONTACT_COLS, axis=1)
# Semi-final variables
self.correction_matrix = None
self.codes = None
# Variables finales
self.grades = None
self.message = dict()
self.message['salutation'] = _txt_salutation
self.message['foreword'] = ""
self.message['score_overview'] = _txt_score_overview
self.message['score_details'] = _txt_score_details
self.message['mistakes_details'] = _txt_mistakes_details
self.message['closing'] = ""
def calc_grades(self, cols_to_drop=None):
"""
The main method. Calculates the grade of each student.
Calls in sequence `_pivot_corr()`, `_clean_codes()`, `_check_sanity_and_harmonize()`, and `_calc_grades()`
Parameters
----------
cols_to_drop : list
Columns in the template to ignore in the calculation process (custom additionnal columns, etc.)
"""
if cols_to_drop is None:
cols_to_drop = []
self._pivot_corr(cols_to_drop=cols_to_drop)
self._clean_codes(cols_to_drop=cols_to_drop)
self._check_sanity_and_harmonize()
self._calc_grades()
def _pivot_corr(self, cols_to_drop=None):
""" Pivot the correction comments in raw_corr into a binary matrix
For each student, we go from a list of error codes to a binary matrix indicating which students (rows) did
what mistake (columns, level 1) in what question (columns, level 0)
Parameters
----------
cols_to_drop : list
Columns in the template to ignore in the calculation process (custom additionnal columns, etc.)
"""
cols_to_drop = self._COLS_TO_ALWAYS_DROP + cols_to_drop
# Define dataframe with multi-index columns capturing all correction types
for cx in self.corr.columns:
all_codes = self.corr[cx].dropna().unique().tolist()
all_codes = {y for x in all_codes for y in _clean(x)}
new_cx = pd.MultiIndex.from_product([[cx], all_codes])
try:
mcx = mcx.append(new_cx)
except NameError:
mcx = new_cx
correction_matrix = pd.DataFrame(0.0, index=self.corr.index, columns=mcx)
# Fill
for ix in self.corr.index:
for cx in self.corr.columns:
erreurs = _clean(self.corr.loc[ix, cx])
for err in erreurs:
correction_matrix.loc(axis=0)[ix].loc[[cx], err] += 1
# Remove other stuff
if cols_to_drop is not None:
correction_matrix = correction_matrix.drop(cols_to_drop, axis=1, level=0, errors='ignore')
correction_matrix = correction_matrix.reindex(columns=self.totals.index, level=0)
# Enlève OK
self.correction_matrix = correction_matrix.drop(labels=self._OK_TERMS, axis=1, level=1, errors='ignore')
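# Illustrative (hypothetical question/code names): correction_matrix.loc[student, ("Q1", "e2")] == 1.0
# means the student was tagged with error code "e2" on question "Q1"; OK/empty codes are dropped.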
def _clean_codes(self, cols_to_drop):
""" Clean correction codes and weighting (drop empty, expand universal correction codes, etc.)
Parameters
----------
cols_to_drop : list
Columns in the template to ignore in the calculation process (custom additionnal columns, etc.)
"""
# Drop extraneous columns
cols_to_drop = self._COLS_TO_ALWAYS_DROP + cols_to_drop
codes = self.raw_codes.drop(cols_to_drop, axis=1, errors='ignore')
# Drop empty rows
todrop = codes['points'].isna()
self.codes = codes.loc[~todrop]
# Scale and insert universal codes
for ix, v in self.totals.items():
scaled_codes = (self.universal_codes[['pénalités relatives', 'pénalités_absolues']] * [v, 1])
selected_codes = scaled_codes.dropna(axis=0, how='all').min(axis=1, skipna=True).to_frame('points')
new_codes = pd.concat([self.universal_codes['définition'], selected_codes], axis=1, join='inner')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype,
)
from pandas.core.index import _ensure_index
from pandas.core.base import DataError
from modin.error_message import ErrorMessage
from modin.engines.base.block_partitions import BaseBlockPartitions
class PandasQueryCompiler(object):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self,
block_partitions_object: BaseBlockPartitions,
index: pandas.Index,
columns: pandas.Index,
dtypes=None,
):
assert isinstance(block_partitions_object, BaseBlockPartitions)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
"""By default, constructor method will invoke an init"""
return type(self)(block_paritions_object, index, columns, dtypes)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self.data.full_reduce(map_func, dtype_builder, 0)
self._dtype_cache.index = self.columns
elif not self._dtype_cache.index.equals(self.columns):
self._dtype_cache.index = self.columns
return self._dtype_cache
def _set_dtype(self, dtypes):
self._dtype_cache = dtypes
dtypes = property(_get_dtype, _set_dtype)
# These objects are currently not distributed.
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _validate_set_axis(self, new_labels, old_labels):
new_labels = _ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = _ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = _ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
# END Index and columns objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix):
new_column_names = self.columns.map(lambda x: str(prefix) + str(x))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
def add_suffix(self, suffix):
new_column_names = self.columns.map(lambda x: str(x) + str(suffix))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(), self.index.copy(), self.columns.copy(), self._dtype_cache
)
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if isinstance(other, list):
return self._join_list_of_managers(other, **kwargs)
else:
return self._join_query_compiler(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
# Concatenating two managers requires aligning their indices. After the
# indices are aligned, it should just be a simple concatenation of the
# `BaseBlockPartitions` objects. This should not require remote compute.
joined_axis = self._join_index_objects(
axis,
[other.columns if axis == 0 else other.index for other in others],
join,
sort=sort,
)
# Since we are concatenating a list of managers, we will align all of
# the indices based on the `joined_axis` computed above.
to_append = [other.reindex(axis ^ 1, joined_axis).data for other in others]
new_self = self.reindex(axis ^ 1, joined_axis).data
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_query_compiler(self, other, **kwargs):
assert isinstance(
other, type(self)
), "This method is for data manager objects only"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(1, other.index, how, sort=sort)
to_join = other.reindex(0, joined_index).data
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# We are using proxy DataFrame objects to build the columns based on
# the `lsuffix` and `rsuffix`.
self_proxy = pandas.DataFrame(columns=self.columns)
other_proxy = pandas.DataFrame(columns=other.columns)
new_columns = self_proxy.join(
other_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of DataManager objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(
1, [other.index for other in others], how, sort=sort
)
to_join = [other.reindex(0, joined_index).data for other in others]
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
self_proxy = pandas.DataFrame(columns=self.columns)
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
joined_index = self._join_index_objects(1, other.index, how_to_join, sort=False)
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
reindexed_other = other.reindex(0, joined_index).data
reindexed_self = self.reindex(0, joined_index).data
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, self_cols, other_cols, func):
left.columns = self_cols
right.columns = other_cols
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1,
lambda l, r: inter_data_op_builder(l, r, self_cols, other_cols, func),
reindexed_other,
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = pandas.DataFrame()._get_axis_number(kwargs.get("axis", 0))
if isinstance(other, type(self)):
return self.inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self.scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
def add(self, other, **kwargs):
"""Adds this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with added data and new index.
"""
func = pandas.DataFrame.add
return self._inter_df_op_handler(func, other, **kwargs)
def div(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.div
return self._inter_df_op_handler(func, other, **kwargs)
def eq(self, other, **kwargs):
"""Compares equality (==) with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.eq
return self._inter_df_op_handler(func, other, **kwargs)
def floordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.floordiv
return self._inter_df_op_handler(func, other, **kwargs)
def ge(self, other, **kwargs):
"""Compares this manager >= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ge
return self._inter_df_op_handler(func, other, **kwargs)
def gt(self, other, **kwargs):
"""Compares this manager > than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.gt
return self._inter_df_op_handler(func, other, **kwargs)
def le(self, other, **kwargs):
"""Compares this manager < than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.le
return self._inter_df_op_handler(func, other, **kwargs)
def lt(self, other, **kwargs):
"""Compares this manager <= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.lt
return self._inter_df_op_handler(func, other, **kwargs)
def mod(self, other, **kwargs):
"""Mods this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod-ed data and index.
"""
func = pandas.DataFrame.mod
return self._inter_df_op_handler(func, other, **kwargs)
def mul(self, other, **kwargs):
"""Multiplies this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with multiplied data and index.
"""
func = pandas.DataFrame.mul
return self._inter_df_op_handler(func, other, **kwargs)
def ne(self, other, **kwargs):
"""Compares this manager != to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ne
return self._inter_df_op_handler(func, other, **kwargs)
def pow(self, other, **kwargs):
"""Exponential power of this manager to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and index.
"""
func = pandas.DataFrame.pow
return self._inter_df_op_handler(func, other, **kwargs)
def rdiv(self, other, **kwargs):
"""Divides other object (manager or scalar) with this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.rdiv
return self._inter_df_op_handler(func, other, **kwargs)
def rfloordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.rfloordiv
return self._inter_df_op_handler(func, other, **kwargs)
def rmod(self, other, **kwargs):
"""Mods this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod data and index.
"""
func = pandas.DataFrame.rmod
return self._inter_df_op_handler(func, other, **kwargs)
def rpow(self, other, **kwargs):
"""Exponential power of other object (manager or scalar) to this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and new index.
"""
func = pandas.DataFrame.rpow
return self._inter_df_op_handler(func, other, **kwargs)
def rsub(self, other, **kwargs):
"""Subtracts other object (manager or scalar) from this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.rsub
return self._inter_df_op_handler(func, other, **kwargs)
def sub(self, other, **kwargs):
"""Subtracts this manager from other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.sub
return self._inter_df_op_handler(func, other, **kwargs)
def truediv(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Functionally same as div
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.truediv
return self._inter_df_op_handler(func, other, **kwargs)
def clip(self, lower, upper, **kwargs):
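"""Clips values at the given thresholds (pandas.DataFrame.clip semantics).
Args:
lower: Lower bound (scalar or list-like).
upper: Upper bound (scalar or list-like).
Returns:
New DataManager with clipped data.
"""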
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
if is_list_like(lower) or is_list_like(upper):
df = self.map_across_full_axis(axis, func)
return self.__constructor__(df, self.index, self.columns)
return self.scalar_operations(axis, lower or upper, func)
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
# We are required to perform this reindexing on everything to
# shuffle the data together
reindexed_cond = cond.reindex(0, self.index).data
reindexed_other = other.reindex(0, self.index).data
reindexed_self = self.reindex(0, self.index).data
first_pass = reindexed_cond.inter_data_operation(
1,
lambda l, r: where_builder_first_pass(l, r, **kwargs),
reindexed_other,
)
final_pass = reindexed_self.inter_data_operation(
1, lambda l, r: where_builder_second_pass(l, r, **kwargs), first_pass
)
return self.__constructor__(final_pass, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = [i for i in range(len(other))]
def where_builder_series(df, cond, other, **kwargs):
return df.where(cond, other, **kwargs)
reindexed_self = self.reindex(
axis, self.index if not axis else self.columns
).data
reindexed_cond = cond.reindex(
axis, self.index if not axis else self.columns
).data
new_data = reindexed_self.inter_data_operation(
axis,
lambda l, r: where_builder_series(l, r, other, **kwargs),
reindexed_cond,
)
return self.__constructor__(new_data, self.index, self.columns)
# END Inter-Data operations
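# Editorial sketch (toy data, not part of the DataManager API above): the
# two-pass `where` strategy reproduces, per partition, what a single plain
# pandas call computes globally -- keep values where the condition holds and
# fall back to `other` elsewhere.
import pandas
_sketch_df = pandas.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
_sketch_cond = _sketch_df > 2
_sketch_where = _sketch_df.where(_sketch_cond, 0)  # values kept where cond is True, 0 elsewhere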
# Single Manager scalar operations (e.g. add to scalar, list of scalars)
def scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
New DataManager with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self.map_partitions(func)
# END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
New DataManager with updated data and new index.
"""
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
New DataManager with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = "index" if "index" not in self.columns else "level_0"
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
# END Reindex/reset_index
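# Editorial sketch (toy data): per-partition reindexing above mimics what a
# plain pandas reindex does globally -- labels missing from the partition come
# back as NaN, and the internal index is then reset to a RangeIndex.
import pandas
_sketch_part = pandas.DataFrame({"x": [1.0, 2.0]}, index=["a", "b"])
_sketch_reindexed = _sketch_part.reindex(index=["a", "b", "c"])  # row "c" is all-NaN
_sketch_reindexed.reset_index(drop=True, inplace=True)  # back to a RangeIndex, as in reindex_builder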
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
#
# _is_transposed, 0 for False or non-transposed, 1 for True or transposed.
_is_transposed = 0
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the
new_manager = self.__constructor__(new_data, self.columns, self.index)
# It is possible that this is already transposed
new_manager._is_transposed = self._is_transposed ^ 1
return new_manager
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_reduce(self, axis, map_func, reduce_func=None, numeric_only=False):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
numeric_only: Apply only over the numeric rows.
Return:
Returns Pandas Series containing the results from map_func and reduce_func.
"""
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
if reduce_func is None:
reduce_func = map_func
# The XOR here will ensure that we reduce over the correct axis that
# exists on the internal partitions. We flip the axis when the data is transposed.
result = query_compiler.data.full_reduce(
map_func, reduce_func, axis ^ self._is_transposed
)
if result.shape == (0,):
return result
elif not axis:
result.index = query_compiler.columns
else:
result.index = query_compiler.index
return result
def _process_min_max(self, func, **kwargs):
"""Calculates the min or max of the DataFrame.
Return:
Pandas series containing the min or max values from each column or
row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
def min_max_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
map_func = self._prepare_method(min_max_builder, **kwargs)
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
Pandas series containing counts of non-NaN objects from each column or row.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", False)
map_func = self._prepare_method(pandas.DataFrame.count, **kwargs)
reduce_func = self._prepare_method(pandas.DataFrame.sum, **kwargs)
return self.full_reduce(axis, map_func, reduce_func, numeric_only)
def max(self, **kwargs):
"""Returns the maximum value for each column or row.
Return:
Pandas series with the maximum values from each column or row.
"""
return self._process_min_max(pandas.DataFrame.max, **kwargs)
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
Pandas series containing the mean from each numerical column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
try:
# If we need to drop any columns, it will throw a TypeError
return sums.divide(counts)
# In the case that a TypeError is thrown, we need to iterate through, similar to
# how pandas does and do the division only on things that can be divided.
# NOTE: We will only hit this condition if numeric_only is not True.
except TypeError:
def can_divide(l, r):
try:
pandas.Series([l]).divide(r)
except TypeError:
return False
return True
# Iterate through the sums to check that we can divide them. If not, then
# drop the record. This matches pandas behavior.
return pandas.Series(
{
idx: sums[idx] / counts[idx]
for idx in sums.index
if can_divide(sums[idx], counts[idx])
}
)
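# Editorial sketch (toy data): the mean above is assembled from two full
# reductions, sum and count, so each can run independently per partition; for
# numeric data this matches a direct pandas mean.
import pandas
_sketch_frame = pandas.DataFrame({"a": [1.0, 2.0, None], "b": [4.0, 5.0, 6.0]})
_sketch_mean_by_parts = _sketch_frame.sum() / _sketch_frame.count()
_sketch_mean_direct = _sketch_frame.mean()  # same values as the sum/count route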
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
Pandas series with the minimum value from each column or row.
"""
return self._process_min_max(pandas.DataFrame.min, **kwargs)
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
ignore_axis: Whether to ignore axis when raising TypeError
Return:
Pandas Series with sum or prod of DataFrame.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", None) if not axis else True
min_count = kwargs.get("min_count", 0)
reduce_index = self.columns if axis else self.index
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
else:
query_compiler = self
new_index = query_compiler.index if axis else query_compiler.columns
def sum_prod_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
else:
return pandas.DataFrame([])
map_func = self._prepare_method(sum_prod_builder, **kwargs)
if min_count <= 1:
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
elif min_count > len(reduce_index):
return pandas.Series(
[np.nan] * len(new_index), index=new_index, dtype=np.dtype("object")
)
else:
return self.full_axis_reduce(map_func, axis)
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
Pandas series with the product of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.prod, **kwargs)
def sum(self, **kwargs):
"""Returns the sum of each numerical column or row.
Return:
Pandas series with the sum of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.sum, **kwargs)
# END Full Reduce operations
# Map partitions operations
# These operations are operations that apply a function to every partition.
def map_partitions(self, func, new_dtypes=None):
return self.__constructor__(
self.data.map_across_blocks(func), self.index, self.columns, new_dtypes
)
def abs(self):
func = self._prepare_method(pandas.DataFrame.abs)
return self.map_partitions(func, new_dtypes=self.dtypes.copy())
def applymap(self, func):
remote_func = self._prepare_method(pandas.DataFrame.applymap, func=func)
return self.map_partitions(remote_func)
def isin(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.isin, **kwargs)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isna(self):
func = self._prepare_method(pandas.DataFrame.isna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isnull(self):
func = self._prepare_method(pandas.DataFrame.isnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def negative(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.__neg__, **kwargs)
return self.map_partitions(func)
def notna(self):
func = self._prepare_method(pandas.DataFrame.notna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def notnull(self):
func = self._prepare_method(pandas.DataFrame.notnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def round(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.round, **kwargs)
return self.map_partitions(func, new_dtypes=self._dtype_cache)
# END Map partitions operations
# Map partitions across select indices
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if dtype != self.dtypes[column]:
# Only add dtype only if different
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
new_dtype = np.dtype(dtype)
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
# END Map partitions across select indices
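# Editorial sketch (hypothetical mapping): grouping requested columns by their
# target dtype, as `astype` above does, lets each dtype be converted with a
# single map over the affected partitions instead of one call per column.
import numpy as np
_sketch_col_dtypes = {"a": "float64", "b": "float64", "c": "int64"}
_sketch_groups = {}
for _sketch_col, _sketch_dt in _sketch_col_dtypes.items():
    _sketch_groups.setdefault(np.dtype(_sketch_dt), []).append(_sketch_col)
# _sketch_groups == {dtype('float64'): ['a', 'b'], dtype('int64'): ['c']}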
# Column/Row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_axis_reduce(self, func, axis, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
# We XOR with axis because if we are doing an operation over the columns
# (i.e. along the rows), we want to take the transpose so that the
# results from the same partition will be concatenated together first.
# We need this here because if the operation is over the columns,
# map_across_full_axis does not transpose the result before returning.
result = self.data.map_across_full_axis(axis, func).to_pandas(
self._is_transposed ^ axis
)
if result.empty:
return result
if not axis:
result.index = (
alternate_index if alternate_index is not None else self.columns
)
else:
result.index = (
alternate_index if alternate_index is not None else self.index
)
return result
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
def any(self, **kwargs):
"""Returns whether any the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.any(**kwargs), **kwargs)
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
Pandas Series containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis_none = True if axis is None else False
axis = 0 if axis is None else axis
kwargs["axis"] = axis
bool_only = kwargs.get("bool_only", None)
kwargs["bool_only"] = False if bool_only is None else bool_only
not_bool_col = []
numeric_col_count = 0
for col, dtype in zip(self.columns, self.dtypes):
if not is_bool_dtype(dtype):
not_bool_col.append(col)
numeric_col_count += 1 if is_numeric_dtype(dtype) else 0
if bool_only:
if axis == 0 and not axis_none and len(not_bool_col) == len(self.columns):
return pandas.Series(dtype=bool)
if len(not_bool_col) == len(self.columns):
query_compiler = self
else:
query_compiler = self.drop(columns=not_bool_col)
else:
if (
bool_only is False
and axis_none
and len(not_bool_col) == len(self.columns)
and numeric_col_count != len(self.columns)
):
if func == pandas.DataFrame.all:
return self.getitem_single_key(self.columns[-1])[self.index[-1]]
elif func == pandas.DataFrame.any:
return self.getitem_single_key(self.columns[0])[self.index[0]]
query_compiler = self
builder_func = query_compiler._prepare_method(func, **kwargs)
result = query_compiler.full_axis_reduce(builder_func, axis)
if axis_none:
return func(result)
else:
return result
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._prepare_method(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index.
first_result = self.full_axis_reduce(func, 0)
return self.index[first_result.min()]
def _post_process_idx_ops(self, axis, intermediate_result):
"""Converts internal index to external index.
Args:
axis: 0 for columns and 1 for rows. Defaults to 0.
intermediate_result: Internal index of self.data.
Returns:
External index of the intermediate_result.
"""
index = self.index if not axis else self.columns
result = intermediate_result.apply(lambda x: index[x])
return result
def idxmax(self, **kwargs):
"""Returns the first occurance of the maximum over requested axis.
Returns:
Series containing the maximum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmax_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmax(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmax_builder, **kwargs)
max_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, max_result)
def idxmin(self, **kwargs):
"""Returns the first occurance of the minimum over requested axis.
Returns:
Series containing the minimum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmin_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmin(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmin_builder, **kwargs)
min_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, min_result)
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._prepare_method(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index.
first_result = self.full_axis_reduce(func, 0)
return self.index[first_result.max()]
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
Series containing the median of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.median, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
Series containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(index=False, deep=deep)
deep = kwargs.get("deep", False)
func = self._prepare_method(memory_usage_builder, **kwargs)
return self.full_axis_reduce(func, 0)
def nunique(self, **kwargs):
"""Returns the number of unique items over each column or row.
Returns:
Series of ints indexed by column or index names.
"""
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.nunique, **kwargs)
return self.full_axis_reduce(func, axis)
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
Series containing the quantile of each column or row.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
numeric_only = kwargs.get("numeric_only", True)
assert type(q) is float
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._prepare_method(quantile_builder, **kwargs)
result = query_compiler.full_axis_reduce(func, axis)
result.name = q
return result
def skew(self, **kwargs):
"""Returns skew of each column or row.
Returns:
Series containing the skew of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.skew, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def std(self, **kwargs):
"""Returns standard deviation of each column or row.
Returns:
Series containing the standard deviation of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.std, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def to_datetime(self, **kwargs):
"""Converts the Manager to a Series of DateTime objects.
Returns:
Series of DateTime objects.
"""
columns = self.columns
def to_datetime_builder(df, **kwargs):
df.columns = columns
return pandas.to_datetime(df, **kwargs)
func = self._prepare_method(to_datetime_builder, **kwargs)
return self.full_axis_reduce(func, 1)
def var(self, **kwargs):
"""Returns variance of each column or row.
Returns:
Series containing the variance of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = query_compiler._prepare_method(pandas.DataFrame.var, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_axis_reduce_along_select_indices(
self, func, axis, index, pandas_result=True
):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces Manager to Series using full knowledge of an
axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting series.
pandas_result: Return the result as a Pandas Series instead of raw data.
Returns:
Either a Pandas Series with index or BaseBlockPartitions object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
if pandas_result:
result = result.to_pandas(self._is_transposed)
result.index = index
return result
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.extend(add_to_excludes)  # extend, not append, so `exclude` stays a flat list of dtypes
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(
func, 0, new_columns, False
)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series(
[np.float64 for _ in new_columns], index=new_columns
)
else:
new_dtypes = pandas.Series(
[np.object for _ in new_columns], index=new_columns
)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def map_across_full_axis(self, axis, func):
return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(func, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(
new_data, self.index, self.columns, self._dtype_cache
)
def cumsum(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cummax(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumprod(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
def diff(self, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.diff, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def dropna(self, **kwargs):
"""Returns a new DataManager with null values dropped along given axis.
Return:
a new DataManager
"""
axis = kwargs.get("axis", 0)
subset = kwargs.get("subset")
thresh = kwargs.get("thresh")
how = kwargs.get("how", "any")
# We need to subset the axis that we care about with `subset`. This
# will be used to determine the number of values that are NA.
if subset is not None:
if not axis:
compute_na = self.getitem_column_array(subset)
else:
compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
else:
compute_na = self
if not isinstance(axis, list):
axis = [axis]
# We are building this dictionary first to determine which columns
# and rows to drop. This way we do not drop some columns before we
# know which rows need to be dropped.
if thresh is not None:
# Count the number of NA values and specify which are higher than
# thresh.
drop_values = {
ax ^ 1: compute_na.isna().sum(axis=ax ^ 1) > thresh for ax in axis
}
else:
drop_values = {
ax ^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1) for ax in axis
}
if 0 not in drop_values:
drop_values[0] = None
if 1 not in drop_values:
drop_values[1] = None
rm_from_index = (
[obj for obj in compute_na.index[drop_values[1]]]
if drop_values[1] is not None
else None
)
rm_from_columns = (
[obj for obj in compute_na.columns[drop_values[0]]]
if drop_values[0] is not None
else None
)
else:
rm_from_index = (
compute_na.index[drop_values[1]] if drop_values[1] is not None else None
)
rm_from_columns = (
compute_na.columns[drop_values[0]]
if drop_values[0] is not None
else None
)
return self.drop(index=rm_from_index, columns=rm_from_columns)
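# Editorial sketch (toy data): with `thresh`, the mask built in `drop_values`
# above is a per-row (or per-column) NaN count compared against the threshold;
# labels whose count exceeds it are the ones passed to `drop`.
import pandas
_sketch_na = pandas.DataFrame({"a": [1.0, None, None], "b": [1.0, 2.0, None]})
_sketch_counts = _sketch_na.isna().sum(axis=1)  # 0, 1, 2 NaNs per row
_sketch_rows_to_drop = _sketch_na.index[_sketch_counts > 1]  # only the last row here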
def eval(self, expr, **kwargs):
"""Returns a new DataManager with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new PandasDataManager with new columns after applying expr.
"""
inplace = kwargs.get("inplace", False)
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
# if there is no assignment, then we simply save the results
# in the first column
if expect_series:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
else:
expr = "{0} = {1}".format(columns[0], expr)
def eval_builder(df, **kwargs):
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
result.columns = pandas.RangeIndex(0, len(result.columns))
return result
func = self._prepare_method(eval_builder, **kwargs)
new_data = self.map_across_full_axis(1, func)
if expect_series:
result = new_data.to_pandas()[0]
result.name = columns_copy.name
result.index = index
return result
else:
columns = columns_copy.columns
return self.__constructor__(new_data, self.index, columns)
def mode(self, **kwargs):
"""Returns a new DataManager with modes calculated for each label along given axis.
Returns:
A new PandasDataManager with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return result
func = self._prepare_method(mode_builder, **kwargs)
new_data = self.map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
return self.__constructor__(
new_data, new_index, new_columns, self._dtype_cache
).dropna(axis=axis, how="all")
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new PandasDataManager with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self.map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self.map_across_full_axis(axis, func)
# Since we assume no knowledge of internal state, we get the columns
# from the internal partitions.
if numeric_only:
new_columns = self.compute_index(1, new_data, True)
else:
new_columns = self.columns
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
index = self.columns if axis else self.index
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
def sort_index_builder(df, **kwargs):
if axis:
df.columns = index
else:
df.index = index
return df.sort_index(axis=axis, **kwargs)
func = self._prepare_method(sort_index_builder, **kwargs)
new_data = self.map_across_full_axis(axis, func)
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
return self.__constructor__(
new_data, new_index, new_columns, self.dtypes.copy()
)
# END Map across rows/columns
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def map_across_full_axis_select_indices(
self, axis, func, indices, keep_remaining=False
):
"""Maps function to select indices along full axis.
Args:
axis: 0 for columns and 1 for rows.
func: Callable mapping function over the BlockPartitions.
indices: indices along axis to map over.
keep_remaining: True if keep indices where function was not applied.
Returns:
BaseBlockPartitions containing the result of mapping func over axis on indices.
"""
return self.data.apply_func_to_select_indices_along_full_axis(
axis, func, indices, keep_remaining
)
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or
|
is_datetime_or_timedelta_dtype(dtype)
|
pandas.core.dtypes.common.is_datetime_or_timedelta_dtype
|
import importlib
import sys
import numpy as np
import pandas as pd
import pytest
from sweat import utils
from sweat.metrics import power
@pytest.fixture()
def reload_power_module():
yield
key_values = [(key, value) for key, value in sys.modules.items()]
for key, value in key_values:
if (
key.startswith("sweat.hrm")
or key.startswith("sweat.pdm")
or key.startswith("sweat.metrics")
):
importlib.reload(value)
def test_enable_type_casting_module(reload_power_module):
pwr = [1, 2, 3]
wap = [1, 2, 3]
weight = 80
threshold_power = 80
assert isinstance(power.wpk(np.asarray(pwr), weight), np.ndarray)
assert isinstance(
power.relative_intensity(np.asarray(wap), threshold_power), np.ndarray
)
with pytest.raises(TypeError):
power.wpk(pwr, weight)
with pytest.raises(TypeError):
power.relative_intensity(wap, threshold_power)
doc_string = power.wpk.__doc__
utils.enable_type_casting(power)
assert isinstance(power.wpk(pwr, weight), list)
assert isinstance(power.wpk(pd.Series(pwr), weight), pd.Series)
assert isinstance(power.wpk(np.asarray(pwr), weight), np.ndarray)
assert isinstance(power.relative_intensity(wap, threshold_power), list)
assert isinstance(
power.relative_intensity(
|
pd.Series(wap)
|
pandas.Series
|
"""
Developed by: <NAME>
This module is used to read and process 3-hourly UTCI output derived from CMIP6 models.
Function call
=============
The main wrapper function is::
calc_percentile_difference()
This calculates the percentile values (default 95th) for each scenario within a given
time period (default 30 years from 2071-2100) and subtracts the same percentile from
historical data (default 30 years from 1985-2014). It does this monthly over the 30 year
time period and produces a DataArray (output netcdf file) containing 12 monthly values for
each lat, lon, model and scenario.
This can be called as::
calc_percentile_difference(output_path)
where `output_path` specifies where to write the output data file.
This file can be run as::
$ python utci_diff.py
This will call the __main__ block below and output to the output_path specified there
(to our project10 gws for the hackathon).
Note: ** At the moment this is hardwired to expect certain models and scenarios but this
can be updated either within the function or additional inputs added to allow this to
be updated **
Expected data structure and files
=================================
Expected directory structure for these files::
model/scenario/run_name/
e.g. BCC-CSM2-MR/historical/r1i1p1f1/
Example filename::
utci_3hr_BCC-CSM2-MR_historical_r1i1p1f1_gn_198501010300-198601010000.nc
The date string of the form YYYYMMDDhhmmss-YYYYMMDDhhmmss (e.g. 198501010300-198601010000)
is expected and used when extracting the files with an input date range. This is also
expected as a netcdf (.nc) file.
File should contain (at least) the "utci" variable with (time, lat, lon) dimensions.
"""
import os
import re
import numpy as np
import pandas as pd
import xarray as xr
from pathlib import Path
data_path = Path("/gws/pw/j05/cop26_hackathons/bristol/project10/utci_projections_1deg")
def define_path(model, scenario, base_path=data_path, run="r1i1p1f1"):
''' Define path to each set of 3hr UTCI input files '''
path = Path(os.path.join(base_path, model, scenario, run))
return path
def extract_files(filenames, start, end):
'''
Extract filenames within a given date range.
Expect filename strings of the format:
*YYYYMMDDhhmmss-YYYYMMDDhhmmss*.nc
e.g. ./utci_3hr_BCC-CSM2-MR_historical_r1i1p1f1_gn_198501010300-198601010000.nc
Input:
filenames (list) :
List of filenames extracted from a folder
start, end (str) :
Start and end date to use for filtering the file names.
Will definitely work when start and end are given as "YYYY".
Should also work for other recognised pandas formats e.g.
"YYYY-MM-DD".
Returns:
list :
List of filenames within the date range specified. Filtered
from input filenames.
ValueError:
No files are found in that date range
TODO: Only the start date extracted from the file name is used at present to
filter the filenames. Could also use the end date.
'''
# dateformat example "198501010300-198601010000"
# Expect input start and end date as string e.g. "2013" for now
start = pd.to_datetime(start)#, format="%Y")
end = pd.to_datetime(end)#, format="%Y")
filenames_match = []
for filename in filenames:
filename = str(filename)
try:
re_str = "\d{12}[-]\d{12}"
d = re.search(re_str, filename)
d = d.group() # Extract value from regular expression compiler
except AttributeError:
pass
else:
s = d.split('-')[0]
s =
|
pd.to_datetime(s, format="%Y%m%d%H%M%S")
|
pandas.to_datetime
|
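# Editorial sketch (hypothetical filenames): the date filter in `extract_files`
# above reduces to pulling the 12-digit start stamp out of each name with a
# regex and comparing it as a pandas Timestamp against the requested range.
import re
import pandas as pd
_sketch_names = ["utci_3hr_MODEL_historical_r1i1p1f1_gn_198501010300-198601010000.nc"]
_sketch_start, _sketch_end = pd.to_datetime("1985"), pd.to_datetime("2014")
_sketch_kept = []
for _sketch_name in _sketch_names:
    _sketch_match = re.search(r"\d{12}-\d{12}", _sketch_name)
    if _sketch_match:
        _sketch_file_start = pd.to_datetime(_sketch_match.group().split("-")[0], format="%Y%m%d%H%M")
        if _sketch_start <= _sketch_file_start <= _sketch_end:
            _sketch_kept.append(_sketch_name)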
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
import glob
import pandas as pd
import numpy as np
from sklearn import decomposition
import deprecated
import logging
sys.path.append(root_path)
from config.globalLog import logger
def generate_monoscale_samples(source_file, save_path, lags_dict, column, test_len, lead_time=1,regen=False):
"""Generate learning samples for autoregression problem using original time series.
Args:
'source_file' -- ['String'] The source data file path.
'save_path' --['String'] The path to restore the training, development and testing samples.
'lags_dict' -- ['int dict'] The lagged time for original time series.
'column' -- ['String'] The column's name used to read the source data with pandas.
'test_len' --['int'] The length of development and testing set.
'lead_time' --['int'] The lead time.
"""
logger.info('Generating monoscale autoregression learning samples')
save_path = save_path+'/'+str(lead_time)+'_ahead_pacf/'
logger.info('Source file:{}'.format(source_file))
logger.info('Save path:{}'.format(save_path))
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# Load data from local disk
if '.xlsx' in source_file:
dataframe = pd.read_excel(source_file)[column]
elif '.csv' in source_file:
dataframe = pd.read_csv(source_file)[column]
# convert pandas dataframe to numpy array
nparr = np.array(dataframe)
# Create an empty pandas Dataframe
full_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
lag = lags_dict['ORIG']
for i in range(lag):
x = pd.DataFrame(nparr[i:dataframe.shape[0] -
(lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
full_samples = pd.concat([full_samples, x], axis=1, sort=False)
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])
label = label.reset_index(drop=True)
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Add labled data to full_data_set
full_samples = pd.concat([full_samples, label], axis=1, sort=False)
# Get the length of this series
series_len = full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(series_len - test_len)]
# Get the testing set.
test_samples = full_samples[(series_len - test_len):series_len]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(series_len - test_len - test_len)]
dev_samples = full_samples[(
series_len - test_len - test_len):(series_len - test_len)]
assert (train_samples.shape[0] + dev_samples.shape[0] +
test_samples.shape[0]) == series_len
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Series length:{}'.format(series_len))
logger.info('Series length:{}'.format(series_len))
logger.info(
'Training-development sample size:{}'.format(train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
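# Editorial sketch (toy series): the lagged-input construction above turns a
# 1-D series into an (X1..Xlag, Y) table where Y sits `lead_time` steps ahead
# of the last lagged input.
import numpy as np
import pandas as pd
_sketch_series = np.arange(10.0)
_sketch_lag, _sketch_lead = 3, 1
_sketch_X = pd.DataFrame(
    {"X" + str(i + 1): _sketch_series[i:len(_sketch_series) - (_sketch_lag - i)] for i in range(_sketch_lag)}
)
_sketch_Y = pd.Series(_sketch_series[_sketch_lag + _sketch_lead - 1:], name="Y")
_sketch_samples = pd.concat([_sketch_X.iloc[:len(_sketch_Y)].reset_index(drop=True), _sketch_Y], axis=1)
# first row: X1=0, X2=1, X3=2, Y=3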
def gen_one_step_hindcast_samples(station, decomposer, lags_dict, input_columns, output_column, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step hindcast decomposition-ensemble learning samples.
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for each subsignal.
'input_columns'-- ['string list'] The input columns' names used for generating the learning samples.
'output_column'-- ['string'] The output column's name used for generating the learning samples.
'test_len'-- ['int'] The size of the development and testing samples.
"""
logger.info('Generating one-step decomposition ensemble hindcasting samples')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local disk
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_"+str(lead_time)+"_ahead_hindcast_pacf/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
decompose_file = data_path+decomposer.upper()+"_FULL.csv"
decompositions = pd.read_csv(decompose_file)
# Drop NaN
decompositions.dropna()
# Get the input data (the decompositions)
input_data = decompositions[input_columns]
# Get the output data (the original time series)
output_data = decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
samples_size = data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input colmuns for each subsignal
full_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
oness = pd.DataFrame()
lag = lags_dict[input_columns[i]]
for j in range(lag):
x = pd.DataFrame(one_in[j:data_size-(lag-j)],
columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
# Make the sample size of each subsignal identical
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
full_samples = pd.concat([full_samples, oness], axis=1, sort=False)
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
# Concat the features and target
full_samples = pd.concat([full_samples, target], axis=1, sort=False)
full_samples = pd.DataFrame(full_samples.values, columns=samples_cols)
full_samples.to_csv(save_path+'full_samples.csv')
assert samples_size == full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(samples_size - test_len)]
# Get the testing set.
test_samples = full_samples[(samples_size - test_len):samples_size]
# train_dev_len = train_dev_samples.shape[0]
train_samples = full_samples[0:(samples_size - test_len - test_len)]
dev_samples = full_samples[(
samples_size - test_len - test_len):(samples_size - test_len)]
assert (train_samples['X1'].size + dev_samples['X1'].size +
test_samples['X1'].size) == samples_size
# Get the max and min value of training set
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Save path:{}'.format(save_path))
logger.info('Series length:{}'.format(samples_size))
logger.info('Training and development sample size:{}'.format(
train_dev_samples.shape[0]))
logger.info('Training sample size:{}'.format(train_samples.shape[0]))
logger.info('Development sample size:{}'.format(dev_samples.shape[0]))
logger.info('Testing sample size:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+'norm_unsample_id.csv')
train_samples.to_csv(save_path + 'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path + 'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
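# Editorial sketch (toy frames): every generator in this module scales all
# splits with the *training* min/max, so the training set lands in [-1, 1] and
# the development/test sets get the same affine transform (and may fall
# slightly outside that range).
import pandas as pd
_sketch_train = pd.DataFrame({"X1": [0.0, 5.0, 10.0], "Y": [1.0, 2.0, 3.0]})
_sketch_test = pd.DataFrame({"X1": [2.0, 12.0], "Y": [1.5, 3.5]})
_sketch_max, _sketch_min = _sketch_train.max(axis=0), _sketch_train.min(axis=0)
_sketch_train_norm = 2 * (_sketch_train - _sketch_min) / (_sketch_max - _sketch_min) - 1
_sketch_test_norm = 2 * (_sketch_test - _sketch_min) / (_sketch_max - _sketch_min) - 1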
def gen_one_step_forecast_samples_triandev_test(station, decomposer, lags_dict, input_columns, output_column, start, stop, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
Generate one step forecast decomposition-ensemble samples.
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
'lags_dict'-- ['int dict'] The lagged time for subsignals.
'input_columns'-- ['string list'] The input columns' names used to read the source data with pandas.
'output_column'-- ['string'] The output column's name used to read the source data with pandas.
'start'-- ['int'] The start index of appended decomposition file.
'stop'-- ['int'] The stop index of the appended decomposition file.
'test_len'-- ['int'] The size of development and testing samples.
"""
logger.info(
'Generating one-step decomposition ensemble forecasting samples (traindev-test pattern)')
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
# Load data from local disk
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf_traindev_test/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# !!!!!!Generate training samples
traindev_decompose_file = data_path+decomposer.upper()+"_TRAINDEV.csv"
traindev_decompositions = pd.read_csv(traindev_decompose_file)
# Drop NaN
traindev_decompositions.dropna()
# Get the input data (the decompositions)
traindev_input_data = traindev_decompositions[input_columns]
# Get the output data (the original time series)
traindev_output_data = traindev_decompositions[output_column]
# Get the number of input features
subsignals_num = traindev_input_data.shape[1]
# Get the data size
traindev_data_size = traindev_input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
traindev_samples_size = traindev_data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
# Generate input colmuns for each input feature
train_dev_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one input feature
one_in = (traindev_input_data[input_columns[i]]).values # subsignal
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame() # restore input features
for j in range(lag):
x = pd.DataFrame(one_in[j:traindev_data_size-(lag-j)],
columns=['X' + str(j + 1)])['X' + str(j + 1)]
x = x.reset_index(drop=True)
oness = pd.DataFrame(pd.concat([oness, x], axis=1))
oness = oness.iloc[oness.shape[0]-traindev_samples_size:]
oness = oness.reset_index(drop=True)
train_dev_samples = pd.DataFrame(
pd.concat([train_dev_samples, oness], axis=1))
# Get the target
target = (traindev_output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
train_dev_samples = train_dev_samples[:traindev_samples_size-(lead_time-1)]
train_dev_samples = train_dev_samples.reset_index(drop=True)
# Concat the features and target
train_dev_samples = pd.concat([train_dev_samples, target], axis=1)
train_dev_samples = pd.DataFrame(
train_dev_samples.values, columns=samples_cols)
train_dev_samples.to_csv(save_path+'train_dev_samples.csv')
train_samples = train_dev_samples[:train_dev_samples.shape[0]-120]
dev_samples = train_dev_samples[train_dev_samples.shape[0]-120:]
assert traindev_samples_size == train_dev_samples.shape[0]
# normalize the train_samples
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = pd.DataFrame()
appended_file_path = data_path+decomposer+"-test/"
for k in range(start, stop+1):
# Load data from local disk
appended_decompositions = pd.read_csv(
appended_file_path+decomposer+'_appended_test'+str(k)+'.csv')
# Drop NaN
appended_decompositions.dropna()
# Get the input data (the decompositions)
input_data = appended_decompositions[input_columns]
# Get the output data (the original time series)
output_data = appended_decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
samples_size = data_size-max_lag
# Generate input colmuns for each subsignal
appended_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame()
for j in range(lag):
x = pd.DataFrame(
one_in[j:data_size-(lag-j)], columns=['X' + str(j + 1)])['X' + str(j + 1)]
x = x.reset_index(drop=True)
oness = pd.DataFrame(pd.concat([oness, x], axis=1))
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
appended_samples = pd.DataFrame(
pd.concat([appended_samples, oness], axis=1))
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
appended_samples = appended_samples[:
appended_samples.shape[0]-(lead_time-1)]
appended_samples = appended_samples.reset_index(drop=True)
# Concat the features and target
appended_samples = pd.concat([appended_samples, target], axis=1)
appended_samples = pd.DataFrame(
appended_samples.values, columns=samples_cols)
# Get the last sample of full samples
last_sample = appended_samples.iloc[appended_samples.shape[0]-1:]
test_samples = pd.concat([test_samples, last_sample], axis=0)
test_samples = test_samples.reset_index(drop=True)
test_samples.to_csv(save_path+'test_samples.csv')
test_samples = 2*(test_samples-series_min)/(series_max-series_min)-1
assert test_len == test_samples.shape[0]
logger.info('Save path:{}'.format(save_path))
logger.info('The size of training samples:{}'.format(
train_samples.shape[0]))
logger.info('The size of development samples:{}'.format(
dev_samples.shape[0]))
logger.info('The size of testing samples:{}'.format(test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+"norm_unsample_id.csv")
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
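# Illustrative helper (not part of the original module): the samples above are scaled to
# [-1, 1] with x_norm = 2*(x - min)/(max - min) - 1 and the per-column min/max are written to
# norm_unsample_id.csv, so model predictions have to be mapped back to original units with the
# inverse transform sketched below before computing evaluation metrics.
def _inverse_minmax_sketch(normalized, series_min, series_max):
    """Undo the [-1, 1] min-max scaling applied to the learning samples."""
    return (normalized + 1) / 2 * (series_max - series_min) + series_min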
def gen_one_step_forecast_samples(station, decomposer, lags_dict, input_columns, output_column, start, stop, test_len,
wavelet_level="db10-2", lead_time=1, mode='PACF', pre_times=20, filter_boundary=0.2, n_components=None,regen=False):
"""
    Generate one-step forecast decomposition-ensemble samples based on the
    partial autocorrelation function (PACF) or the Pearson correlation coefficient.
    Set n_components to 'mle' or an integer to perform principal component analysis (PCA).
Args:
'station'-- ['string'] The station where the original time series come from.
'decomposer'-- ['string'] The decomposition algorithm used for decomposing the original time series.
    'lags_dict'-- ['int dict'] The lag (number of past time steps) for each subsignal in 'PACF' mode.
    'input_columns'-- ['string list'] The input columns' names used to read the source data with pandas.
    'output_column'-- ['string'] The output column's name used to read the source data with pandas.
'start'-- ['int'] The start index of appended decomposition file.
'stop'-- ['int'] The stop index of appended decomposition file.
'test_len'-- ['int'] The size of development and testing samples.
'wavelet_level'-- ['String'] The mother wavelet and decomposition level of DWT.
'lead_time'-- ['int'] The lead time for auto regression models.
    'mode'-- ['String'] The sample generation mode, i.e., "PACF" or "Pearson", for autoregression models.
    'pre_times'-- ['int'] The number of lags used to compute the Pearson correlation coefficient.
    'filter_boundary'-- ['float'] The Pearson correlation threshold for selecting input predictors.
'n_components'-- ['String or int'] The number of reserved components in PCA. If n_components is set to None, PCA will not be performed.
"""
logger.info(
"Generateing one-step decomposition ensemble forecasting samples (train-devtest pattern)")
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Input columns:{}'.format(input_columns))
logger.info('Output column:{}'.format(output_column))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
logger.info('Generation mode:{}'.format(mode))
logger.info('Selected previous lag times:{}'.format(pre_times))
logger.info(
'Filter threshold of predictors selection:{}'.format(filter_boundary))
logger.info('Number of components for PCA:{}'.format(n_components))
# Load data from local dick
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
if mode == 'PACF' and n_components == None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf/"
elif mode == 'PACF' and n_components != None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pacf_pca"+str(n_components)+"/"
elif mode == 'Pearson' and n_components == None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pearson"+str(filter_boundary)+"/"
elif mode == 'Pearson' and n_components != None:
save_path = data_path+"one_step_" + \
str(lead_time)+"_ahead_forecast_pearson" + \
str(filter_boundary)+"_pca"+str(n_components)+"/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
# !!!!!!Generate training samples
if mode == 'PACF':
train_decompose_file = data_path+decomposer.upper()+"_TRAIN.csv"
train_decompositions = pd.read_csv(train_decompose_file)
# Drop NaN
train_decompositions.dropna()
# Get the input data (the decompositions)
train_input_data = train_decompositions[input_columns]
# Get the output data (the original time series)
train_output_data = train_decompositions[output_column]
# Get the number of input features
subsignals_num = train_input_data.shape[1]
# Get the data size
train_data_size = train_input_data.shape[0]
# Compute the samples size
max_lag = max(lags_dict.values())
logger.debug('max lag:{}'.format(max_lag))
train_samples_size = train_data_size-max_lag
# Generate feature columns
samples_cols = []
for i in range(sum(lags_dict.values())):
samples_cols.append('X'+str(i+1))
samples_cols.append('Y')
            # Generate input columns for each input feature
train_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one input feature
one_in = (train_input_data[input_columns[i]]).values # subsignal
lag = lags_dict[input_columns[i]]
logger.debug('lag:{}'.format(lag))
                oness = pd.DataFrame() # store input features
for j in range(lag):
x = pd.DataFrame(one_in[j:train_data_size-(lag-j)], columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
logger.debug("oness:\n{}".format(oness))
oness = oness.iloc[oness.shape[0]-train_samples_size:]
oness = oness.reset_index(drop=True)
train_samples = pd.concat([train_samples, oness], axis=1, sort=False)
# Get the target
target = (train_output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
# Concat the features and target
train_samples = train_samples[:train_samples.shape[0]-(lead_time-1)]
train_samples = train_samples.reset_index(drop=True)
train_samples = pd.concat([train_samples, target], axis=1)
train_samples = pd.DataFrame(train_samples.values, columns=samples_cols)
train_samples.to_csv(save_path+'train_samples.csv')
# assert train_samples_size == train_samples.shape[0]
# !!!!!!!!!!!Generate development and testing samples
dev_test_samples = pd.DataFrame()
appended_file_path = data_path+decomposer+"-test/"
for k in range(start, stop+1):
# Load data from local dick
appended_decompositions = pd.read_csv(
appended_file_path+decomposer+'_appended_test'+str(k)+'.csv')
# Drop NaN
appended_decompositions.dropna()
# Get the input data (the decompositions)
input_data = appended_decompositions[input_columns]
# Get the output data (the original time series)
output_data = appended_decompositions[output_column]
# Get the number of input features
subsignals_num = input_data.shape[1]
# Get the data size
data_size = input_data.shape[0]
# Compute the samples size
samples_size = data_size-max_lag
                # Generate input columns for each subsignal
appended_samples = pd.DataFrame()
for i in range(subsignals_num):
# Get one subsignal
one_in = (input_data[input_columns[i]]).values
lag = lags_dict[input_columns[i]]
oness = pd.DataFrame()
for j in range(lag):
x = pd.DataFrame(
one_in[j:data_size-(lag-j)], columns=['X' + str(j + 1)])
x = x.reset_index(drop=True)
oness = pd.concat([oness, x], axis=1, sort=False)
oness = oness.iloc[oness.shape[0]-samples_size:]
oness = oness.reset_index(drop=True)
appended_samples = pd.concat(
[appended_samples, oness], axis=1, sort=False)
# Get the target
target = (output_data.values)[max_lag+lead_time-1:]
target = pd.DataFrame(target, columns=['Y'])
# Concat the features and target
appended_samples = appended_samples[:
appended_samples.shape[0]-(lead_time-1)]
appended_samples = appended_samples.reset_index(drop=True)
appended_samples = pd.concat(
[appended_samples, target], axis=1, sort=False)
appended_samples = pd.DataFrame(
appended_samples.values, columns=samples_cols)
# Get the last sample of full samples
last_sample = appended_samples.iloc[appended_samples.shape[0]-1:]
dev_test_samples = pd.concat(
[dev_test_samples, last_sample], axis=0)
dev_test_samples = dev_test_samples.reset_index(drop=True)
dev_test_samples.to_csv(save_path+'dev_test_samples.csv')
dev_samples = dev_test_samples.iloc[0: dev_test_samples.shape[0]-test_len]
test_samples = dev_test_samples.iloc[dev_test_samples.shape[0]-test_len:]
if n_components != None:
                logger.info('Perform PCA on samples based on PACF')
samples = pd.concat([train_samples, dev_samples, test_samples], axis=0, sort=False)
samples = samples.reset_index(drop=True)
y = samples['Y']
X = samples.drop('Y', axis=1)
logger.debug('X contains Nan:{}'.format(X.isnull().values.any()))
logger.debug("Input features before PAC:\n{}".format(X))
pca = decomposition.PCA(n_components=n_components)
pca.fit(X)
pca_X = pca.transform(X)
columns = []
for i in range(1, pca_X.shape[1]+1):
columns.append('X'+str(i))
pca_X = pd.DataFrame(pca_X, columns=columns)
logger.debug("Input features after PAC:\n{}".format(pca_X.tail()))
pca_samples = pd.concat([pca_X, y], axis=1)
train_samples = pca_samples.iloc[:train_samples.shape[0]]
train_samples = train_samples.reset_index(drop=True)
logger.debug('Training samples after PCA:\n{}'.format(train_samples))
dev_samples = pca_samples.iloc[train_samples.shape[0]:train_samples.shape[0]+dev_samples.shape[0]]
dev_samples = dev_samples.reset_index(drop=True)
logger.debug('Development samples after PCA:\n{}'.format(dev_samples))
test_samples = pca_samples.iloc[train_samples.shape[0] +dev_samples.shape[0]:]
test_samples = test_samples.reset_index(drop=True)
logger.debug('Testing samples after PCA:\n{}'.format(test_samples))
# Normalize each series to the range between -1 and 1
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples-series_min) / \
(series_max-series_min) - 1
test_samples = 2 * (test_samples-series_min) / \
(series_max-series_min) - 1
logger.info('Save path:{}'.format(save_path))
logger.info('The size of training samples:{}'.format(
train_samples.shape[0]))
logger.info('The size of development samples:{}'.format(
dev_samples.shape[0]))
logger.info('The size of testing samples:{}'.format(
test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+"norm_unsample_id.csv")
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
elif mode == 'Pearson':
# lag pre_times+lead_time(e.g.,30+3)
lag = pre_times+lead_time
pre_cols = []
for i in range(1, pre_times+1):
pre_cols.append("X"+str(i))
logger.debug("Previous columns of lagged months:\n{}".format(pre_cols))
train_decompose_file = data_path+decomposer.upper()+"_TRAIN.csv"
train_decompositions = pd.read_csv(train_decompose_file)
orig = train_decompositions[output_column][lag:]
orig = orig.reset_index(drop=True)
selected = {}
input_df = pd.DataFrame()
for col in input_columns:
logger.debug("Perform subseries:{}".format(col))
subsignal = np.array(train_decompositions[col])
inputs = pd.DataFrame()
for k in range(lag):
x = pd.DataFrame(
subsignal[k:subsignal.size-(lag-k)], columns=["X"+str(k+1)])["X"+str(k+1)]
x = x.reset_index(drop=True)
inputs = pd.DataFrame(pd.concat([inputs, x], axis=1))
pre_inputs = inputs[pre_cols]
logger.debug("Previous inputs:\n{}".format(pre_inputs.head()))
partin_out = pd.concat([pre_inputs, orig], axis=1)
logger.debug(
"Partial inputs and output:\n{}".format(partin_out.head()))
corrs = partin_out.corr(method="pearson")
logger.debug("Entire pearson coefficients:\n{}".format(corrs))
corrs = (corrs[output_column]).iloc[0:corrs.shape[0]-1]
orig_corrs = corrs.squeeze()
logger.debug("Selected pearson coefficients:\n{}".format(orig_corrs))
bools = abs(orig_corrs) >= filter_boundary
logger.debug("Conditions judge:{}".format(bools))
select = list((orig_corrs.loc[bools == True]).index.values)
logger.debug("Selected inputs:\n{}".format(select))
selected[col] = select
input_df = pd.concat([input_df, pre_inputs[select]], axis=1)
logger.debug("Selected inputs:\n{}".format(selected))
logger.debug("Entire inputs:\n{}".format(input_df.head()))
columns = []
for i in range(0, input_df.shape[1]):
columns.append("X"+str(i+1))
columns.append("Y")
train_samples = pd.DataFrame(
(pd.concat([input_df, orig], axis=1)).values, columns=columns)
dev_test_samples = pd.DataFrame()
for i in range(start, stop+1):
append_decompositions = pd.read_csv(
data_path+decomposer+"-test/"+decomposer+"_appended_test"+str(i)+".csv")
append_orig = append_decompositions[output_column][lag:]
append_orig = append_orig.reset_index(drop=True)
append_input_df = pd.DataFrame()
for col in input_columns:
append_subsignal = np.array(append_decompositions[col])
append_inputs = pd.DataFrame()
for k in range(lag):
x = pd.DataFrame(
append_subsignal[k:append_subsignal.size-(lag-k)], columns=["X"+str(k+1)])["X"+str(k+1)]
x = x.reset_index(drop=True)
append_inputs = pd.concat([append_inputs, x], axis=1)
append_input_df = pd.concat(
[append_input_df, append_inputs[selected[col]]], axis=1)
append_samples = pd.concat([append_input_df, append_orig], axis=1)
append_samples = pd.DataFrame(append_samples.values, columns=columns)
last_sample = append_samples.iloc[append_samples.shape[0]-1:]
dev_test_samples = pd.concat(
[dev_test_samples, last_sample], axis=0)
dev_test_samples = dev_test_samples.reset_index(drop=True)
dev_samples = dev_test_samples.iloc[0:
dev_test_samples.shape[0]-test_len]
test_samples = dev_test_samples.iloc[dev_test_samples.shape[0]-test_len:]
dev_samples = dev_samples.reset_index(drop=True)
test_samples = test_samples.reset_index(drop=True)
# Perform PCA on samples based on Pearson
if n_components != None:
                logger.info('Perform PCA on samples based on Pearson')
samples = pd.concat(
[train_samples, dev_samples, test_samples], axis=0, sort=False)
samples = samples.reset_index(drop=True)
y = samples['Y']
X = samples.drop('Y', axis=1)
logger.debug("Input features before PAC:\n{}".format(X.tail()))
pca = decomposition.PCA(n_components=n_components)
pca.fit(X)
pca_X = pca.transform(X)
columns = []
for i in range(1, pca_X.shape[1]+1):
columns.append('X'+str(i))
pca_X = pd.DataFrame(pca_X, columns=columns)
logger.debug("Input features after PAC:\n{}".format(pca_X.tail()))
pca_samples = pd.concat([pca_X, y], axis=1)
train_samples = pca_samples.iloc[:train_samples.shape[0]]
train_samples = train_samples.reset_index(drop=True)
dev_samples = pca_samples.iloc[train_samples.shape[0]:train_samples.shape[0]+dev_samples.shape[0]]
dev_samples = dev_samples.reset_index(drop=True)
test_samples = pca_samples.iloc[train_samples.shape[0] +dev_samples.shape[0]:]
test_samples = test_samples.reset_index(drop=True)
# Normalize the samples
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2*(dev_samples-series_min)/(series_max-series_min)-1
test_samples = 2*(test_samples-series_min)/(series_max-series_min)-1
# Save results
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(save_path+"norm_unsample_id.csv")
train_samples.to_csv(save_path+'minmax_unsample_train.csv', index=None)
dev_samples.to_csv(save_path+'minmax_unsample_dev.csv', index=None)
test_samples.to_csv(save_path+'minmax_unsample_test.csv', index=None)
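# A minimal standalone sketch (not part of the original module, relying on the pandas/numpy
# imports this module already uses) of the Pearson-based predictor selection performed in the
# 'Pearson' branch above: build `pre_times` lagged copies of one sub-signal and keep only the
# lags whose absolute correlation with the target reaches `filter_boundary`. For brevity it
# ignores `lead_time`, which the function above additionally applies to the target.
def _select_lags_by_pearson_sketch(sub_signal, target, pre_times=20, filter_boundary=0.2):
    sub_signal = np.asarray(sub_signal)
    target = np.asarray(target)
    # Column X(k+1) holds the sub-signal lagged by (pre_times - k) steps relative to the target
    lagged = pd.DataFrame({'X' + str(k + 1): sub_signal[k:sub_signal.size - (pre_times - k)]
                           for k in range(pre_times)})
    y = pd.Series(target[pre_times:], name='Y')
    corrs = lagged.apply(lambda col: col.corr(y, method='pearson'))
    keep = [col for col in lagged.columns if abs(corrs[col]) >= filter_boundary]
    return lagged[keep], keep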
def gen_multi_step_hindcast_samples(station, decomposer, lags_dict, columns, test_len,
wavelet_level="db10-2", lead_time=1,regen=False):
"""
    Generate multi-step learning samples for the autoregression problem.
    This program can also generate the source CSV file for .tfrecords file generation.
Args:
    -station: The station where the original time series was observed.
    -decomposer: The decomposition algorithm for decomposing the original time series.
    -lags_dict: The lags for autoregression.
    -columns: The columns' names used to read the source data with pandas.
    -save_path: The path to store the training, development and testing samples.
    -test_len: The length of the validation (development or testing) set.
"""
logger.info(
"Generating muliti-step decompositionensemble hindcasting samples")
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Signals:{}'.format(columns))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"multi_step_"+str(lead_time)+"_ahead_hindcast_pacf/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
decompose_file = data_path+decomposer.upper()+"_FULL.csv"
decompositions = pd.read_csv(decompose_file)
for k in range(len(columns)):
lag = lags_dict[columns[k]]
if lag == 0:
logger.info("The lag of sub-signal({:.0f})".format(k+1)+" equals to 0")
continue
# Obtain decomposed sub-signal
sub_signal = decompositions[columns[k]]
# convert pandas dataframe to numpy array
nparr = np.array(sub_signal)
# Create an empty pandas Dataframe
full_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
for i in range(lag):
x = pd.DataFrame(
nparr[i:sub_signal.shape[0] - (lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
full_samples = pd.DataFrame(pd.concat([full_samples, x], axis=1))
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])['Y']
label = label.reset_index(drop=True)
full_samples = full_samples[:full_samples.shape[0]-(lead_time-1)]
full_samples = full_samples.reset_index(drop=True)
            # Add labeled data to full_data_set
full_samples = pd.concat([full_samples, label], axis=1, sort=False)
# Get the length of this series
series_len = full_samples.shape[0]
# Get the training and developing set
train_dev_samples = full_samples[0:(series_len - test_len)]
# Get the testing set.
test_samples = full_samples[(series_len - test_len):series_len]
# Do sampling if 'sampling' is True
train_samples = full_samples[0:(series_len - test_len - test_len)]
dev_samples = full_samples[(
series_len - test_len - test_len):(series_len - test_len)]
assert (train_samples.shape[0] + dev_samples.shape[0] +
test_samples.shape[0]) == series_len
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
dev_samples = 2 * (dev_samples - series_min) / \
(series_max - series_min) - 1
test_samples = 2 * (test_samples - series_min) / \
(series_max - series_min) - 1
logger.info('Series length:{}'.format(series_len))
logger.info('Save path:{}'.format(save_path))
logger.info('The size of training and development samples:{}'.format(
train_dev_samples.shape[0]))
logger.info('The size of training samples:{}'.format(
train_samples.shape[0]))
logger.info('The size of development samples:{}'.format(
dev_samples.shape[0]))
logger.info('The size of testing samples:{}'.format(
test_samples.shape[0]))
series_max = pd.DataFrame(series_max, columns=['series_max'])
series_min = pd.DataFrame(series_min, columns=['series_min'])
normalize_indicators = pd.concat([series_max, series_min], axis=1)
normalize_indicators.to_csv(
save_path+'norm_unsample_id_imf'+str(k+1)+'.csv')
train_samples.to_csv(
save_path+'minmax_unsample_train_imf'+str(k+1)+'.csv', index=None)
dev_samples.to_csv(
save_path+'minmax_unsample_dev_imf'+str(k+1)+'.csv', index=None)
test_samples.to_csv(
save_path+'minmax_unsample_test_imf'+str(k+1)+'.csv', index=None)
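# Worked mini-example of the lag/lead-time construction used in the loop above (values are
# illustrative): for a sub-signal s = [s1, s2, s3, s4, s5] with lag = 2 and lead_time = 1 the
# generated samples are
#   X1  X2  Y
#   s1  s2  s3
#   s2  s3  s4
#   s3  s4  s5
# With lead_time = 2 the label slice `nparr[lag+lead_time-1:]` starts at s4 and the feature
# rows are truncated by `[:shape[0]-(lead_time-1)]`, giving (s1, s2, Y=s4) and (s2, s3, Y=s5).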
def gen_multi_step_forecast_samples(station, decomposer, lags_dict, columns, start, stop, test_len, wavelet_level="db10-2", lead_time=1,regen=False):
"""
    Generate multi-step training samples for the autoregression problem.
    This program can also generate the source CSV file for .tfrecords file generation.
Args:
    -station: The station where the original time series was observed.
    -decomposer: The decomposition algorithm for decomposing the original time series.
    -lags_dict: The lags for autoregression.
    -columns: The columns' names used to read the source data with pandas.
    -save_path: The path to save the training samples.
"""
logger.info(
"Generating muliti-step decompositionensemble forecasting samples")
logger.info('Station:{}'.format(station))
logger.info('Decomposer:{}'.format(decomposer))
logger.info('Lags_dict:{}'.format(lags_dict))
logger.info('Signals:{}'.format(columns))
logger.info('Validation start index:{}'.format(start))
logger.info('Validation stop index:{}'.format(stop))
logger.info('Testing sample length:{}'.format(test_len))
logger.info(
'Mother wavelet and decomposition level:{}'.format(wavelet_level))
logger.info('Lead time:{}'.format(lead_time))
if decomposer == "dwt" or decomposer == 'modwt':
data_path = root_path+"/"+station+"_"+decomposer+"/data/"+wavelet_level+"/"
else:
data_path = root_path+"/"+station+"_"+decomposer+"/data/"
save_path = data_path+"multi_step_"+str(lead_time)+"_ahead_forecast_pacf/"
if not os.path.exists(save_path):
os.makedirs(save_path)
if len(os.listdir(save_path))>0 and not regen:
logger.info('Learning samples have been generated!')
else:
logger.info("Save path:{}".format(save_path))
# !!!!!!!!!!Generate training samples
train_decompose_file = data_path+decomposer.upper()+"_TRAIN.csv"
train_decompositions = pd.read_csv(train_decompose_file)
train_decompositions.dropna()
for k in range(len(columns)):
lag = lags_dict[columns[k]]
if lag == 0:
logger.info("The lag of sub-signal({:.0f})".format(k+1)+" equals to 0")
continue
# Generate sample columns
samples_columns = []
for l in range(1, lag+1):
samples_columns.append('X'+str(l))
samples_columns.append('Y')
# Obtain decomposed sub-signal
sub_signal = train_decompositions[columns[k]]
# convert pandas dataframe to numpy array
nparr = np.array(sub_signal)
# Create an empty pandas Dataframe
train_samples = pd.DataFrame()
# Generate input series based on lag and add these series to full dataset
for i in range(lag):
x = pd.DataFrame(
nparr[i:sub_signal.shape[0] - (lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
train_samples = pd.DataFrame(pd.concat([train_samples, x], axis=1))
# Generate label data
label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])['Y']
label = label.reset_index(drop=True)
train_samples = train_samples[:train_samples.shape[0]-(lead_time-1)]
train_samples = train_samples.reset_index(drop=True)
            # Add labeled data to full_data_set
train_samples = pd.concat([train_samples, label], axis=1, sort=False)
# Do sampling if 'sampling' is True
# Get the max and min value of each series
series_max = train_samples.max(axis=0)
series_min = train_samples.min(axis=0)
# Normalize each series to the range between -1 and 1
train_samples = 2 * (train_samples - series_min) / \
(series_max - series_min) - 1
# !!!!!Generate development and testing samples
dev_test_samples = pd.DataFrame()
appended_file_path = data_path+decomposer+"-test/"
            for j in range(start, stop+1):  # iterate over each appended decomposition result
data = pd.read_csv(appended_file_path+decomposer +
'_appended_test'+str(j)+'.csv')
imf = data[columns[k]]
nparr = np.array(imf)
inputs = pd.DataFrame()
for i in range(lag):
x = pd.DataFrame(
nparr[i:nparr.size - (lag - i)], columns=['X' + str(i + 1)])
x = x.reset_index(drop=True)
inputs = pd.concat([inputs, x], axis=1, sort=False)
                label = pd.DataFrame(nparr[lag+lead_time-1:], columns=['Y'])
#%%
import numpy as np
import scipy as sp
import pandas as pd
import ccutils
#%%
# Set random seed
np.random.seed(42)
# Define number of bootstrap estimates
n_estimates = 10000
# Define percentiles to save
percentiles = [.01, .05, .10, .25, .50, .75, .90, .95, .99]
# Read single cell data
df_micro = pd.read_csv('../../../data/csv_microscopy/' +
'single_cell_microscopy_data.csv')
#%%
# group by date and by IPTG concentration
df_group = df_micro.groupby(['date'])
# Define names for columns in data frame
names = ['date', 'IPTG_uM','operator', 'binding_energy',
'repressor', 'percentile',
'fold_change', 'fold_change_lower', 'fold_change_upper',
'noise', 'noise_lower', 'noise_upper',
'skewness', 'skewness_lower', 'skewness_upper']
# Initialize data frame to save the noise
df_noise = pd.DataFrame(columns=names)
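# Generic sketch (not the original ccutils analysis) of how a bootstrap percentile interval
# for the noise (std/mean) could be obtained with the n_estimates defined above; the column
# name 'fold_change' is an assumption about df_micro:
# >>> fc = df_micro['fold_change'].dropna().values
# >>> boot = np.random.choice(fc, size=(n_estimates, fc.size), replace=True)
# >>> noise_boot = boot.std(axis=1) / boot.mean(axis=1)
# >>> noise_lower, noise_upper = np.percentile(noise_boot, [2.5, 97.5])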
import pandas as pd
import warnings
import os
def get_train_test(PATH="./data/realworld", getScalar=False):
    """
    load data
    """
    warnings.filterwarnings('ignore')
# https://data.seoul.go.kr/dataList/datasetView.do?infId=OA-15245&srvType=F&serviceKind=1¤tPageNo=1&searchValue=&searchKey=null
dirs = PATH + "/rentals"
files = os.listdir(dirs)
files = [file for file in files if '2018' in file]
column_types = {
'대여일자': 'datetime64[ns]',
'대여시간': 'int32',
'대여소번호': 'category',
'대여소명': 'category',
'대여구분코드': 'category',
'성별': 'category',
'연령대코드': 'category',
'이용건수': 'int32',
'운동량': 'float32',
'탄소량': 'float32',
'이동거리': 'int32',
'이동시간': 'int32',
}
try:
        df = pd.read_pickle(PATH + '/dataframes/2018.pkl')
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""PortfolioOpt: Financial Portfolio Optimization
This module provides a set of functions for financial portfolio
optimization, such as construction of Markowitz portfolios, minimum
variance portfolios and tangency portfolios (i.e. maximum Sharpe ratio
portfolios) in Python. The construction of long-only, long/short and
market neutral portfolios is supported."""
import numpy as np
import pandas as pd
import cvxopt as opt
import cvxopt.solvers as optsolvers
import warnings
__all__ = ['markowitz_portfolio',
'min_var_portfolio',
'tangency_portfolio',
'max_ret_portfolio',
'truncate_weights']
def markowitz_portfolio(cov_mat, exp_rets, target_ret,
allow_short=False, market_neutral=False):
"""
Computes a Markowitz portfolio.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
exp_rets: pandas.Series
Expected asset returns (often historical returns).
target_ret: float
Target return of portfolio.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
market_neutral: bool, optional
If 'False' sum of weights equals one.
If 'True' sum of weights equal zero, i.e. create a
market neutral portfolio (implies allow_short=True).
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
if not isinstance(exp_rets, pd.Series):
raise ValueError("Expected returns is not a Series")
if not isinstance(target_ret, float):
raise ValueError("Target return is not a float")
if not cov_mat.index.equals(exp_rets.index):
raise ValueError("Indices do not match")
if market_neutral and not allow_short:
warnings.warn("A market neutral portfolio implies shorting")
allow_short=True
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# exp_rets*x >= target_ret and x >= 0
G = opt.matrix(np.vstack((-exp_rets.values,
-np.identity(n))))
h = opt.matrix(np.vstack((-target_ret,
+np.zeros((n, 1)))))
else:
# exp_rets*x >= target_ret
G = opt.matrix(-exp_rets.values).T
h = opt.matrix(-target_ret)
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
if not market_neutral:
b = opt.matrix(1.0)
else:
b = opt.matrix(0.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights
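# Hedged usage sketch (the asset names, covariances and returns below are made up purely for
# illustration; they are not taken from this library's documentation):
# >>> import pandas as pd
# >>> cov_mat = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]], index=['A', 'B'], columns=['A', 'B'])
# >>> exp_rets = pd.Series([0.05, 0.10], index=['A', 'B'])
# >>> w = markowitz_portfolio(cov_mat, exp_rets, target_ret=0.07)
# >>> round(float(w.sum()), 4)   # long-only weights sum to one
# 1.0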
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights
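# For reference: with shorting allowed (allow_short=True) only the sum-to-one constraint is
# active, and the minimum variance weights have the well-known closed form
#     w = Sigma^{-1} 1 / (1' Sigma^{-1} 1),
# which can be used to cross-check the QP solution, e.g.:
# >>> ones = np.ones(len(cov_mat))
# >>> inv_cov = np.linalg.inv(cov_mat.values)
# >>> w_closed = pd.Series(inv_cov @ ones / (ones @ inv_cov @ ones), index=cov_mat.index)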
def tangency_portfolio(cov_mat, exp_rets, allow_short=False):
"""
Computes a tangency portfolio, i.e. a maximum Sharpe ratio portfolio.
Note: As the Sharpe ratio is not invariant with respect
to leverage, it is not possible to construct non-trivial
    market neutral tangency portfolios. This is because, for
    a positive initial Sharpe ratio, the Sharpe ratio grows unbounded
    with increasing leverage.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
exp_rets: pandas.Series
Expected asset returns (often historical returns).
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
if not isinstance(exp_rets, pd.Series):
raise ValueError("Expected returns is not a Series")
if not cov_mat.index.equals(exp_rets.index):
raise ValueError("Indices do not match")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# exp_rets*x >= 1 and x >= 0
G = opt.matrix(np.vstack((-exp_rets.values,
-np.identity(n))))
h = opt.matrix(np.vstack((-1.0,
np.zeros((n, 1)))))
else:
# exp_rets*x >= 1
G = opt.matrix(-exp_rets.values).T
h = opt.matrix(-1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
    weights = pd.Series(sol['x'], index=cov_mat.index)
    return weights
import struct
import json
import datetime
import crc16
import binascii
import math
import logging
import pandas as pd
import numpy as np
class Ensemble:
"""
RoweTech Binary Ensemble.
RTB format.
"""
# Ensemble header size in bytes
HeaderSize = 32
# Checksum size
ChecksumSize = 4
# Maximum number of datasets.
MaxNumDataSets = 20
# Number of bytes in Int32
BytesInInt32 = 4
# Number of bytes in Float
BytesInFloat = 4
# Number of elements in dataset header
NUM_DATASET_HEADER_ELEMENTS = 6
# Bad Velocity
BadVelocity = float(88.888000)
# CSV Data Types
CSV_AMP = "Amp"
CSV_CORR = "Corr"
CSV_BEAM_VEL = "BeamVel"
CSV_INSTR_VEL = "InstrVel"
CSV_EARTH_VEL = "EarthVel"
CSV_GOOD_BEAM = "GoodBeam"
CSV_GOOD_EARTH = "GoodEarth"
CSV_PRESSURE = "Pressure"
CSV_XDCR_DEPTH = "XdcrDepth"
CSV_HEADING = "Heading"
CSV_PITCH = "Pitch"
CSV_ROLL = "Roll"
CSV_WATER_TEMP = "WaterTemp"
CSV_SYS_TEMP = "SysTemp"
CSV_SOS = "SpeedOfSound"
CSV_FIRST_PING_TIME = "FirstPingTime"
CSV_LAST_PING_TIME = "LastPingTime"
CSV_STATUS = "Status"
CSV_RT = "RT"
CSV_BT_HEADING = "BT_Heading"
CSV_BT_PITCH = "BT_PITCH"
CSV_BT_ROLL = "BT_ROLL"
CSV_BT_PRESSURE = "BT_PRESSURE"
CSV_BT_XDCR_DEPTH = "BT_XdcrDepth"
CSV_BT_STATUS = "BT_Status"
CSV_BT_RANGE = "BT_Range"
CSV_BT_AVG_RANGE = "BT_Avg_Range"
CSV_BT_BEAM_VEL = "BT_BeamVel"
CSV_BT_BEAM_GOOD = "BT_BeamGood"
CSV_BT_INSTR_VEL = "BT_InstrVel"
CSV_BT_INSTR_GOOD = "BT_InstrGood"
CSV_BT_EARTH_VEL = "BT_EarthVel"
CSV_BT_EARTH_GOOD = "BT_EarthGood"
CSV_RT_RANGE = "RT_Range"
CSV_RT_PINGS = "RT_Pings"
CSV_RT_BEAM_VEL = "RT_BeamVel"
CSV_RT_INSTR_VEL = "RT_InstrVel"
CSV_RT_EARTH_VEL = "RT_EarthVel"
CSV_GPS_HEADING = "GPS_Heading"
CSV_GPS_VTG = "GPS_VTG"
CSV_NMEA = "NMEA"
CSV_VOLTAGE = "Voltage"
CSV_MAG = "Magnitude"
CSV_DIR = "Direction"
CSV_DATETIME_FORMAT = "%m/%d/%Y %H:%M:%S.%f"
def __init__(self):
self.RawData = None
self.IsBeamVelocity = False
self.BeamVelocity = None
self.IsInstrumentVelocity = False
self.InstrumentVelocity = None
self.IsEarthVelocity = False
self.EarthVelocity = None
self.IsAmplitude = False
self.Amplitude = None
self.IsCorrelation = False
self.Correlation = None
self.IsGoodBeam = False
self.GoodBeam = None
self.IsGoodEarth = False
self.GoodEarth = None
self.IsEnsembleData = False
self.EnsembleData = None
self.IsAncillaryData = False
self.AncillaryData = None
self.IsBottomTrack = False
self.BottomTrack = None
self.IsWavesInfo = False
self.WavesInfo = None
self.IsRangeTracking = False
self.RangeTracking = None
self.IsSystemSetup = False
self.SystemSetup = None
self.IsNmeaData = False
self.NmeaData = None
def AddRawData(self, data):
"""
Add Raw bytearray data to the ensemble.
:param data: Raw data.
"""
self.RawData = data
def AddBeamVelocity(self, ds):
"""
Add a Beam Velocity object to the ensemble.
Set the flag that the dataset is added.
:param ds: Beam Velocity object.
"""
self.IsBeamVelocity = True
self.BeamVelocity = ds
def AddInstrumentVelocity(self, ds):
"""
Add a Instrument Velocity object to the ensemble.
Set the flag that the dataset is added.
:param ds: Instrument Velocity object.
"""
self.IsInstrumentVelocity = True
self.InstrumentVelocity = ds
def AddEarthVelocity(self, ds):
"""
Add a Earth Velocity object to the ensemble.
Set the flag that the dataset is added.
:param ds: Earth Velocity object.
"""
self.IsEarthVelocity = True
self.EarthVelocity = ds
def AddAmplitude(self, ds):
"""
Add a Amplitude object to the ensemble.
Set the flag that the dataset is added.
:param ds: Amplitude object.
"""
self.IsAmplitude = True
self.Amplitude = ds
def AddCorrelation(self, ds):
"""
Add a Correlation object to the ensemble.
Set the flag that the dataset is added.
:param ds: Correlation object.
"""
self.IsCorrelation = True
self.Correlation = ds
def AddGoodBeam(self, ds):
"""
Add a Good Beam object to the ensemble.
Set the flag that the dataset is added.
:param ds: GoodBeam object.
"""
self.IsGoodBeam = True
self.GoodBeam = ds
def AddGoodEarth(self, ds):
"""
Add a Good Earth object to the ensemble.
Set the flag that the dataset is added.
:param ds: Good Earth object.
"""
self.IsGoodEarth = True
self.GoodEarth = ds
def AddEnsembleData(self, ds):
"""
Add a EnsembleData object to the ensemble.
Set the flag that the dataset is added.
:param ds: Ensemble Data object.
"""
self.IsEnsembleData = True
self.EnsembleData = ds
def AddAncillaryData(self, ds):
"""
Add a AncillaryData object to the ensemble.
Set the flag that the dataset is added.
:param ds: Ancillary Data object.
"""
self.IsAncillaryData = True
self.AncillaryData = ds
def AddBottomTrack(self, ds):
"""
Add a Bottom Track Data object to the ensemble.
Set the flag that the dataset is added.
:param ds: Bottom Track Data object.
"""
self.IsBottomTrack = True
self.BottomTrack = ds
def AddRangeTracking(self, ds):
"""
Add a Range Tracking object to the ensemble.
Set the flag that the dataset is added.
:param ds: Range Tracking Data object.
"""
self.IsRangeTracking = True
self.RangeTracking = ds
def AddSystemSetup(self, ds):
"""
Add a System Setup object to the ensemble.
Set the flag that the dataset is added.
:param ds: System Setup Data object.
"""
self.IsSystemSetup = True
self.SystemSetup = ds
def AddNmeaData(self, ds):
"""
Add a NMEA data object to the ensemble.
Set the flag that the dataset is added.
:param ds: NMEA data Data object.
"""
self.IsNmeaData = True
self.NmeaData = ds
def encode(self):
"""
Encode the ensemble to RTB format.
:return:
"""
payload = []
# Generate Payload
if self.IsEnsembleData:
payload += self.EnsembleData.encode()
if self.IsAncillaryData:
payload += self.AncillaryData.encode()
if self.IsAmplitude:
payload += self.Amplitude.encode()
if self.IsCorrelation:
payload += self.Correlation.encode()
if self.IsBeamVelocity:
payload += self.BeamVelocity.encode()
if self.IsInstrumentVelocity:
payload += self.InstrumentVelocity.encode()
if self.IsEarthVelocity:
payload += self.EarthVelocity.encode()
if self.IsGoodBeam:
payload += self.GoodBeam.encode()
if self.IsGoodEarth:
payload += self.GoodEarth.encode()
if self.IsBottomTrack:
payload += self.BottomTrack.encode()
if self.IsRangeTracking:
payload += self.RangeTracking.encode()
if self.IsSystemSetup:
payload += self.SystemSetup.encode()
if self.IsNmeaData:
payload += self.NmeaData.encode()
# Generate the header
# Get the ensemble number
ens_num = 0
if self.IsEnsembleData:
ens_num = self.EnsembleData.EnsembleNumber
# Get the payload size
payload_size = len(payload)
header = Ensemble.generate_ens_header(ens_num, payload_size)
# Generate the Checksum CITT
# Parameters found at https: // pycrc.org / models.html
#crc = pycrc.algorithms.Crc(width=16, poly=0x1021,
# reflect_in=False, xor_in=0x1d0f,
# reflect_out=False, xor_out=0x0000)
#checksum = crc.bit_by_bit_fast(binascii.a2b_hex(bytes(payload)))
#checksum = Ensemble.int32_to_bytes(CRCCCITT().calculate(input_data=bytes(payload)))
checksum = crc16.crc16xmodem(payload)
result = []
result += header
result += payload
result += checksum
return bytearray(result)
@staticmethod
def generate_ens_header(ens_num, payload_size):
"""
Generate the header for an ensemble. This will include
16 0x80 and then the ensemble number and payload size.
The inverse of the ensemble number and payload size are included.
:param ens_num: Ensemble number.
:param payload_size: Payload size.
:return: Header for an ensemble.
"""
header = []
# Get the Header ID
for cnt in range(0, 16):
header.append(0x80)
# Ensemble Number and inverse
header += Ensemble.int32_to_bytes(ens_num)
header += struct.pack("i", ~ens_num)
# Payload size and inverse
header += Ensemble.int32_to_bytes(payload_size)
header += struct.pack("i", ~payload_size)
return header
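    # Header layout produced above (32 bytes in total, matching Ensemble.HeaderSize):
    #   bytes  0-15 : sixteen 0x80 marker bytes
    #   bytes 16-19 : ensemble number (int32)
    #   bytes 20-23 : bitwise inverse of the ensemble number
    #   bytes 24-27 : payload size (int32)
    #   bytes 28-31 : bitwise inverse of the payload size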
def encode_csv(self,
is_ensemble_data=True,
is_ancillary_data=True,
is_amplitude=True,
is_correlation=True,
is_beam_velocity=True,
is_instrument_velocity=True,
is_earth_velocity=True,
is_good_beam=True,
is_good_earth=True,
is_bottom_track=True,
is_range_tracking=True,
is_nmea_data=True,
is_system_setup=True):
"""
Encode the ensemble into CSV data.
        Each line is a value with the datetime, KEY, subsystem config, subsystem code,
bin and beam number.
:return:
"""
result = []
dt = datetime.datetime.now()
blank = 0
bin_size = 0
if self.IsAncillaryData:
blank = self.AncillaryData.FirstBinRange
bin_size = self.AncillaryData.BinSize
# Get the subsystem code and config
ss_code = ""
ss_config = ""
if self.IsEnsembleData and is_ensemble_data:
ss_code = self.EnsembleData.SysFirmwareSubsystemCode
ss_config = self.EnsembleData.SubsystemConfig
# Create a new datetime based off ensemble date and time
dt = self.EnsembleData.datetime()
result += self.EnsembleData.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsAncillaryData and is_ancillary_data:
result += self.AncillaryData.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsAmplitude and is_amplitude:
result += self.Amplitude.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsCorrelation and is_correlation:
result += self.Correlation.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsBeamVelocity and is_beam_velocity:
result += self.BeamVelocity.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsInstrumentVelocity and is_instrument_velocity:
result += self.InstrumentVelocity.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsEarthVelocity and is_earth_velocity:
result += self.EarthVelocity.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsGoodBeam and is_good_beam:
result += self.GoodBeam.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsGoodEarth and is_good_earth:
result += self.GoodEarth.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsBottomTrack and is_bottom_track:
result += self.BottomTrack.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsRangeTracking and is_range_tracking:
result += self.RangeTracking.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsNmeaData and is_nmea_data:
result += self.NmeaData.encode_csv(dt, ss_code, ss_config, blank, bin_size)
if self.IsSystemSetup and is_system_setup:
result += self.SystemSetup.encode_csv(dt, ss_code, ss_config, blank, bin_size)
return result
def is_good_bin(self, bin_num: int, min_amp: float = 0.25, min_corr: float = 0.10) -> bool:
"""
Verify the data is good for a given bin. This will check the Amplitude, Correlation
and Earth Velocity data. It will check the Amplitude bin against the minimum amplitude value.
It will check the Correlation against the minimum correlation value. It will check for
BAD_VELOCITY in the Earth Velocity data.
:param bin_num: Bin Number.
:param min_amp: Minimum Amplitude value.
:param min_corr: Minimum Correlation value.
:return: TRUE if all values pass.
"""
# Verify Amplitude data exist and all is good for the bin
if self.IsAmplitude and not self.Amplitude.is_good_bin(bin_num, min_amp):
return False
# Verify Correlation data exist and all is good for the bin
if self.IsCorrelation and not self.Correlation.is_good_bin(bin_num, min_corr):
return False
# Verify Earth Velocity data exist and all is good for the bin
if self.IsEarthVelocity and not self.EarthVelocity.is_good_bin(bin_num):
return False
return True
@staticmethod
def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):
"""
Generate the header for an ensemble dataset.
Big Endian
:param value_type: Value type (float, int, string)
:param num_elements: Number of elements or number of bins.
        :param element_multiplier: Element multiplier or number of beams.
:param imag: NOT USED
:param name_length: Length of the name.
:param name: Name of the dataset.
:return: Header for a dataset.
"""
result = []
result += Ensemble.int32_to_bytes(value_type) # Value Type
result += Ensemble.int32_to_bytes(num_elements) # Number of elements
result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier
result += Ensemble.int32_to_bytes(imag) # Image
result += Ensemble.int32_to_bytes(name_length) # Name Length
result += name.encode() # Name
return result
@staticmethod
def crc16_ccitt(crc, data):
msb = crc >> 8
lsb = crc & 255
for c in data:
x = ord(c) ^ msb
x ^= (x >> 4)
msb = (lsb ^ (x >> 3) ^ (x << 4)) & 255
lsb = (x ^ (x << 5)) & 255
return (msb << 8) + lsb
@staticmethod
def array_2d_to_df(vel_array, dt, ss_code, ss_config, blank, bin_size, first_ens_num, last_ens_num):
"""
Convert the given 2D array to a dataframe.
        Columns: time_stamp, ss_code, ss_config, bin_num, beam_num, bin_depth, first_ens_num, last_ens_num, value
        A dictionary is built first and then converted to a dataframe to speed up performance:
https://stackoverflow.com/questions/27929472/improve-row-append-performance-on-pandas-dataframes
:param vel_array: 2D array containing the data
:param dt: DateTime
:param ss_code: SS Code as a string
:param ss_config: SS Configuration as int
:param blank: Blanking distance.
:param bin_size: Bin Size
:param first_ens_num: First Ensemble Number.
:param last_ens_num: Last Ensemble Number.
:return: Dataframe of all the data from the array given.
"""
# Dictionary to create dataframe
# Faster than appending to a dataframe
dict_result = {}
# A counter to use to add entries to dict
i = 0
if vel_array:
# Go through each bin and beam
for bin_num in range(len(vel_array)):
for beam_num in range(len(vel_array[0])):
# Get the bin depth
bin_depth = Ensemble.get_bin_depth(blank, bin_size, bin_num)
# Get the value
value = vel_array[bin_num][beam_num]
# Create a dict entry
dict_result[i] = {'time_stamp': dt,
'ss_code': ss_code,
'ss_config': ss_config,
'bin_num': bin_num,
'beam_num': beam_num,
'bin_depth': bin_depth,
'first_ens_num': first_ens_num,
'last_ens_num': last_ens_num,
'value': value}
# Increment index
i = i + 1
# Create the dataframe from the dictionary
# important to set the 'orient' parameter to "index" to make the keys as rows
df = pd.DataFrame.from_dict(dict_result, "index")
return df
@staticmethod
def array_1d_to_df(vel_array, dt, ss_code, ss_config, blank, bin_size, first_ens_num, last_ens_num):
"""
Convert the given 1D array to a dataframe.
        Columns: time_stamp, ss_code, ss_config, bin_num, beam_num, bin_depth, first_ens_num, last_ens_num, value
        A dictionary is built first and then converted to a dataframe to speed up performance:
        https://stackoverflow.com/questions/27929472/improve-row-append-performance-on-pandas-dataframes
        :param vel_array: 1D array containing the data
:param dt: DateTime
:param ss_code: SS Code as a string
:param ss_config: SS Configuration as int
:param blank: Blanking distance.
:param bin_size: Bin Size
:param first_ens_num: First Ensemble Number.
:param last_ens_num: Last Ensemble Number.
:return: Dataframe of all the data from the array given.
"""
# Dictionary to create dataframe
# Faster than appending to a dataframe
dict_result = {}
# A counter to use to add entries to dict
i = 0
if vel_array:
# Go through each bin and beam
for bin_num in range(len(vel_array)):
# Get the bin depth
bin_depth = Ensemble.get_bin_depth(blank, bin_size, bin_num)
# Get the value
value = vel_array[bin_num]
# Create a dict entry
dict_result[i] = {'time_stamp': dt,
'ss_code': ss_code,
'ss_config': ss_config,
'bin_num': bin_num,
'beam_num': 0,
'bin_depth': bin_depth,
'first_ens_num': first_ens_num,
'last_ens_num': last_ens_num,
'value': value}
# Increment index
i = i + 1
# Create the dataframe from the dictionary
# important to set the 'orient' parameter to "index" to make the keys as rows
        df = pd.DataFrame.from_dict(dict_result, "index")
        return df
# Exports PRISM temperature and PRISM precipitation averaged with Naselle gauge for a given time period
# Script written in Python 3.7
import __init__
import scripts.config as config
import pandas as pd
import matplotlib.pyplot as plt
import mpld3
import numpy as np
# =======================================================================
# Config
start = pd.to_datetime('01-01-1984')
end = pd.to_datetime('12-31-2020')
# Average PRISM and Naselle gauge precipitation
gauge = pd.read_csv(config.daily_ppt.parents[0] / 'GHCND_USC00455774_1929_2020.csv', parse_dates=True, index_col=5)
gauge['SNOW'].fillna(0, inplace=True)
gauge['SNOW_SWE'] = gauge['SNOW'] / 13
gauge['PRCP_TOT'] = gauge['PRCP'] + gauge['SNOW_SWE']
gauge_precip = gauge[['PRCP_TOT']]
prism_precip = pd.read_csv(config.daily_ppt, parse_dates=True, index_col=0)
# Expand precip record to full date range in case some days are missing
rng = pd.date_range(start, end)
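# A likely next step (assumed here, not recovered from the original script) given the comment
# above about expanding the record onto the full date range would be to reindex onto rng, e.g.:
# prism_precip = prism_precip.reindex(rng)
# gauge_precip = gauge_precip.reindex(rng)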
"""
Operator to slide a fixed length window across a timeseries dataframe
"""
import pandas as pd
from tqdm import tqdm
from tasrif.processing_pipeline import ProcessingOperator
class SlidingWindowOperator(ProcessingOperator):
"""
    From a timeseries dataframe of participants, this operator generates four outputs:
    <time_series_features>, <labels>, <label_times>, <participant_ids>
    The first dataframe can be used with tsfresh later on,
    while the labels hold the ground-truth values that we want to predict.
    Notice that the default winsize is 1 hour and 15 minutes (`1h15t`).
We use the first hour to extract the features and the 15 min only to collect the ground_truth labels.
Examples
--------
import pandas as pd
from tasrif.processing_pipeline.custom import SlidingWindowOperator
>>> df = pd.DataFrame([
... ["2020-02-16 11:45:00",27,102.5],
... ["2020-02-16 12:00:00",27,68.5],
... ["2020-02-16 12:15:00",27,40.0],
... ["2020-02-16 15:15:00",27,282.5],
... ["2020-02-16 15:30:00",27,275.0],
... ["2020-02-16 15:45:00",27,250.0],
... ["2020-02-16 16:00:00",27,235.0],
... ["2020-02-16 16:15:00",27,206.5],
... ["2020-02-16 16:30:00",27,191.0],
... ["2020-02-16 16:45:00",27,166.5],
... ["2020-02-16 17:00:00",27,171.5],
... ["2020-02-16 17:15:00",27,152.0],
... ["2020-02-16 17:30:00",27,124.0],
... ["2020-02-16 17:45:00",27,106.0],
... ["2020-02-16 18:00:00",27,96.5],
... ["2020-02-16 18:15:00",27,86.5],
... ["2020-02-16 17:30:00",31,186.0],
... ["2020-02-16 17:45:00",31,177.0],
... ["2020-02-16 18:00:00",31,171.0],
... ["2020-02-16 18:15:00",31,164.0],
... ["2020-02-16 18:30:00",31,156.0],
... ["2020-02-16 18:45:00",31,157.0],
... ["2020-02-16 19:00:00",31,158.0],
... ["2020-02-16 19:15:00",31,158.5],
... ["2020-02-16 19:30:00",31,150.0],
... ["2020-02-16 19:45:00",31,145.0],
... ["2020-02-16 20:00:00",31,137.0],
... ["2020-02-16 20:15:00",31,141.0],
... ["2020-02-16 20:45:00",31,146.0],
... ["2020-02-16 21:00:00",31,141.0]],
... columns=['dateTime','patientID','CGM'])
>>> df['dateTime'] = pd.to_datetime(df['dateTime'])
>>> df
>>> op = SlidingWindowOperator(winsize="1h15t",
... time_col="dateTime",
... label_col="CGM",
... participant_identifier="patientID")
>>> df_timeseries, df_labels, df_label_time, df_pids = op.process(df)[0]
>>> df_timeseries
. dateTime CGM seq_id
0 2020-02-16 15:15:00 282.5 0
1 2020-02-16 15:30:00 275.0 0
2 2020-02-16 15:45:00 250.0 0
3 2020-02-16 16:00:00 235.0 0
4 2020-02-16 15:30:00 275.0 1
... ... ... ...
143 2020-02-16 19:45:00 145.0 35
144 2020-02-16 19:15:00 158.5 36
145 2020-02-16 19:30:00 150.0 36
146 2020-02-16 19:45:00 145.0 36
147 2020-02-16 20:00:00 137.0 36
148 rows × 3 columns
"""
def __init__( # pylint: disable=too-many-arguments
self,
winsize="1h15t",
period=15,
time_col="time",
label_col="CGM",
participant_identifier="patientID",
):
"""Creates a new instance of SlidingWindowsOperator
Args:
winsize (int, offset):
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
                If it is an offset, this will be the time period of each window.
period (int):
periodicity expected between rows. Only used if winsize is an offset
time_col (str):
time column in the dataframe
label_col (str):
label column in the dataframe
participant_identifier (str):
participant id column in the dataframe
"""
super().__init__()
self.winsize = winsize
self.period = period
self.time_col = time_col
self.label_col = label_col
self.participant_identifier = participant_identifier
def _process(self, *data_frames):
"""Processes the passed data frame as per the configuration define in the constructor.
Args:
*data_frames (list of pd.DataFrame):
Variable number of pandas dataframes to be processed
Returns:
pd.DataFrame -or- list[pd.DataFrame]
Processed dataframe(s) resulting from applying the operator
"""
# Determine window size if self.winsize is an offset
window_size_in_rows = self.winsize
if isinstance(self.winsize, str):
window_size_in_rows = self._determine_window_size()
processed = []
for data_frame in data_frames:
df_labels = []
df_label_time = []
df_timeseries = []
df_pids = []
last_seq_id = 0
for pid in tqdm(data_frame[self.participant_identifier].unique()):
(
last_seq_id,
df_ts_tmp,
df_label_tmp,
df_label_time_tmp,
df_pid,
) = self._generate_slide_wins(
data_frame[data_frame[self.participant_identifier] == pid],
start_seq=last_seq_id,
max_size=window_size_in_rows,
)
df_timeseries.append(df_ts_tmp)
df_labels.append(df_label_tmp)
df_label_time.append(df_label_time_tmp)
df_pids.append(df_pid)
df_labels = pd.concat(df_labels).reset_index(drop=True)
df_label_time = pd.concat(df_label_time).reset_index(drop=True)
df_timeseries = pd.concat(df_timeseries).reset_index(drop=True)
df_pids = pd.concat(df_pids).reset_index(drop=True)
df_pids.name = "pid"
processed.append([df_timeseries, df_labels, df_label_time, df_pids])
return processed
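    # Note on window sizing: with the defaults winsize='1h15t' (75 minutes) and period=15, the
    # offset presumably resolves to 75 / 15 = 5 rows via _determine_window_size (not shown
    # here), matching max_size=5 below: 4 rows feed the features and the final row supplies
    # the ground-truth label and its timestamp.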
def _generate_slide_wins(self, df_in, start_seq=0, max_size=5):
"""
        The following code constructs a rolling window that can be based on either time or a number of rows.
        It feeds list_of_indices with the sub-window indices that are used in the next for loop.
        Time-based windows might be smaller than the expected size, so any window whose length
        differs from max_size is skipped.
Args:
df_in (pd.DataFrame):
Pandas DataFrame that has been filtered by `_process_epoch`
start_seq (int):
sequence id
max_size:
window size to keep for the returned data
Returns:
seq_id (int):
last sequence id
transformed_df (pd.DataFrame):
Dataframe that contains the windows labeled by sequence ids
lables (pd.Series):
Series of ground truths of self.label_col
label_times (pd.Series):
Series of ground truths of datetime
pid (pd.Series):
Series of patient ID that belong to transformed_df
Raises:
ValueError: Occurs when _generate_slide_wins is called for more than one pid
"""
seq_id = start_seq
transformed_df = []
list_of_indices = []
labels = []
label_times = []
pid = df_in[self.participant_identifier].unique()
if len(pid) > 1:
raise ValueError("_generate_slide_wins must be called with one pid")
pid = pid[0]
dataframe = df_in.reset_index(drop=True).copy()
dataframe.reset_index().rolling(self.winsize, on=self.time_col, center=False)[
"index"
].apply((lambda x: list_of_indices.append(x.tolist()) or 0))
# Append label index to labels list
for idx in list_of_indices:
if len(idx) != max_size:
continue
labels.append(dataframe.loc[idx].iloc[-1][self.label_col])
label_times.append(dataframe.loc[idx].iloc[-1][self.time_col])
tmp_df = dataframe.loc[idx[0:-1]].copy()
tmp_df["seq_id"] = seq_id
seq_id += 1
del tmp_df[self.participant_identifier]
transformed_df.append(tmp_df)
labels = pd.Series(labels)
labels.name = "ground_truth"
label_times = pd.Series(label_times)
label_times.name = "gt_time"
if transformed_df:
            transformed_df = pd.concat(transformed_df)
#! /usr/bin/env python
import argparse
import os
import sys
from time import strftime
import pysam
from hashed_read_genome_array import HashedReadBAMGenomeArray, ReadKeyMapFactory, read_length_nmis #, get_hashed_counts
from plastid.genomics.roitools import SegmentChain, positionlist_to_segments
import multiprocessing as mp
from scipy.optimize import nnls
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Use linear regression to quantify expression of the ORFs identified by ORF-RATER. Reported values are '
                                             'in reads per nucleotide; any additional normalization(s) (such as for read depth) must be performed in '
'post-processing. The number of nucleotides used to quantify each ORF is also included in the output. '
'ORFs may receive NaN values if they are too short (and therefore masked out by STARTMASK and STOPMASK) '
'or if they are indistinguishable from another ORF after those regions have been masked.')
parser.add_argument('bamfiles', nargs='+', help='Path to transcriptome-aligned BAM file(s) for read data. Expression values will be quantified from '
'each independently.')
parser.add_argument('--names', nargs='+', help='Names to use to refer to BAMFILES, e.g. if the filenames themselves are inconveniently long or '
'insufficiently descriptive. (Default: inferred from BAMFILES)')
parser.add_argument('--subdir', default=os.path.curdir,
help='Convenience argument when dealing with multiple datasets. In such a case, set SUBDIR to an appropriate name (e.g. HARR, '
'CHX) to avoid file conflicts. (Default: current directory)')
parser.add_argument('--inbed', default='transcripts.bed', help='Transcriptome BED-file (Default: transcripts.bed)')
parser.add_argument('--offsetfile', default='offsets.txt',
help='Path to 2-column tab-delimited file with 5\' offsets for variable P-site mappings. First column indicates read length, '
'second column indicates offset to apply. Read lengths are calculated after trimming up to MAX5MIS 5\' mismatches. Accepted '
'read lengths are defined by those present in the first column of this file. If SUBDIR is set, this file is assumed to be '
'in that directory. (Default: offsets.txt)')
parser.add_argument('--max5mis', type=int, default=1, help='Maximum 5\' mismatches to trim. Reads with more than this number will be excluded. '
'(Default: 1)')
parser.add_argument('--startmask', type=int, nargs=2, default=[1, 2],
help='Region around start codons (in codons) to exclude from quantification. (Default: 1 2, meaning one full codon before the '
'start is excluded, as are the start codon and the codon following it).')
parser.add_argument('--stopmask', type=int, nargs=2, default=[3, 0],
help='Region around stop codons (in codons) to exclude from quantification. (Default: 3 0, meaning three codons before and '
'including the stop are excluded, but none after).')
parser.add_argument('--metagenefile', default='metagene.txt',
help='File to be used as the metagene, generated by regress_orfs.py. If SUBDIR is set, this file is assumed to be in that '
'directory. (Default: metagene.txt)')
parser.add_argument('--ratingsfile', default='orfratings.h5',
help='Path to pandas HDF store containing ORF ratings; generated by rate_regression_output.py (Default: orfratings.h5)')
parser.add_argument('--minrating', type=float, default=0.8, help='Minimum ORF rating to require for an ORF to be quantified (Default: 0.8)')
parser.add_argument('--minlen', type=int, default=0, help='Minimum ORF length (in amino acids) to be included in the BED file (Default: 0)')
parser.add_argument('--quantfile', default='quant.h5',
help='Filename to which to output the table of quantified translation values for each ORF. Formatted as pandas HDF; table name '
'is "quant". If SUBDIR is set, this file will be placed in that directory. (Default: quant.h5)')
parser.add_argument('--CSV', help='If included, also write output in CSV format to the provided filename.')
parser.add_argument('-v', '--verbose', action='count', help='Output a log of progress and timing (to stdout). Repeat for higher verbosity level.')
parser.add_argument('-p', '--numproc', type=int, default=1, help='Number of processes to run. Defaults to 1 but more recommended if available.')
parser.add_argument('-f', '--force', action='store_true', help='Force file overwrite')
opts = parser.parse_args()
offsetfilename = os.path.join(opts.subdir, opts.offsetfile)
metafilename = os.path.join(opts.subdir, opts.metagenefile)
quantfilename = os.path.join(opts.subdir, opts.quantfile)
if not opts.force:
if os.path.exists(quantfilename):
raise IOError('%s exists; use --force to overwrite' % quantfilename)
if opts.CSV and os.path.exists(opts.CSV):
raise IOError('%s exists; use --force to overwrite' % opts.CSV)
if opts.names:
if len(opts.bamfiles) != len(opts.names):
raise ValueError('Precisely one name must be provided for each BAMFILE')
colnames = opts.names
else:
colnames = [os.path.splitext(os.path.basename(bamfile))[0] for bamfile in opts.bamfiles] # '/path/to/myfile.bam' -> 'myfile'
if opts.verbose:
sys.stdout.write(' '.join(sys.argv) + '\n')
def logprint(nextstr):
sys.stdout.write('[%s] %s\n' % (strftime('%Y-%m-%d %H:%M:%S'), nextstr))
sys.stdout.flush()
log_lock = mp.Lock()
rdlens = []
Pdict = {}
with open(offsetfilename, 'r') as infile:
for line in infile:
ls = line.strip().split()
rdlen = int(ls[0])
for nmis in range(opts.max5mis+1):
Pdict[(rdlen, nmis)] = int(ls[1])+nmis # e.g. if nmis == 1, offset as though the read were missing that base entirely
rdlens.append(rdlen)
rdlens.sort()
# hash transcripts by ID for easy reference later
with open(opts.inbed, 'r') as inbed:
bedlinedict = {line.split()[3]: line for line in inbed}
with pd.HDFStore(opts.ratingsfile, mode='r') as ratingstore:
chroms = ratingstore.select('orfratings/meta/chrom/meta').values # because saved as categorical, this is the list of all chromosomes
if opts.verbose:
logprint('Loading metagene')
metagene =
|
pd.read_csv(metafilename, sep='\t')
|
pandas.read_csv
|
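# Illustrative sketch (not part of the original script): the description above says ORF
# expression is quantified by linear regression, and scipy.optimize.nnls is imported for
# that purpose. The toy design matrix below only demonstrates the nnls call; the real
# design matrix is derived from the metagene profile and the masked start/stop regions
# described in the options above.
import numpy as np
from scipy.optimize import nnls

toy_profiles = np.array([[1.0, 0.0],    # two overlapping "ORF profiles" (columns)
                         [1.0, 1.0],
                         [0.0, 1.0],
                         [0.0, 1.0]])
toy_counts = np.array([2.0, 5.0, 3.0, 3.0])     # observed read counts at each position
coeffs, rnorm = nnls(toy_profiles, toy_counts)  # non-negative least-squares fit
print(coeffs)   # approximately [2. 3.]: per-ORF densities for this toy system
print(rnorm)    # ~0: residual norm, since the toy system is fit exactly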
import argparse
import json
import os
import pandas as pd
import numpy as np
import csv
import glob
from pathlib import Path
# requirements.txt
import subprocess
import sys
# this doesn't work
#subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'simpletransformers==0.22.1'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'tensorboardx==2.0'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'torch==1.4.0'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'torchvision==0.5.0'])
import torch
import transformers as ppb
import sklearn
from sklearn.model_selection import train_test_split
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(',')
def parse_args():
    # Unlike SageMaker training jobs (which have `SM_HOSTS` and `SM_CURRENT_HOST` env vars), processing jobs need to parse the resource config file directly
resconfig = {}
try:
with open('/opt/ml/config/resourceconfig.json', 'r') as cfgfile:
resconfig = json.load(cfgfile)
except FileNotFoundError:
print('/opt/ml/config/resourceconfig.json not found. current_host is unknown.')
pass # Ignore
# Local testing with CLI args
parser = argparse.ArgumentParser(description='Process')
parser.add_argument('--hosts', type=list_arg,
default=resconfig.get('hosts', ['unknown']),
help='Comma-separated list of host names running the job'
)
parser.add_argument('--current-host', type=str,
default=resconfig.get('current_host', 'unknown'),
help='Name of this host running the job'
)
parser.add_argument('--input-data', type=str,
default='/opt/ml/processing/input/data',
)
parser.add_argument('--output-data', type=str,
default='/opt/ml/processing/output',
)
return parser.parse_args()
def process(args):
print('Current host: {}'.format(args.current_host))
# print('Listing contents of {}'.format(args.input_data))
# dirs_input = os.listdir(args.input_data)
train_data = None
validation_data = None
test_data = None
    # Iterate over every .tsv.gz file in the input data directory
for file in glob.glob('{}/*.tsv.gz'.format(args.input_data)):
print(file)
filename_without_extension = Path(Path(file).stem).stem
# chunksize=100 seems to work well
df_reader = pd.read_csv(file,
delimiter='\t',
quoting=csv.QUOTE_NONE,
compression='gzip',
chunksize=100)
for df in df_reader:
df.shape
df.head(5)
df.isna().values.any()
df = df.dropna()
df = df.reset_index(drop=True)
df.shape
df['is_positive_sentiment'] = (df['star_rating'] >= 4).astype(int)
df.shape
###########
# TODO: increase batch size and run through all the data
###########
# Note: we need to keep this at size 100
batch_1 = df[['review_body', 'is_positive_sentiment']]
batch_1.shape
batch_1.head(5)
# ## Loading the Pre-trained BERT model
# Let's now load a pre-trained BERT model.
# For DistilBERT (lightweight for a notebook like this):
#model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
# For Bert (requires a lot more memory):
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')
# Load pretrained model/tokenizer
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
            # Right now, the variable `model` holds a pretrained BERT or DistilBERT model (DistilBERT is a version of BERT that is smaller, but much faster and requires a lot less memory).
#
# ## Preparing the Dataset
            # Before we can hand our sentences to BERT, we need to do some minimal processing to put them in the format it requires.
#
# ### Tokenization
            # Our first step is to tokenize the sentences -- break them up into words and subwords in the format BERT is comfortable with.
tokenized = batch_1['review_body'].apply((lambda x: tokenizer.encode(x, add_special_tokens=True)))
# ### Padding
            # After tokenization, `tokenized` is a list of sentences -- each sentence is represented as a list of tokens. We want BERT to process our examples all at once (as one batch). It's just faster that way. For that reason, we need to pad all lists to the same size, so we can represent the input as one 2-d array, rather than a list of lists (of different lengths).
max_len = 0
for i in tokenized.values:
if len(i) > max_len:
max_len = len(i)
padded = np.array([i + [0]*(max_len-len(i)) for i in tokenized.values])
# Our dataset is now in the `padded` variable, we can view its dimensions below:
np.array(padded).shape
# ### Masking
# If we directly send `padded` to BERT, that would slightly confuse it. We need to create another variable to tell it to ignore (mask) the padding we've added when it's processing its input. That's what attention_mask is:
attention_mask = np.where(padded != 0, 1, 0)
attention_mask.shape
# The `model()` function runs our sentences through BERT. The results of the processing will be returned into `last_hidden_states`.
input_ids = torch.tensor(padded)
attention_mask = torch.tensor(attention_mask)
with torch.no_grad():
last_hidden_states = model(input_ids, attention_mask=attention_mask)
features = last_hidden_states[0][:,0,:].numpy()
print(features)
print(type(features))
labels = batch_1['is_positive_sentiment']
print(labels)
print(type(labels))
# TODO: Merge features and labels for our purpose here
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, stratify=batch_1['is_positive_sentiment'])
# # Split all data into 90% train and 10% holdout
# train_features, holdout_features, train_labels, holdout_labels = train_test_split(features, labels, stratify=batch_1['is_positive_sentiment'])
# # Split the holdout into 50% validation and 50% test
# validation_features, test_features, validation_labels, test_labels = train_test_split(holdout_features, holdout_labels, stratify=batch_1['is_positive_sentiment'])
train_features.shape
print(train_features)
print(type(train_features))
df_train_features = pd.DataFrame(train_features)
df_train_labels =
|
pd.DataFrame(train_labels)
|
pandas.DataFrame
|
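# Illustrative sketch (not part of the original processing job): the commentary above
# extracts a [CLS] embedding per review and splits features/labels, and the remaining
# TODO is to fit a classifier on them. A common lightweight choice is scikit-learn's
# LogisticRegression; the random arrays below merely stand in for the real BERT
# features so the snippet stays self-contained.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
toy_features = rng.normal(size=(200, 768))   # stand-in for last_hidden_states[0][:, 0, :]
toy_labels = rng.randint(0, 2, size=200)     # stand-in for is_positive_sentiment
X_tr, X_te, y_tr, y_te = train_test_split(toy_features, toy_labels, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
print("holdout accuracy:", clf.score(X_te, y_te))   # ~0.5 on random data, by construction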
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-09-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-10-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-11-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-12-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_feb():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(
|
Timestamp('2014-09-01 00:00:00')
|
pandas.Timestamp
|
from smartapi import SmartConnect
import pandas as pd
from datetime import datetime, timedelta
import credentials
import requests
import numpy as np
from time import time, sleep
from talib.abstract import *
import threading
import warnings
warnings.filterwarnings('ignore')
SYMBOL_LIST = ['CDSL','IEX']
TRADED_SYMBOL = []
timeFrame = 60 + 5  # extra 5 sec to allow for the delayed response of the historical API
def place_order(token,symbol,qty,buy_sell,ordertype,price,variety= 'NORMAL',exch_seg='NSE',triggerprice=0):
try:
orderparams = {
"variety": variety,
"tradingsymbol": symbol,
"symboltoken": token,
"transactiontype": buy_sell,
"exchange": exch_seg,
"ordertype": ordertype,
"producttype": "INTRADAY",
"duration": "DAY",
"price": price,
"squareoff": "0",
"stoploss": "0",
"quantity": qty,
"triggerprice":triggerprice
}
orderId=credentials.SMART_API_OBJ.placeOrder(orderparams)
print("The order id is: {}".format(orderId))
except Exception as e:
print("Order placement failed: {}".format(e.message))
def intializeSymbolTokenMap():
url = 'https://margincalculator.angelbroking.com/OpenAPI_File/files/OpenAPIScripMaster.json'
d = requests.get(url).json()
global token_df
token_df = pd.DataFrame.from_dict(d)
token_df['expiry'] =
|
pd.to_datetime(token_df['expiry'])
|
pandas.to_datetime
|
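# Illustrative sketch (not part of the original script): one way the place_order helper
# defined above could be driven once intializeSymbolTokenMap() has populated token_df.
# The column names used here ('name', 'exch_seg', 'token', 'symbol') are assumptions
# about the OpenAPIScripMaster dump, and the function is deliberately never called.
def example_limit_buy(symbol_name, qty, limit_price):
    row = token_df[(token_df['name'] == symbol_name) & (token_df['exch_seg'] == 'NSE')].iloc[0]
    place_order(token=row['token'], symbol=row['symbol'], qty=qty,
                buy_sell='BUY', ordertype='LIMIT', price=limit_price)

# example_limit_buy('CDSL', qty=1, limit_price=1200.0)  # would place a live order; kept commented out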
#!/usr/bin/env python
"""Tests for `qnorm` package."""
import unittest
import numpy as np
import pandas as pd
import qnorm
import tracemalloc
tracemalloc.start()
df1 = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
df1.to_csv("test.csv")
df1.to_hdf("test.hdf", key="qnorm", format="table", data_columns=True, mode="w")
df1.to_parquet("test.parquet")
class TestQnorm(unittest.TestCase):
def test_000_numpy(self):
"""
test numpy support
"""
arr = np.random.normal(size=(20, 2))
qnorm.quantile_normalize(arr)
def test_001_pandas(self):
"""
test pandas support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
qnorm.quantile_normalize(df)
def test_002_wiki(self):
"""
test the wiki example
https://en.wikipedia.org/wiki/Quantile_normalization
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df).values, result
)
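    # Worked arithmetic behind the expected `result` in test_002_wiki above:
    #   1. Sort each column:          C1 -> 2, 3, 4, 5   C2 -> 1, 2, 4, 4   C3 -> 3, 4, 6, 8
    #   2. Average across each rank:  (2+1+3)/3 = 2.0, (3+2+4)/3 = 3.0,
    #                                 (4+4+6)/3 = 4.666..., (5+4+8)/3 = 5.666...
    #   3. Write each rank-mean back to the positions of the original ranks; ties share the
    #      average of their rank-means (the two 4.0 values in C2 occupy ranks 3 and 4, so
    #      both become (4.666... + 5.666...) / 2 = 5.1666...).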
def test_003_no_change(self):
"""
no sorting should happen here
"""
arr = np.empty(shape=(20, 3))
for col in range(arr.shape[1]):
vals = np.arange(arr.shape[0])
np.random.shuffle(vals)
arr[:, col] = vals
qnorm_arr = qnorm.quantile_normalize(arr)
np.testing.assert_array_almost_equal(arr, qnorm_arr)
def test_004_double(self):
"""
if dtype is double, return double
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float64)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float64
def test_005_single(self):
"""
if dtype is single, return single
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float32)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float32
def test_006_target(self):
"""
test if the target is used instead of the qnorm values
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
target = np.arange(10, 20)
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_007_target_notsorted(self):
"""
make sure an unsorted target gets sorted first
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
# take the reverse, which should be sorted by qnorm
target = np.arange(10, 20)[::-1]
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_008_short_target(self):
"""
        test if an error is raised with an invalid-sized target
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
target = np.arange(10, 15)
self.assertRaises(ValueError, qnorm.quantile_normalize, arr, target)
def test_009_wiki_ncpus(self):
"""
        test the wiki example with multiple cpus
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, ncpus=10).values, result
)
def test_010_axis_numpy(self):
"""
test numpy axis support
"""
arr = np.random.normal(size=(50, 4))
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr.T, axis=0).T,
qnorm.quantile_normalize(arr, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr, axis=1),
qnorm.quantile_normalize(arr.T, axis=0).T,
)
def test_011_axis_pandas(self):
"""
        test pandas axis support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df.T, axis=0).T,
qnorm.quantile_normalize(df, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, axis=1),
qnorm.quantile_normalize(df.T, axis=0).T,
)
def test_012_from_csv(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.csv", "test_out.csv")
df1 = pd.read_csv("test.csv", index_col=0, header=0)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_013_from_csv_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", rowchunksize=rowchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_014_from_csv_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", colchunksize=colchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_015_from_csv_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv",
"test_out.csv",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_016_from_csv_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(index=range(5000), columns=range(100))
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_csv("test_large.csv")
qnorm.incremental_quantile_normalize(
"test_large.csv",
"test_large_out.csv",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_csv("test_large_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_017_from_hdf(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.hdf", "test_out.hdf")
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_018_from_hdf_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", rowchunksize=rowchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_019_from_hdf_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", colchunksize=colchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_020_from_hdf_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf",
"test_out.hdf",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_021_from_hdf_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(
index=range(5000),
columns=["sample" + str(col) for col in range(100)],
dtype=int,
)
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_hdf(
"test_large.hdf", key="qnorm", format="table", data_columns=True
)
qnorm.incremental_quantile_normalize(
"test_large.hdf",
"test_large_out.hdf",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_hdf("test_large_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_022(self):
"""
        Test another array, not just the wiki example.
"""
df = pd.DataFrame(
{
"C1": {
"A": 2.0,
"B": 2.0,
"C": 2.0,
"D": 2.0,
"E": 6.0,
"F": 1.0,
},
"C2": {
"A": 2.0,
"B": 2.0,
"C": 1.0,
"D": 3.5,
"E": 5.0,
"F": 1.0,
},
}
)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df).values,
np.array(
[
[2.0625, 2.0],
[2.0625, 2.0],
[2.0625, 1.25],
[2.0625, 2.75],
[5.5, 5.5],
[1.0, 1.25],
]
),
)
def test_023_from_parquet(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.parquet", "test_out.parquet")
df1 = pd.read_parquet("test.parquet")
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_024_from_parquet_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", rowchunksize=rowchunksize
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_025_from_parquet_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", colchunksize=colchunksize
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_026_from_parquet_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_parquet("test.parquet")
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet",
"test_out.parquet",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_027_from_parquet_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(
index=range(5000),
columns=["sample" + str(col) for col in range(100)],
)
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1 = df1.astype(float)
df1.to_parquet("test_large.parquet")
qnorm.incremental_quantile_normalize(
"test_large.parquet",
"test_large_out.parquet",
rowchunksize=11,
colchunksize=11,
)
df2 =
|
pd.read_parquet("test_large_out.parquet")
|
pandas.read_parquet
|
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
    # Create the containers (signals) that will be emitted #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
@pyqtSlot(object)
def generate_db(self):
        test_db = input('Enter the scenario to run : ')
        print(f'Running the entered scenario: {test_db}.')
        Model_module()  # initialize the empty matrices inside the model module
        data_module = Data_module()
        db, check_db = data_module.load_data(file_name=test_db)  # load test_db
        data_module.data_processing()  # Min-Max scaling applied, 2-dimensional
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
                dim2 = np.array(data_module.load_scaled_data(row=line - 9))  # 2-D scaled data
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
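# Illustrative sketch (not part of the original GUI): the minimal version of the
# worker-thread pattern the Worker class above relies on -- a QObject declares
# pyqtSignal attributes, is moved onto a QThread with moveToThread(), and its signals
# are connected to slots living in the main thread (as Mainwindow.__init__ does below).
# The names MinimalWorker/heavy_loop are examples only, and this function is never called.
def _worker_thread_pattern_sketch():
    from PyQt5.QtCore import QObject, QThread, pyqtSignal

    class MinimalWorker(QObject):
        progress = pyqtSignal(int)               # emitted from inside the worker thread

        def heavy_loop(self):
            for step in range(5):
                self.progress.emit(step)         # QObject receivers in other threads get a queued call

    worker = MinimalWorker()
    thread = QThread()
    worker.moveToThread(thread)                  # queued slot invocations on `worker` now run in `thread`
    worker.progress.connect(print)               # stand-in for a Mainwindow slot
    thread.started.connect(worker.heavy_loop)    # start the loop once the thread spins up
    thread.start()
    return worker, thread                        # keep references alive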
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
        # Initial graph settings
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
        # GUI part 1 Layout (integrated diagnosis section)
        layout_left = QVBoxLayout()
        # Group 0 setup (Time and Power)
        gb_0 = QGroupBox("Training Status")  # set the group 0 title
        layout_left.addWidget(gb_0)  # add group 0 to the overall frame
        gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight)  # layout that will hold the group 0 contents
        # Group 1 setup
        gb_1 = QGroupBox("Training Status")  # set the group 1 title
        layout_left.addWidget(gb_1)  # add group 1 to the overall frame
        gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight)  # layout that will hold the group 1 contents
        # Group 2 setup
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 3 setup
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 4 setup
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 5 setup
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
        # Add a spacer
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # Group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
        # Group 1 contents
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
        # Group 2 contents
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
        # Group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
        # Group 4 contents
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
        # Group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
        # Fill in the group 0 contents
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
        # Fill in the group 1 contents
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
        gb_1.setLayout(gb_1_layout)  # put the group 1 layout contents into the group 1 frame
        # Fill in the group 2 contents
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
        # Fill in the group 3 contents
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
        # Fill in the group 4 contents
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
        # Fill in the group 5 contents
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
        # Place the Start button at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
        # Plot widgets
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
        # Explanation alarm widgets
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
        # Create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
        # Insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
        # Insert the layouts into the group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
        # Insert each group box into the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # GUI part2 Layout (XAI display)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
        # Combine GUI part1 and part2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
        # Final GUI assembly (to place the start button at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
        self.setLayout(total_layout)  # setLayout: determines the final GUI screen that is displayed
# Threading Part##############################################################################################################
        # Run the data-computation part in a separate thread
self.worker = Worker()
self.worker_thread = QThread()
        # Connect the signals to functions inside the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
        self.worker.moveToThread(self.worker_thread)  # move the Worker instance onto the thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
        self.start_btn.clicked.connect(lambda: self.worker.generate_db())  # clicking the button runs the for loop
self.worker_thread.start()
# Threading Part##############################################################################################################
        # Event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
        # Button-click event wiring
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
        # Declare the button-linked plot widgets up front -> they must be declared at initialization so they persist without being cut off.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # Untrained Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
        if abnormal_diagnosis == 0:  # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
self.num_scnario.setText('Pressurizer pressure channel failure "High"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
self.num_scnario.setText('Pressurizer pressure channel failure "Low"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
self.num_scnario.setText('Pressurizer level channel failure "Low"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
self.num_scnario.setText('Steam generator level channel failure "Low"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
self.num_scnario.setText('Steam generator level channel failure "High"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
self.num_scnario.setText('Control rod fall')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
self.num_scnario.setText('Continuous insertion of control rod')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
self.num_scnario.setText('Pressurizer safety valve failure')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
self.num_scnario.setText('Pressurizer spray valve failure "Open"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
self.num_scnario.setText('Leakage into the component cooling water system "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
self.num_scnario.setText('Rupture of the front end of the regenerative heat exchanger')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
self.num_scnario.setText('Leakage at the rear end of the charging flow control valve')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
self.num_scnario.setText('Leakage into the component cooling water system "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
self.num_scnario.setText('Steam generator u-tube leakage')
def verifit_result(self, verif_value):
if verif_value[0] <= verif_value[1]: # Diagnosis succeeded
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # Diagnosis failed
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
# symptom_db[1] : check_db [2,2222] -> used to compare the current time step with the previous one.
# symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
if symptom_db[0] == 0: # Normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("Pressurizer 'high' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("Pressurizer spray valve 'open' indication")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("Pressurizer proportional heater off")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("Pressurizer auxiliary heater off")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("Actual pressurizer 'low' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Pressurizer PORV block valve closed")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('Auxiliary heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("Actual pressurizer 'high' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Pressurizer PORV closes as the actual pressurizer pressure decreases') # The handling of the pressure decrease still needs to be worked out.
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' level indication due to channel failure")
if symptom_db[1].iloc[1]['ZINST63'] < 17: # needs to be checked again later.
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" alarm occurs')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" alarm occurs')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Level indication increase on the healthy level indicators')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
self.symptom1.setText('Steam generator level "low" alarm occurs')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('The affected SG MFCV moves toward open and the actual feedwater flow of that SG increases')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("Diagnosis Result : Ab21-11 Pressurizer spray valve failure 'Open' → Symptoms : 4")
self.symptom1.setText("Pressurizer spray valve 'open' indication and status light on")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("Pressurizer auxiliary heater on indication and alarm")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("Pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("Rapid increase in pressurizer level") # The 'rapid increase' check still needs refinement -> revise later
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
# shap_add_des['index'] : variable name / shap_add_des[0] : shap value
# shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted to a probability
'''
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
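# --- Illustrative aside (not part of the original application) ---------------------
# explain_result above assumes `shap_add_des` behaves like a DataFrame with the columns
# 'index', 0 (shap value), 'probability', 'describe' and 'system'. A minimal example of
# that structure (all values below are made up for demonstration only):
import pandas as pd

example_shap_add_des = pd.DataFrame({
    'index': ['BPORV', 'PPRZN'],                      # variable names
    0: [-0.42, -0.13],                                 # raw shap values
    'probability': [35.2, 10.9],                       # shap value rescaled to a percentage
    'describe': ['PORV open state', 'Pressurizer pressure'],
    'system': ['RCS', 'RCS'],
})
# ------------------------------------------------------------------------------------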
def show_table(self):
self.worker.shap.connect(self.explain_result)
# Because the click signal is delivered through a thread, there is some buffering (about 2 seconds?). Consider loading this at start-up instead; to be revisited later.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
# red_range = display_db[display_db['probability'] >= 10] # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
# display_db['index'] : variable name / display_db[0] : shap value
# display_db['describe'] : description of the variable / display_db['probability'] : shap value converted to a probability
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
if 4-len(red_range) == 0:
red_del = []
elif 4-len(red_range) == 1:
red_del = [3]
elif 4-len(red_range) == 2:
red_del = [2,3]
elif 4-len(red_range) == 3:
red_del = [1,2,3]
elif 4-len(red_range) == 4:
red_del = [0,1,2,3]
if 12-len(orange_range) == 0:
orange_del = []
elif 12-len(orange_range) == 1:
orange_del = [11]
elif 12-len(orange_range) == 2:
orange_del = [10,11]
elif 12-len(orange_range) == 3:
orange_del = [9,10,11]
elif 12-len(orange_range) == 4:
orange_del = [8,9,10,11]
elif 12-len(orange_range) == 5:
orange_del = [7,8,9,10,11]
elif 12-len(orange_range) == 6:
orange_del = [6,7,8,9,10,11]
elif 12-len(orange_range) == 7:
orange_del = [5,6,7,8,9,10,11]
elif 12-len(orange_range) == 8:
orange_del = [4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 9:
orange_del = [3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 10:
orange_del = [2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 11:
orange_del = [1,2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 12:
orange_del = [0,1,2,3,4,5,6,7,8,9,10,11]
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
# Build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
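# --- Illustrative aside (not part of the original application) ---------------------
# The long if/elif ladders above that build `red_del` and `orange_del` simply list the
# button slots left unused; an equivalent, more compact helper (a refactor sketch, not
# the authors' code):
def unused_slots(n_used, n_total):
    # e.g. unused_slots(2, 4) -> [2, 3]; unused_slots(5, 12) -> [5, 6, ..., 11]
    return list(range(n_used, n_total))
# red_del = unused_slots(len(red_range), 4); orange_del = unused_slots(len(orange_range), 12)
# ------------------------------------------------------------------------------------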
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
# Initial setup of the sub-interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
# Layout composition
combo_layout = QVBoxLayout()
self.title_label = QLabel("<b>Result interpretation for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
self.blank = QLabel(self) # blank label used for vertical spacing
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Explanation alarm implementation
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Insert widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
# Event handling section ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Wire button clicks to their handlers
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# Declare the plot widgets up front -> they must be created at initialization so they persist without interruption.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : holds the shap values for every scenario.
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
if self.cb.currentText() == 'Normal':
step1 = pd.DataFrame(all_shap[0], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()]
elif self.cb.currentText() == 'Ab21-01: Pressurizer pressure channel failure (High)':
step1 = pd.DataFrame(all_shap[1], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-02: Pressurizer pressure channel failure (Low)':
step1 = pd.DataFrame(all_shap[2], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab20-04: Pressurizer level channel failure (Low)':
step1 = pd.DataFrame(all_shap[3], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-07: Steam generator level channel failure (High)':
step1 = pd.DataFrame(all_shap[4], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab15-08: Steam generator level channel failure (Low)':
step1 = pd.DataFrame(all_shap[5], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-04: Control rod fall':
step1 = pd.DataFrame(all_shap[6], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab63-02: Continuous insertion of control rod':
step1 = pd.DataFrame(all_shap[7], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-12: Pressurizer PORV opening':
step1 = pd.DataFrame(all_shap[8], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab19-02: Pressurizer safety valve failure':
step1 = pd.DataFrame(all_shap[9], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab21-11: Pressurizer spray valve failed opening':
step1 = pd.DataFrame(all_shap[10], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-03: Leakage from CVCS to RCS':
step1 = pd.DataFrame(all_shap[11], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab60-02: Rupture of the front end of the regenerative heat exchanger':
step1 = pd.DataFrame(all_shap[12], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab59-02: Leakage at the rear end of the charging flow control valve':
step1 = pd.DataFrame(all_shap[13], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-01: Leakage from CVCS to CCW':
step1 = pd.DataFrame(all_shap[14], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
elif self.cb.currentText() == 'Ab23-06: Steam generator u-tube leakage':
step1 = pd.DataFrame(all_shap[15], columns=self.selected_para['0'].tolist())
compared_db = compare_data[self.cb.currentText()[:7]]
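# The post-processing below sorts the variables by their shap value, keeps only the
# negative contributions, converts them into a percentage share ('probability'), and
# attaches the human-readable description and system name from Final_parameter_200825.csv.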
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[
[self.step4['probability'].iloc[i] < 10 and self.step4['probability'].iloc[i] > 1 for i in
range(len(self.step4['probability']))]]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
if 4 - len(red_range) == 0:
red_del = []
elif 4 - len(red_range) == 1:
red_del = [3]
elif 4 - len(red_range) == 2:
red_del = [2, 3]
elif 4 - len(red_range) == 3:
red_del = [1, 2, 3]
elif 4 - len(red_range) == 4:
red_del = [0, 1, 2, 3]
if 12 - len(orange_range) == 0:
orange_del = []
elif 12 - len(orange_range) == 1:
orange_del = [11]
elif 12 - len(orange_range) == 2:
orange_del = [10, 11]
elif 12 - len(orange_range) == 3:
orange_del = [9, 10, 11]
elif 12 - len(orange_range) == 4:
orange_del = [8, 9, 10, 11]
elif 12 - len(orange_range) == 5:
orange_del = [7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 6:
orange_del = [6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 7:
orange_del = [5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 8:
orange_del = [4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 9:
orange_del = [3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 10:
orange_del = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 11:
orange_del = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
elif 12 - len(orange_range) == 12:
orange_del = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data associated with each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red4 Button
if self.cb_red4.text().split()[0] != 'None':
self.cb_red_plot_4.clear()
self.cb_red_plot_4.setTitle(red_range['describe'].iloc[3])
self.cb_red_plot_4.addLegend(offset=(-30, 20))
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange1 Button
if self.cb_orange1.text().split()[0] != 'None':
self.cb_orange_plot_1.clear()
self.cb_orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.cb_orange_plot_1.addLegend(offset=(-30, 20))
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange2 Button
if self.cb_orange2.text().split()[0] != 'None':
self.cb_orange_plot_2.clear()
self.cb_orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.cb_orange_plot_2.addLegend(offset=(-30, 20))
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange3 Button
if self.cb_orange3.text().split()[0] != 'None':
self.cb_orange_plot_3.clear()
self.cb_orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.cb_orange_plot_3.addLegend(offset=(-30, 20))
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange4 Button
if self.cb_orange4.text().split()[0] != 'None':
self.cb_orange_plot_4.clear()
self.cb_orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.cb_orange_plot_4.addLegend(offset=(-30, 20))
self.cb_orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange5 Button
if self.cb_orange5.text().split()[0] != 'None':
self.cb_orange_plot_5.clear()
self.cb_orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.cb_orange_plot_5.addLegend(offset=(-30, 20))
self.cb_orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange6 Button
if self.cb_orange6.text().split()[0] != 'None':
self.cb_orange_plot_6.clear()
self.cb_orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.cb_orange_plot_6.addLegend(offset=(-30, 20))
self.cb_orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange7 Button
if self.cb_orange7.text().split()[0] != 'None':
self.cb_orange_plot_7.clear()
self.cb_orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.cb_orange_plot_7.addLegend(offset=(-30, 20))
self.cb_orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange8 Button
if self.cb_orange8.text().split()[0] != 'None':
self.cb_orange_plot_8.clear()
self.cb_orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.cb_orange_plot_8.addLegend(offset=(-30, 20))
self.cb_orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020/12/31 4:01 下午
# @File : compare_eval_result.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc :
import json
import pandas as pd
import requests
def collect_data(devfile="../data_root_dir/newcos/dev.json", eval_results="../output_root_dir/newcos/eval_results-newcos.json"):
"""
Generate an Excel file that compares the results produced by main.trainer.py with the dev file.
:param devfile: training file, formatted as [(text, keyword, labels), ...]
:param eval_results: JSON file under the output directory generated by main.trainer.py, formatted as [(pred_id, probability)]
:return:
"""
labels = ["是","否"]
with open(devfile) as f:
dev_data = json.load(f)
with open(eval_results) as f:
eval_data = json.load(f)
assert len(dev_data) == len(eval_data)
data = []
for d, res in zip(dev_data, eval_data):
one_data = {"text": d[0], "keyword":d[1], "label": d[2], "predict":labels[res[0]], "probability": format(res[1], "0.3f")}
data.append(one_data)
df = pd.DataFrame(data)
excel_file = "result2.xlsx"
writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')
df.to_excel(writer)
writer.save()
print(f"Saved to Excel successfully: {excel_file}")
return data
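# Illustrative usage only (the paths are the defaults assumed above):
# data = collect_data(devfile="../data_root_dir/newcos/dev.json",
#                     eval_results="../output_root_dir/newcos/eval_results-newcos.json")
# print(len(data), data[0])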
def compare_model(hostname='http://127.0.0.1:3314'):
"""
Send the collected data to the online service and compare its accuracy there; this compares against the online model, not our own model.
:param hostname:
:return:
"""
url = hostname + '/lavector/rest/aspect-sentiment-batch'
headers = {'Content-Type': 'application/json'}
mydata = collect_data()
post_data = []
for d in mydata:
one = (d["text"], [d["keyword"]])
post_data.append(one)
data = {'channel': 'jd', 'data': post_data}
print(f"Sending request to {url}, number of samples: {len(post_data)}")
res = requests.post(url, data=json.dumps(data), headers=headers)
result = res.json()
myresults = []
for r in result['result']:
keyword_list = list(r.keys())
pres_list = list(r.values())
assert len(keyword_list) == 1
assert len(pres_list) == 1
keyword = keyword_list[0]
pres = pres_list[0]
for k,v in pres.items():
if v == 1:
if k == "负向":
predict = "消极"
elif k =="正向":
predict = "积极"
else:
predict = "中性"
myresults.append([keyword,predict])
assert len(post_data) == len(myresults)
# Save to file
newdata = []
for d, res in zip(mydata, myresults):
if res[0] != d["keyword"]:
print(f"The keyword returned for this prediction does not match: {res[0]}")
continue
d["online_predict"] = res[1]
newdata.append(d)
df = pd.DataFrame(newdata)
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
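# Usage sketch (illustrative, not part of pandas): round-trip a DataFrame through pickle
# and check that nothing changed.
# df = pd.DataFrame({"a": [1, 2, 3]})
# assert_frame_equal(df, round_trip_pickle(df))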
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
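# Minimal usage sketch for decompress_file; 'data.csv.gz' is a hypothetical
# path standing in for any gzip-compressed file on disk.
def _example_decompress_file(path='data.csv.gz'):
    with decompress_file(path, compression='gzip') as f:
        return f.read()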
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
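# Minimal usage sketch for assert_almost_equal: plain numbers and numpy arrays
# are both accepted; pandas objects dispatch to the dedicated assert_* helpers.
def _example_assert_almost_equal():
    assert_almost_equal(1.000001, 1.000002, check_less_precise=True)
    assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))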
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error):  # horrible name for an Exception subclass
return False
else:
return True
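# Minimal usage sketch for set_locale/can_set_locale; 'en_US.UTF-8' is an
# assumed locale name that may not be installed on every system, hence the guard.
def _example_set_locale():
    if can_set_locale('en_US.UTF-8'):
        with set_locale('en_US.UTF-8') as normalized:
            return normalized
    return None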
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more readable, level-by-level error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
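# Minimal usage sketch for assert_index_equal: under the default exact='equiv',
# a RangeIndex and an Int64Index holding the same values compare as equal.
def _example_assert_index_equal():
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]))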
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
                       obj='{obj}.right'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
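# Minimal usage sketch for assert_numpy_array_equal: dtypes are checked by
# default, so comparing int64 values against float64 values would raise.
def _example_assert_numpy_array_equal():
    assert_numpy_array_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))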
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
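# Minimal usage sketch for assert_series_equal: check_dtype=False tolerates an
# int64 vs float64 mismatch that would otherwise raise.
def _example_assert_series_equal():
    assert_series_equal(Series([1, 2, 3]), Series([1.0, 2.0, 3.0]),
                        check_dtype=False)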
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
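# Minimal usage sketch for box_expected: wrap the same expected values in
# different containers, as parametrized arithmetic tests typically do.
def _example_box_expected():
    expected = [1, 2, 3]
    return (box_expected(expected, pd.Index),
            box_expected(expected, pd.Series),
            box_expected(expected, np.ndarray))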
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == 'block':
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left_index, right_index)
else:
        # indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.values, right.values,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(np.asarray(left.values),
np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(
series, right[col],
check_dtype=check_dtype,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices
)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
return pd.timedelta_range(start='1 day', periods=k, freq=freq,
name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product(
(('foo', 'bar'), (1, 2)), names=names, **kwargs)
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
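# Minimal usage sketch for all_index_generator: collect the class name of every
# index flavour the generator produces.
def _example_all_index_generator():
    return [type(idx).__name__ for idx in all_index_generator(k=5)]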
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeRangeIndex,
makeIntervalIndex, makeCategoricalIndex,
makeMultiIndex
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return
|
DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
from metalearn.metafeatures.base import build_resources_info, ResourceComputer
import metalearn.metafeatures.constants as consts
def get_X(X_raw):
return X_raw.dropna(axis=1, how="all"),
get_X = ResourceComputer(get_X, ["X"])
def get_cv_seed(seed_base, seed_offset):
return (seed_base + seed_offset,)
get_cv_seed = ResourceComputer(get_cv_seed, ["cv_seed"], {'seed_offset': 1})
def sample_columns(X, sample_shape, seed):
if sample_shape[1] is None or X.shape[1] <= sample_shape[1]:
X_sample = X
else:
np.random.seed(seed)
sampled_column_indices = np.random.choice(
X.shape[1], size=sample_shape[1], replace=False
)
sampled_columns = X.columns[sampled_column_indices]
X_sample = X[sampled_columns]
return (X_sample,)
sample_columns = ResourceComputer(
sample_columns,
["XSampledColumns"],
{ "seed": 2 }
)
def sample_rows(X, Y, sample_shape, seed):
"""
Stratified uniform sampling of rows, according to the classes in Y.
Ensures there are enough samples from each class in Y for cross
validation.
"""
if sample_shape[0] is None or X.shape[0] <= sample_shape[0]:
X_sample, Y_sample = X, Y
elif Y is None:
np.random.seed(seed)
row_indices = np.random.choice(
X.shape[0], size=sample_shape[0], replace=False
)
X_sample, Y_sample = X.iloc[row_indices], Y
else:
drop_size = X.shape[0] - sample_shape[0]
sample_size = sample_shape[0]
sss = StratifiedShuffleSplit(
n_splits=2, test_size=drop_size, train_size=sample_size, random_state=seed
)
row_indices, _ = next(sss.split(X, Y))
X_sample, Y_sample = X.iloc[row_indices], Y.iloc[row_indices]
return (X_sample, Y_sample)
sample_rows = ResourceComputer(
sample_rows,
["XSample","YSample"],
{ "X": "XSampledColumns", "seed": 3 }
)
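# Minimal sketch of the stratified row sampling used above, on a small
# hypothetical dataset: class proportions in Y are preserved in the sample.
def _example_stratified_row_sample():
    X = pd.DataFrame({'f': range(10)})
    Y = pd.Series([0, 1] * 5)
    sss = StratifiedShuffleSplit(n_splits=2, test_size=4, train_size=6,
                                 random_state=3)
    row_indices, _ = next(sss.split(X, Y))
    return X.iloc[row_indices], Y.iloc[row_indices]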
def get_preprocessed_data(X_sample, X_sampled_columns, column_types, seed):
series_array = []
for feature in X_sample.columns:
is_text = False
feature_series = X_sample[feature].copy()
col = feature_series.values
dropped_nan_series = X_sampled_columns[feature].dropna(
axis=0,how='any'
)
num_nan = np.sum(feature_series.isnull())
np.random.seed(seed)
col[feature_series.isnull()] = np.random.choice(
dropped_nan_series, size=num_nan
)
if column_types[feature_series.name] == consts.CATEGORICAL:
feature_series = pd.get_dummies(feature_series)
elif column_types[feature_series.name] == consts.TEXT:
is_text = True
if not is_text:
series_array.append(feature_series)
return (pd.concat(series_array, axis=1, copy=False),)
get_preprocessed_data = ResourceComputer(
get_preprocessed_data,
["XPreprocessed"],
{
"X_sample": "XSample",
"X_sampled_columns": "XSampledColumns",
"seed": 4
}
)
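# Hedged, self-contained sketch of the preprocessing idea implemented above
# (fill missing values by sampling from the observed values of the same column,
# then one-hot encode categoricals with pd.get_dummies); illustrative only and
# not wired into the ResourceComputer graph.
if __name__ == "__main__":
    demo_col = pd.Series(["red", "blue", None, "red", None], name="color")
    observed = demo_col.dropna()
    np.random.seed(4)
    filled = demo_col.copy()
    filled[filled.isnull()] = np.random.choice(observed, size=filled.isnull().sum())
    print(pd.get_dummies(filled))  # one indicator column per observed color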
def get_categorical_features_with_no_missing_values(
X_sample, column_types
):
categorical_features_with_no_missing_values = []
for feature in X_sample.columns:
if column_types[feature] == consts.CATEGORICAL:
no_nan_series = X_sample[feature].dropna(
axis=0, how='any'
)
categorical_features_with_no_missing_values.append(
no_nan_series
)
return (categorical_features_with_no_missing_values,)
get_categorical_features_with_no_missing_values = ResourceComputer(
get_categorical_features_with_no_missing_values,
["NoNaNCategoricalFeatures"],
{ "X_sample": "XSample" }
)
def get_categorical_features_and_class_with_no_missing_values(
X_sample, Y_sample, column_types
):
categorical_features_and_class_with_no_missing_values = []
for feature in X_sample.columns:
if column_types[feature] == consts.CATEGORICAL:
df =
|
pd.concat([X_sample[feature],Y_sample], axis=1)
|
pandas.concat
|
# @name: ont_dict.py
# @title: Creates an ontology dictionary for all terms in the NGLY1 graph network
# @description: Main outer file does the merge and checks for missing values.
# [Data sources](https://github.com/flaneuse/ntwk-explr/blob/master/datain/DATA_README.md)
# [Data pipeline](https://docs.google.com/presentation/d/1dk_1lTGAhB1tJZuUH9yfJoAZwznedHM_DHrCVFBqeW8/edit#slide=id.g3303550b82_0_110)
# @sources: Ontology structures via OLS (GO, HP, MP, FBcv, FBbt, WormBase); gene annotations via mygene.info; NGLY1 network primarily Monarch
# @depends: clean_neo4j.py, annot_GENE.py, ont_struct.py
# @author: <NAME>
# @email: <EMAIL>
# @date: 31 January 2018
# [0] Setup ----------------------------------------------------------------------
import numpy as np
import pandas as pd
import requests
import os
import warnings
# -- Atom notebook path settings --
# output_dir = 'dataout/' # path within Atom notebook
# import src.data_prep.clean_neo4j as neo4j # interface to query network
# import src.data_prep.annot_GENE as gene # interface to get gene annotations
# import src.data_prep.ont_struct as ont # functions to pull ontology data
# -- command line prompt settings --
output_dir = '../../dataout/' # path from command line prompt
import clean_neo4j as neo4j # interface to query network
# import annot_GENE as gene # interface to get gene annotations
import ont_struct as ont # functions to pull ontology data
# [1] Pull unique nodes from Nuria's graph -----------------------------------------------------------------
# node_file = 'https://raw.githubusercontent.com/NuriaQueralt/ngly1/master/neo4j-community-3.0.3/import/ngly1/ngly1_concepts.tsv' # if want to pull directly from the input file to neo4j
nodes = neo4j.get_nodes()
# <<< pull_ontsource(id, sep = ':') >>>
# @name: pull_ontsource
# @title: grab node ID source type
# @description: used to merge ids to ontology hierarchical levels from OLS; see `check_ontid_unique.py`
# @input: *id* string, *sep*: separator between id source and source-specific id
# @output: stub containing the ont source for that particular id
# @example: pull_ontsource('ZFIN:ZDB-GENE-051023-7')
def pull_ontsource(id, sep = ':'):
split_id = id.split(sep)
if (len(split_id) > 1):
return split_id[0]
else:
warnings.warn("Cannot find the source for the id. Need to change `sep`?")
# (TEST): make sure I'm pulling all the ID types
# nodes['ont_source'] = nodes.node_id.apply(pull_ontsource)
# nodes.groupby('node_type').ont_source.value_counts()
# <<< get_ontid(nodes, drop_source = True) >>>
# @name: get_ontid
# @title: map node ID type to ont_id from OLS
# @description: used to merge ids to ontology hierarchical levels; see `check_ontid_unique.py`
# NOTE: GENE merging taken care of by merging to annotations (translated via mygene.info) (NCBIGene, ZFIN, MGI, RGD, WormBase, Xenbase, FlyBase, UniProt, InterPro)
# NOTE: ignoring PHYS, GENO, VARI for now (PHYS requires too much work for the moment; GENO/VARI are low numbers and would require translating to genes then merging -- if that's even appropriate to lump mutation w/ original function)
# Also ignoring, for now:
# ANAT CL (only 7; not in UBERON)
# DISO ZP (271; not in OLS); disease DB: DOID (17), OMIM (6), MESH (4)
# TODO: revisit DISO when phenotypes/diseases are better delineated
# TODO: revisit GENE when genes/proteins are better delineated
# @input: *nodes*: dataframe containing node_id, *drop_source*: binary option to drop column containing the node ont_source
# @output: *nodes* dataframe
# @example: get_ontid(nodes)
def get_ontid(nodes, drop_source = True):
# ont_source (neo4j graph): ont_id (OLS ID)
id2ontid = {
# ANAT
'UBERON': 'UBERON',
# CHEM
'CHEBI': 'CHEBI',
# DISO
'MP': 'mp',
'FBbt': 'FBbt',
'HP': 'hp',
'WBPhenotype': 'wbphenotype',
'FBcv': 'FBcv'
}
nodes['ont_source'] = nodes.node_id.apply(pull_ontsource)
nodes['ont_id'] = nodes['ont_source'].map(id2ontid)
if (drop_source):
return nodes.drop('ont_source', axis = 1)
else:
return nodes
nodes = get_ontid(nodes)
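# (Hedged sanity-check sketch, illustrative only) get_ontid on a hand-made frame:
# UBERON/HP ids are mapped to their OLS ontology ids, while gene ids are left as
# NaN since genes are translated later through mygene.info annotations.
toy_nodes = pd.DataFrame({'node_id': ['UBERON:0002107', 'HP:0000118', 'NCBIGene:55768'],
                          'node_type': ['ANAT', 'DISO', 'GENE']})
print(get_ontid(toy_nodes.copy()))  # ont_id column: 'UBERON', 'hp', NaN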
# [2] Pull gene annotations -----------------------------------------------------------------
# 2 purposes: 1) translate node_id (for genes) to standardized NCBI Entrez gene names
# 2) for each gene, pull associated gene ontology (GO) terms
# [3] Create ontology hierarchical levels for *all* possible terms in base ontologies -----------------------------------------------------------------
# Get ontology structures + hierarchy for all ontologies
# OLS ids
ont_ids = {
'ANAT': ['UBERON'],
'CHEM': ['CHEBI'],
'DISO': ['FBcv', 'wbphenotype', 'FBbt', 'mp', 'hp'],
'GENE': ['go']
}
def create_ont_dict(ont_ids, output_dir, merge=False):
files = sorted(os.listdir(output_dir))
# little helper to see if file has already been generated.
def check_exists(files, ont_id, file_type):
file_name = [file_name for idx, file_name in enumerate(
files) if ont_id + '_' + file_type in file_name]
if(len(file_name) == 1):
return file_name[0]
elif (len(file_name) > 1):
return file_name[-1]
else:
return False
# create placeholder for term dictionaries
ont_terms = []
# create placeholder for term parents
parents = {}
# create placeholder for term ancestors
ancestors = []
for ont_type, ont_ids in ont_ids.items():
print('\n\n---' + ont_type + '---')
for ont_id in ont_ids:
print('\n*' + ont_id + '*')
# -- terms --
term_file = check_exists(files, ont_id, 'terms')
if(term_file):
# file already exists; read it in
print('reading in term file')
ont_term = pd.read_csv(output_dir + term_file, sep = '\t')
else:
# create file
print('creating term file')
ont_term = ont.get_terms(ont_id, save_terms=True, output_dir=output_dir)
# either way, append info for merging w/ nodes.
ont_term['ont_id'] = ont_id
ont_term['node_type'] = ont_type
ont_terms.append(ont_term)
# -- parents --
parent_file = check_exists(files, ont_id, 'parents')
if(parent_file):
# file already exists; read it in
print('reading in parents file')
parents[ont_id] = pd.read_csv(output_dir + parent_file, sep='\t', index_col=0)
else:
# create file
print('creating parents file')
parents[ont_id] = ont.find_parents(ont_term, ont_id, save_terms=True, output_dir=output_dir)  # pass the terms dataframe for this ontology
# -- ancestors --
hierarchy_file = check_exists(files, ont_id, 'ancestors')
if(hierarchy_file):
if (hierarchy_file.find('TEMP') > -1):
start_idx = int(hierarchy_file.split('TEMPidx')[1].replace('.tsv', ''))
# file already exists but is incomplete; read it in
print('reading in partial ancestor hierarchical structure file')
ancestor_partial = pd.read_csv(output_dir + hierarchy_file, sep='\t', index_col=0)
print('creating the rest of the ancestor hierarchical structures, starting at index ' + str(start_idx))
ancestor = ont.find_ancestors(
parents[ont_id], ont_id=ont_id, save_terms=True, output_dir=output_dir, start_idx = start_idx)
# combine the two halves
ancestor = pd.concat([ancestor, ancestor_partial], ignore_index=True)
ancestor.to_csv(output_dir + str(pd.Timestamp.today().strftime('%F')) + '_' + ont_id + '_ancestors_all.tsv', sep='\t')
else:
# file already exists; read it in
print('reading in ancestor hierarchical structure file')
ancestor = pd.read_csv(output_dir + hierarchy_file, sep='\t', index_col=0)
else:
# create file
print('creating hierarchical structure file')
ancestor = ont.find_ancestors(
parents[ont_id], ont_id=ont_id, save_terms=True, output_dir=output_dir)
# either way, append info for merging w/ nodes.
ancestor['ont_id'] = ont_id
ancestor['node_type'] = ont_type
ancestors.append(ancestor)
ont_terms = pd.concat(ont_terms, ignore_index=True)
# append the roots before converting to DataFrame
roots = get_root_ancestors(ont_terms)
ancestors.append(roots)
ancestors = pd.concat(ancestors, ignore_index=True)
if(merge):
# merge together ontology terms + hierarchical levels
merged =
|
pd.merge(ont_terms, ancestors, on=["node_type", "ont_id", "id"], how="outer", indicator=True)
|
pandas.merge
|
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class herited by most other class. Includes handy helper functions
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
return '_'.join(parameters.values())
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
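# Hedged, standalone sketch (not called by the pipeline) of what Metrics._bootstrap
# estimates: the mean and spread of a metric across bootstrap resamples of (y, pred).
def _demo_bootstrap_r2(n_iterations=100, seed=0):
    rng = np.random.RandomState(seed)
    y = rng.normal(size=200)
    pred = y + rng.normal(scale=0.5, size=200)
    data = pd.DataFrame({'y': y, 'pred': pred})
    scores = []
    for _ in range(n_iterations):
        data_i = resample(data, replace=True, n_samples=len(data.index))
        scores.append(r2_score(data_i['y'], data_i['pred']))
    return np.mean(scores), np.std(scores)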
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformatting the rows and columns
- splitting the dataset into folds for the future cross-validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formatting the demographics columns (age, sex and ethnicity)
- reformatting the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'][self.data_raw['Sex_genetic'].isna()] = \
self.data_raw['Sex'][self.data_raw['Sex_genetic'].isna()]
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0']
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
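# Hedged illustration (standalone helper, never called by the pipeline) of the age
# computation in PreprocessingMain._compute_age: days between the visit date and
# the 15th of the birth month, divided by 365.25.
def _demo_age_precision():
    date_of_birth = datetime(1955, 7, 15)
    visit = datetime.strptime('2010-03-02', '%Y-%m-%d')
    return (visit - date_of_birth).days / 365.25  # roughly 54.6 years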
class PreprocessingImagesIDs(Basics):
"""
Splits the different image datasets into folds for the future cross-validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (where most of the medical images are)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: the i-th data fold is used as the *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
# for paired organs, take the union of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
data = data[data['Data_quality'].notna()]  # keep rows whose image quality was recorded
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
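# Hedged, standalone sketch of the normalization performed in
# PreprocessingFolds._split_data: variables are z-scored with the *training*
# fold's mean and std, with std falling back to 1 when the training values are
# constant (the pipeline uses the pandas .std(), i.e. ddof=1).
def _demo_fold_normalization(train_values, other_values):
    train_values = pd.Series(train_values)
    mean = train_values.mean()
    std = train_values.std() if train_values.nunique() > 1 else 1
    return (pd.Series(other_values) - mean) / std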
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
Mirrors the PreprocessingMain class, but computes Death and FollowUpTime for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
dict_UKB_fields_to_names = {'40000-0.0': 'FollowUpDate', '34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3'}
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
# Format survival data
self.data_raw['Death'] = ~self.data_raw['FollowUpDate'].isna()
self.data_raw['FollowUpDate'][self.data_raw['FollowUpDate'].isna()] = '2020-04-27'
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
assert ('FollowUpDate.1' not in self.data_raw.columns)
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw.dropna(subset=['Year_of_birth'], inplace=True)
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpDate'] - self.data_raw[
'Date_attended_center_' + i]
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpTime_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth', 'FollowUpDate'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _concatenate_instances(self):
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw.dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
dict_names = {}
features = ['Age', 'FollowUpTime']
for feature in features:
dict_names[feature + '_' + i] = feature
self.dict_names = dict_names
df_i.rename(columns=dict_names, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[['id', 'eid', 'instance'] + self.survival_vars]
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Add * instance for eids
survival_eids = self.data_features[self.data_features['instance'] == '0']
survival_eids['instance'] = '*'
survival_eids['id'] = survival_eids['eid'] + '_' + survival_eids['instance']
self.data_features = self.data_features.append(survival_eids)
def generate_data(self):
# Formatting
self._preprocessing()
self._add_physicalactivity_instances()
self._compute_age()
self._concatenate_instances()
# save data
self.data_features.to_csv('../data/data_survival.csv', index=False)
class MyImageDataGenerator(Basics, Sequence, ImageDataGenerator):
"""
Helper class: custom data generator for images.
It handles several custom features such as:
- provides batches of not only the images, but also the scalar data (e.g. demographics) that corresponds to them
- performs random shuffling while making sure that no leftover data (the remainder modulo the batch size)
goes unused
- handles paired data for paired organs (e.g. left/right eyes)
"""
def __init__(self, target=None, organ=None, view=None, data_features=None, n_samples_per_subepoch=None,
batch_size=None, training_mode=None, side_predictors=None, dir_images=None, images_width=None,
images_height=None, data_augmentation=False, data_augmentation_factor=None, seed=None):
# Parameters
Basics.__init__(self)
self.target = target
if target in self.targets_regression:
self.labels = data_features[target]
else:
self.labels = data_features[target + '_raw']
self.organ = organ
self.view = view
self.training_mode = training_mode
self.data_features = data_features
self.list_ids = data_features.index.values
self.batch_size = batch_size
# for paired organs, take half as many ids (two images for each id), and add organ_side as a side predictor
if organ + '_' + view in self.left_right_organs_views:
self.data_features['organ_side'] = np.nan
self.n_ids_batch = batch_size // 2
else:
self.n_ids_batch = batch_size
if self.training_mode & (n_samples_per_subepoch is not None): # during training, 1 epoch = number of samples
self.steps = math.ceil(n_samples_per_subepoch / batch_size)
else: # during prediction and other tasks, an epoch is defined as all the samples being seen once and only once
self.steps = math.ceil(len(self.list_ids) / self.n_ids_batch)
# learning_rate_patience
if n_samples_per_subepoch is not None:
self.n_subepochs_per_epoch = math.ceil(len(self.data_features.index) / n_samples_per_subepoch)
# initiate the indices and shuffle the ids
self.shuffle = training_mode # Only shuffle if the model is being trained. Otherwise no need.
self.indices = np.arange(len(self.list_ids))
self.idx_end = 0 # Keep track of last indice to permute indices accordingly at the end of epoch.
if self.shuffle:
np.random.shuffle(self.indices)
# Input for side NN and CNN
self.side_predictors = side_predictors
self.dir_images = dir_images
self.images_width = images_width
self.images_height = images_height
# Data augmentation
self.data_augmentation = data_augmentation
self.data_augmentation_factor = data_augmentation_factor
self.seed = seed
# Parameters for data augmentation: (rotation range, width shift range, height shift range, zoom range)
self.augmentation_parameters = \
pd.DataFrame(index=['Brain_MRI', 'Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Heart_MRI',
'Abdomen_Liver', 'Abdomen_Pancreas', 'Musculoskeletal_Spine', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees', 'Musculoskeletal_FullBody', 'PhysicalActivity_FullWeek',
'PhysicalActivity_Walking'],
columns=['rotation', 'width_shift', 'height_shift', 'zoom'])
self.augmentation_parameters.loc['Brain_MRI', :] = [10, 0.05, 0.1, 0.0]
self.augmentation_parameters.loc['Eyes_Fundus', :] = [20, 0.02, 0.02, 0]
self.augmentation_parameters.loc['Eyes_OCT', :] = [30, 0.1, 0.2, 0]
self.augmentation_parameters.loc[['Arterial_Carotids'], :] = [0, 0.2, 0.0, 0.0]
self.augmentation_parameters.loc[['Heart_MRI', 'Abdomen_Liver', 'Abdomen_Pancreas',
'Musculoskeletal_Spine'], :] = [10, 0.1, 0.1, 0.0]
self.augmentation_parameters.loc[['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], :] = [10, 0.1, 0.1, 0.1]
self.augmentation_parameters.loc[['Musculoskeletal_FullBody'], :] = [10, 0.05, 0.02, 0.0]
self.augmentation_parameters.loc[['PhysicalActivity_FullWeek'], :] = [0, 0, 0, 0.0]
organ_view = organ + '_' + view
ImageDataGenerator.__init__(self, rescale=1. / 255.,
rotation_range=self.augmentation_parameters.loc[organ_view, 'rotation'],
width_shift_range=self.augmentation_parameters.loc[organ_view, 'width_shift'],
height_shift_range=self.augmentation_parameters.loc[organ_view, 'height_shift'],
zoom_range=self.augmentation_parameters.loc[organ_view, 'zoom'])
def __len__(self):
return self.steps
def on_epoch_end(self):
_ = gc.collect()
self.indices = np.concatenate([self.indices[self.idx_end:], self.indices[:self.idx_end]])
def _generate_image(self, path_image):
img = load_img(path_image, target_size=(self.images_width, self.images_height), color_mode='rgb')
Xi = img_to_array(img)
if hasattr(img, 'close'):
img.close()
if self.data_augmentation:
params = self.get_random_transform(Xi.shape)
Xi = self.apply_transform(Xi, params)
Xi = self.standardize(Xi)
return Xi
def _data_generation(self, list_ids_batch):
# initialize empty matrices
n_samples_batch = min(len(list_ids_batch), self.batch_size)
X = np.empty((n_samples_batch, self.images_width, self.images_height, 3)) * np.nan
x = np.empty((n_samples_batch, len(self.side_predictors))) * np.nan
y = np.empty((n_samples_batch, 1)) * np.nan
# fill the matrices sample by sample
for i, ID in enumerate(list_ids_batch):
y[i] = self.labels[ID]
x[i] = self.data_features.loc[ID, self.side_predictors]
if self.organ + '_' + self.view in self.left_right_organs_views:
if i % 2 == 0:
path = self.dir_images + 'right/'
x[i][-1] = 0
else:
path = self.dir_images + 'left/'
x[i][-1] = 1
if not os.path.exists(path + ID + '.jpg'):
path = path.replace('/right/', '/left/') if i % 2 == 0 else path.replace('/left/', '/right/')
x[i][-1] = 1 - x[i][-1]
else:
path = self.dir_images
X[i, :, :, :] = self._generate_image(path_image=path + ID + '.jpg')
return [X, x], y
def __getitem__(self, index):
# Select the indices
idx_start = (index * self.n_ids_batch) % len(self.list_ids)
idx_end = (((index + 1) * self.n_ids_batch) - 1) % len(self.list_ids) + 1
if idx_start > idx_end:
# If this happens outside of training, that is a mistake
if not self.training_mode:
print('\nERROR: Outside of training, every sample should only be predicted once!')
sys.exit(1)
# Select part of the indices from the end of the epoch
indices = self.indices[idx_start:]
# Generate a new set of indices
# print('\nThe end of the data was reached within this batch, looping.')
if self.shuffle:
np.random.shuffle(self.indices)
# Complete the batch with samples from the new indices
indices = np.concatenate([indices, self.indices[:idx_end]])
else:
indices = self.indices[idx_start: idx_end]
if (idx_end == len(self.list_ids)) and self.shuffle:
# print('\nThe end of the data was reached. Shuffling for the next epoch.')
np.random.shuffle(self.indices)
# Keep track of last indice for end of subepoch
self.idx_end = idx_end
# Select the corresponding ids
list_ids_batch = [self.list_ids[i] for i in indices]
# For paired organs, two images (left, right eyes) are selected for each id.
if self.organ + '_' + self.view in self.left_right_organs_views:
list_ids_batch = [ID for ID in list_ids_batch for _ in ('right', 'left')]
return self._data_generation(list_ids_batch)
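# Hedged, minimal sketch (illustrative only) of the wrap-around indexing used in
# MyImageDataGenerator.__getitem__: when the number of samples is not a multiple
# of the batch size, the last batch is completed by looping back to the start of
# the index array instead of silently dropping the leftovers.
def _demo_wraparound_batches(n_samples=10, batch_size=4):
    indices = np.arange(n_samples)
    batches = []
    for step in range(math.ceil(n_samples / batch_size)):
        idx_start = (step * batch_size) % n_samples
        idx_end = ((step + 1) * batch_size - 1) % n_samples + 1
        if idx_start > idx_end:
            batches.append(np.concatenate([indices[idx_start:], indices[:idx_end]]))
        else:
            batches.append(indices[idx_start:idx_end])
    return batches  # e.g. [0..3], [4..7], [8, 9, 0, 1]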
class MyCSVLogger(Callback):
"""
Custom CSV Logger callback class for Keras training: appends to an existing file if one can be found. Allows keeping
track of training over several jobs.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
Callback.__init__(self)
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename, mode + self.file_flags, **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch', 'learning_rate'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch, 'learning_rate': eval(self.model.optimizer.lr)})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class MyModelCheckpoint(ModelCheckpoint):
"""
Custom checkpoint callback class for Keras training. Handles a baseline performance.
"""
def __init__(self, filepath, monitor='val_loss', baseline=-np.Inf, verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', save_freq='epoch'):
# Parameters
ModelCheckpoint.__init__(self, filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode, save_freq=save_freq)
if mode == 'min':
self.monitor_op = np.less
self.best = baseline
elif mode == 'max':
self.monitor_op = np.greater
self.best = baseline
else:
print('Error. mode for metric must be either min or max')
sys.exit(1)
class DeepLearning(Metrics):
"""
Core helper class to train models. Used to:
- build the data generators
- generate the CNN architectures
- load the weights
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, debug_mode=False):
# Initialization
Metrics.__init__(self)
tf.random.set_seed(self.seed)
# Model's version
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = int(n_fc_layers)
self.n_fc_nodes = int(n_fc_nodes)
self.optimizer = optimizer
self.learning_rate = float(learning_rate)
self.weight_decay = float(weight_decay)
self.dropout_rate = float(dropout_rate)
self.data_augmentation_factor = float(data_augmentation_factor)
self.outer_fold = None
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# NNet's architecture and weights
self.side_predictors = self.dict_side_predictors[target]
if self.organ + '_' + self.view in self.left_right_organs_views:
self.side_predictors.append('organ_side')
self.dict_final_activations = {'regression': 'linear', 'binary': 'sigmoid', 'multiclass': 'softmax',
'saliency': 'linear'}
self.path_load_weights = None
self.keras_weights = None
# Generators
self.debug_mode = debug_mode
self.debug_fraction = 0.005
self.DATA_FEATURES = {}
self.mode = None
self.n_cpus = len(os.sched_getaffinity(0))
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
# define dictionary to fit the architecture's input size to the images sizes (take min (height, width))
self.dict_organ_view_transformation_to_image_size = {
'Eyes_Fundus_Raw': (316, 316), # initial size (1388, 1388)
'Eyes_OCT_Raw': (312, 320), # initial size (500, 512)
'Musculoskeletal_Spine_Sagittal': (466, 211), # initial size (1513, 684)
'Musculoskeletal_Spine_Coronal': (315, 313), # initial size (724, 720)
'Musculoskeletal_Hips_MRI': (329, 303), # initial size (626, 680)
'Musculoskeletal_Knees_MRI': (347, 286) # initial size (851, 700)
}
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference'],
(316, 316))) # initial size (88, 88)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis', 'Arterial_Carotids_CIMT120',
'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis'],
(337, 291))) # initial size (505, 436)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast'],
(316, 316))) # initial size (200, 200)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast'], (288, 364))) # initial size (288, 364)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast'], (288, 350))) # initial size (288, 350)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Musculoskeletal_FullBody_Figure', 'Musculoskeletal_FullBody_Skeleton',
'Musculoskeletal_FullBody_Flesh', 'Musculoskeletal_FullBody_Mixed'],
(541, 181))) # initial size (811, 272)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min',
'PhysicalActivity_FullWeek_RecurrencePlots1min'],
(316, 316))) # initial size (316, 316)
self.dict_architecture_to_image_size = {'MobileNet': (224, 224), 'MobileNetV2': (224, 224),
'NASNetMobile': (224, 224), 'NASNetLarge': (331, 331)}
if self.architecture in ['MobileNet', 'MobileNetV2', 'NASNetMobile', 'NASNetLarge']:
self.image_width, self.image_height = self.dict_architecture_to_image_size[architecture]
else:
self.image_width, self.image_height = \
self.dict_organ_view_transformation_to_image_size[organ + '_' + view + '_' + transformation]
# define dictionary of batch sizes to fit as many samples as the model's architecture allows
self.dict_batch_sizes = {
# Default, applies to all images with resized input ~100,000 pixels
'Default': {'VGG16': 32, 'VGG19': 32, 'DenseNet121': 16, 'DenseNet169': 16, 'DenseNet201': 16,
'Xception': 32, 'InceptionV3': 32, 'InceptionResNetV2': 8, 'ResNet50': 32, 'ResNet101': 16,
'ResNet152': 16, 'ResNet50V2': 32, 'ResNet101V2': 16, 'ResNet152V2': 16, 'ResNeXt50': 4,
'ResNeXt101': 8, 'EfficientNetB7': 4,
'MobileNet': 128, 'MobileNetV2': 64, 'NASNetMobile': 64, 'NASNetLarge': 4}}
# Define batch size
if organ + '_' + view in self.dict_batch_sizes.keys():
self.batch_size = self.dict_batch_sizes[organ + '_' + view][architecture]
else:
self.batch_size = self.dict_batch_sizes['Default'][architecture]
# double the batch size for the teslaM40 cores that have bigger memory
if len(GPUtil.getGPUs()) > 0:  # make sure GPUs are available (not always true, e.g. when debugging)
if GPUtil.getGPUs()[0].memoryTotal > 20000:
self.batch_size *= 2
# Define number of ids per batch (half as many for paired organs, since each id yields a left and a right sample)
self.n_ids_batch = self.batch_size
if organ + '_' + view in self.left_right_organs_views:
self.n_ids_batch //= 2
# Define number of samples per subepoch
if debug_mode:
self.n_samples_per_subepoch = self.batch_size * 4
else:
self.n_samples_per_subepoch = 32768
if organ + '_' + view in self.left_right_organs_views:
self.n_samples_per_subepoch //= 2
# dict to decide which field is used to generate the ids when several targets share the same ids
self.dict_target_to_ids = dict.fromkeys(['Age', 'Sex'], 'Age')
# Note: R-Squared and F1-Score are not available, because their batch based values are misleading.
# For some reason, Sensitivity and Specificity are not available either. Might implement later.
self.dict_losses_K = {'MSE': MeanSquaredError(name='MSE'),
'Binary-Crossentropy': BinaryCrossentropy(name='Binary-Crossentropy')}
self.dict_metrics_K = {'R-Squared': RSquare(name='R-Squared', y_shape=(1,)),
'RMSE': RootMeanSquaredError(name='RMSE'),
'F1-Score': F1Score(name='F1-Score', num_classes=1, dtype=tf.float32),
'ROC-AUC': AUC(curve='ROC', name='ROC-AUC'),
'PR-AUC': AUC(curve='PR', name='PR-AUC'),
'Binary-Accuracy': BinaryAccuracy(name='Binary-Accuracy'),
'Precision': Precision(name='Precision'),
'Recall': Recall(name='Recall'),
'True-Positives': TruePositives(name='True-Positives'),
'False-Positives': FalsePositives(name='False-Positives'),
'False-Negatives': FalseNegatives(name='False-Negatives'),
'True-Negatives': TrueNegatives(name='True-Negatives')}
# Metrics
self.prediction_type = self.dict_prediction_types[target]
self.loss_name = self.dict_losses_names[self.prediction_type]
self.loss_function = self.dict_losses_K[self.loss_name]
self.main_metric_name = self.dict_main_metrics_names_K[target]
self.main_metric_mode = self.main_metrics_modes[self.main_metric_name]
self.main_metric = self.dict_metrics_K[self.main_metric_name]
self.metrics_names = [self.main_metric_name]
self.metrics = [self.dict_metrics_K[metric_name] for metric_name in self.metrics_names]
# Optimizers
self.optimizers = {'Adam': Adam, 'RMSprop': RMSprop, 'Adadelta': Adadelta}
# Model
self.model = None
@staticmethod
def _append_ext(fn):
return fn + ".jpg"
def _load_data_features(self):
for fold in self.folds:
self.DATA_FEATURES[fold] = pd.read_csv(
self.path_data + 'data-features_' + self.organ + '_' + self.view + '_' + self.transformation + '_' +
self.dict_target_to_ids[self.target] + '_' + fold + '_' + self.outer_fold + '.csv')
for col_name in self.id_vars:
self.DATA_FEATURES[fold][col_name] = self.DATA_FEATURES[fold][col_name].astype(str)
self.DATA_FEATURES[fold].set_index('id', drop=False, inplace=True)
def _take_subset_to_debug(self):
for fold in self.folds:
# use +1 or +2 to test the leftovers pipeline
leftovers_extra = {'train': 0, 'val': 1, 'test': 2}
n_batches = 2
n_limit_fold = leftovers_extra[fold] + self.batch_size * n_batches
self.DATA_FEATURES[fold] = self.DATA_FEATURES[fold].iloc[:n_limit_fold, :]
def _generate_generators(self, DATA_FEATURES):
GENERATORS = {}
for fold in self.folds:
# do not generate a generator if there are no samples (can happen for leftovers generators)
if fold not in DATA_FEATURES.keys():
continue
# parameters
training_mode = True if self.mode == 'model_training' else False
if (fold == 'train') & (self.mode == 'model_training') & \
(self.organ + '_' + self.view not in self.organsviews_not_to_augment):
data_augmentation = True
else:
data_augmentation = False
# define batch size for testing: data is split between a part that fits in batches, and leftovers
if self.mode == 'model_testing':
if self.organ + '_' + self.view in self.left_right_organs_views:
n_samples = len(DATA_FEATURES[fold].index) * 2
else:
n_samples = len(DATA_FEATURES[fold].index)
batch_size_fold = min(self.batch_size, n_samples)
else:
batch_size_fold = self.batch_size
if (fold == 'train') & (self.mode == 'model_training'):
n_samples_per_subepoch = self.n_samples_per_subepoch
else:
n_samples_per_subepoch = None
# generator
GENERATORS[fold] = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=DATA_FEATURES[fold], n_samples_per_subepoch=n_samples_per_subepoch,
batch_size=batch_size_fold, training_mode=training_mode,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=data_augmentation,
data_augmentation_factor=self.data_augmentation_factor, seed=self.seed)
return GENERATORS
def _generate_class_weights(self):
if self.dict_prediction_types[self.target] == 'binary':
self.class_weights = {}
counts = self.DATA_FEATURES['train'][self.target + '_raw'].value_counts()
n_total = counts.sum()
            # weight each class inversely proportionally to its prevalence, normalized so that the weights average to 1
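            # Worked example (illustrative numbers, not from the actual dataset): with 900 negative and
            # 100 positive training samples, the weights are 1000/(900*2) ~= 0.56 and 1000/(100*2) = 5.0,
            # so both classes contribute equally overall and the weights average to 1 per sample.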
for i in counts.index.values:
self.class_weights[i] = n_total / (counts.loc[i] * len(counts.index))
def _generate_cnn(self):
# define the arguments
# take special initial weights for EfficientNetB7 (better)
if (self.architecture == 'EfficientNetB7') & (self.keras_weights == 'imagenet'):
w = 'noisy-student'
else:
w = self.keras_weights
kwargs = {"include_top": False, "weights": w, "input_shape": (self.image_width, self.image_height, 3)}
if self.architecture in ['ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'ResNeXt50', 'ResNeXt101']:
import tensorflow.keras
kwargs.update(
{"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers,
"models": tensorflow.keras.models, "utils": tensorflow.keras.utils})
# load the architecture builder
if self.architecture == 'VGG16':
from tensorflow.keras.applications.vgg16 import VGG16 as ModelBuilder
elif self.architecture == 'VGG19':
from tensorflow.keras.applications.vgg19 import VGG19 as ModelBuilder
elif self.architecture == 'DenseNet121':
from tensorflow.keras.applications.densenet import DenseNet121 as ModelBuilder
elif self.architecture == 'DenseNet169':
from tensorflow.keras.applications.densenet import DenseNet169 as ModelBuilder
elif self.architecture == 'DenseNet201':
from tensorflow.keras.applications.densenet import DenseNet201 as ModelBuilder
elif self.architecture == 'Xception':
from tensorflow.keras.applications.xception import Xception as ModelBuilder
elif self.architecture == 'InceptionV3':
from tensorflow.keras.applications.inception_v3 import InceptionV3 as ModelBuilder
elif self.architecture == 'InceptionResNetV2':
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as ModelBuilder
elif self.architecture == 'ResNet50':
from keras_applications.resnet import ResNet50 as ModelBuilder
elif self.architecture == 'ResNet101':
from keras_applications.resnet import ResNet101 as ModelBuilder
elif self.architecture == 'ResNet152':
from keras_applications.resnet import ResNet152 as ModelBuilder
elif self.architecture == 'ResNet50V2':
from keras_applications.resnet_v2 import ResNet50V2 as ModelBuilder
elif self.architecture == 'ResNet101V2':
from keras_applications.resnet_v2 import ResNet101V2 as ModelBuilder
elif self.architecture == 'ResNet152V2':
from keras_applications.resnet_v2 import ResNet152V2 as ModelBuilder
elif self.architecture == 'ResNeXt50':
from keras_applications.resnext import ResNeXt50 as ModelBuilder
elif self.architecture == 'ResNeXt101':
from keras_applications.resnext import ResNeXt101 as ModelBuilder
elif self.architecture == 'EfficientNetB7':
from efficientnet.tfkeras import EfficientNetB7 as ModelBuilder
        # The following models have a fixed input size requirement
elif self.architecture == 'NASNetMobile':
from tensorflow.keras.applications.nasnet import NASNetMobile as ModelBuilder
elif self.architecture == 'NASNetLarge':
from tensorflow.keras.applications.nasnet import NASNetLarge as ModelBuilder
elif self.architecture == 'MobileNet':
from tensorflow.keras.applications.mobilenet import MobileNet as ModelBuilder
elif self.architecture == 'MobileNetV2':
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as ModelBuilder
else:
print('Architecture does not exist.')
sys.exit(1)
# build the model's base
cnn = ModelBuilder(**kwargs)
x = cnn.output
# complete the model's base
if self.architecture in ['VGG16', 'VGG19']:
x = Flatten()(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
else:
x = GlobalAveragePooling2D()(x)
if self.architecture == 'EfficientNetB7':
x = Dropout(self.dropout_rate)(x)
cnn_output = x
return cnn.input, cnn_output
def _generate_side_nn(self):
side_nn = Sequential()
side_nn.add(Dense(16, input_dim=len(self.side_predictors), activation="relu",
kernel_regularizer=regularizers.l2(self.weight_decay)))
return side_nn.input, side_nn.output
def _complete_architecture(self, cnn_input, cnn_output, side_nn_input, side_nn_output):
x = concatenate([cnn_output, side_nn_output])
x = Dropout(self.dropout_rate)(x)
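        # The fully connected head shrinks by a factor of 4 at each layer: the i-th width is
        # n_fc_nodes * 4**(n_fc_layers - 1 - i), e.g. (illustrative values) n_fc_layers=3 and
        # n_fc_nodes=64 give layer widths [1024, 256, 64].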
for n in [int(self.n_fc_nodes * (2 ** (2 * (self.n_fc_layers - 1 - i)))) for i in range(self.n_fc_layers)]:
x = Dense(n, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
# scale the dropout proportionally to the number of nodes in a layer. No dropout for the last layers
if n > 16:
x = Dropout(self.dropout_rate * n / 1024)(x)
predictions = Dense(1, activation=self.dict_final_activations[self.prediction_type],
kernel_regularizer=regularizers.l2(self.weight_decay))(x)
self.model = Model(inputs=[cnn_input, side_nn_input], outputs=predictions)
def _generate_architecture(self):
cnn_input, cnn_output = self._generate_cnn()
side_nn_input, side_nn_output = self._generate_side_nn()
self._complete_architecture(cnn_input=cnn_input, cnn_output=cnn_output, side_nn_input=side_nn_input,
side_nn_output=side_nn_output)
def _load_model_weights(self):
try:
self.model.load_weights(self.path_load_weights)
except (FileNotFoundError, TypeError):
# load backup weights if the main weights are corrupted
try:
self.model.load_weights(self.path_load_weights.replace('model-weights', 'backup-model-weights'))
except FileNotFoundError:
print('Error. No file was found. imagenet weights should have been used. Bug somewhere.')
sys.exit(1)
@staticmethod
def clean_exit():
# exit
print('\nDone.\n')
print('Killing JOB PID with kill...')
os.system('touch ../eo/' + os.environ['SLURM_JOBID'])
os.system('kill ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB PID with kill -9...')
os.system('kill -9 ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB ID')
os.system('scancel ' + os.environ['SLURM_JOBID'])
time.sleep(60)
print('Everything failed to kill the job. Hanging there until hitting walltime...')
class Training(DeepLearning):
"""
Class to train CNN models:
- Generates the architecture
- Loads the best last weights so that a model can be trained over several jobs
- Generates the callbacks
- Compiles the model
- Trains the model
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False, transfer_learning=None,
continue_training=True, display_full_metrics=True):
# parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.version = self.version + '_' + str(outer_fold)
# NNet's architecture's weights
self.continue_training = continue_training
self.transfer_learning = transfer_learning
self.list_parameters_to_match = ['organ', 'transformation', 'view']
# dict to decide in which order targets should be used when trying to transfer weight from a similar model
self.dict_alternative_targets_for_transfer_learning = {'Age': ['Age', 'Sex'], 'Sex': ['Sex', 'Age']}
# Generators
self.folds = ['train', 'val']
self.mode = 'model_training'
self.class_weights = None
self.GENERATORS = None
# Metrics
self.baseline_performance = None
if display_full_metrics:
self.metrics_names = self.dict_metrics_names_K[self.prediction_type]
# Model
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '.h5'
if debug_mode:
self.path_save_weights = self.path_data + 'model-weights-debug.h5'
else:
self.path_save_weights = self.path_data + 'model-weights_' + self.version + '.h5'
self.n_epochs_max = 100000
self.callbacks = None
# Load and preprocess the data, build the generators
def data_preprocessing(self):
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._generate_class_weights()
self.GENERATORS = self._generate_generators(self.DATA_FEATURES)
# Determine which weights to load, if any.
def _weights_for_transfer_learning(self):
print('Looking for models to transfer weights from...')
# define parameters
parameters = self._version_to_parameters(self.version)
# continue training if possible
if self.continue_training and os.path.exists(self.path_load_weights):
print('Loading the weights from the model\'s previous training iteration.')
return
        # Initialize the weights using the weights from other successful hyperparameter combinations
if self.transfer_learning == 'hyperparameters':
            # Check if the same model with other hyperparameters has already been trained. Pick the best for transfer.
params = self.version.split('_')
params_tl_idx = \
[i for i in range(len(names_model_parameters))
if any(names_model_parameters[i] == p for p in
['optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor'])]
for idx in params_tl_idx:
params[idx] = '*'
versions = '../eo/MI02_' + '_'.join(params) + '.out'
files = glob.glob(versions)
if self.main_metric_mode == 'min':
best_perf = np.Inf
else:
best_perf = -np.Inf
for file in files:
hand = open(file, 'r')
# find best last performance
final_improvement_line = None
baseline_performance_line = None
for line in hand:
line = line.rstrip()
if re.search('Baseline validation ' + self.main_metric_name + ' = ', line):
baseline_performance_line = line
if re.search('val_' + self.main_metric_name + ' improved from', line):
final_improvement_line = line
hand.close()
if final_improvement_line is not None:
perf = float(final_improvement_line.split(' ')[7].replace(',', ''))
elif baseline_performance_line is not None:
perf = float(baseline_performance_line.split(' ')[-1])
else:
continue
# Keep track of the file with the best performance
if self.main_metric_mode == 'min':
update = perf < best_perf
else:
update = perf > best_perf
if update:
best_perf = perf
self.path_load_weights = \
file.replace('../eo/', self.path_data).replace('MI02', 'model-weights').replace('.out', '.h5')
if best_perf not in [-np.Inf, np.Inf]:
                print('Transferring the weights from: ' + self.path_load_weights + ', with ' + self.main_metric_name +
' = ' + str(best_perf))
return
# Initialize the weights based on models trained on different datasets, ranked by similarity
if self.transfer_learning == 'datasets':
while True:
# print('Matching models for the following criterias:');
# print(['architecture', 'target'] + list_parameters_to_match)
# start by looking for models trained on the same target, then move to other targets
for target_to_load in self.dict_alternative_targets_for_transfer_learning[parameters['target']]:
# print('Target used: ' + target_to_load)
parameters_to_match = parameters.copy()
parameters_to_match['target'] = target_to_load
# load the ranked performances table to select the best performing model among the similar
# models available
path_performances_to_load = self.path_data + 'PERFORMANCES_ranked_' + \
parameters_to_match['target'] + '_' + 'val' + '.csv'
try:
Performances = pd.read_csv(path_performances_to_load)
Performances['organ'] = Performances['organ'].astype(str)
except FileNotFoundError:
# print("Could not load the file: " + path_performances_to_load)
break
# iteratively get rid of models that are not similar enough, based on the list
for parameter in ['architecture', 'target'] + self.list_parameters_to_match:
Performances = Performances[Performances[parameter] == parameters_to_match[parameter]]
# if at least one model is similar enough, load weights from the best of them
if len(Performances.index) != 0:
                        self.path_load_weights = self.path_data + 'model-weights_' + Performances['version'].iloc[0] + '.h5'
self.keras_weights = None
                        print('Transferring the weights from: ' + self.path_load_weights)
return
# if no similar model was found, try again after getting rid of the last selection criteria
if len(self.list_parameters_to_match) == 0:
print('No model found for transfer learning.')
break
self.list_parameters_to_match.pop()
# Otherwise use imagenet weights to initialize
print('Using imagenet weights.')
        # no pre-trained weights file to load; Keras will initialize the network from the imagenet weights
self.path_load_weights = None
self.keras_weights = 'imagenet'
def _compile_model(self):
# if learning rate was reduced with success according to logger, start with this reduced learning rate
if self.path_load_weights is not None:
path_logger = self.path_load_weights.replace('model-weights', 'logger').replace('.h5', '.csv')
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
if os.path.exists(path_logger):
try:
logger = pd.read_csv(path_logger)
best_log = \
logger[logger['val_' + self.main_metric_name] == logger['val_' + self.main_metric_name].max()]
lr = best_log['learning_rate'].values[0]
except pd.errors.EmptyDataError:
os.remove(path_logger)
lr = self.learning_rate
else:
lr = self.learning_rate
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=lr, clipnorm=1.0), loss=self.loss_function,
metrics=self.metrics)
def _compute_baseline_performance(self):
# calculate initial val_loss value
if self.continue_training:
idx_metric_name = ([self.loss_name] + self.metrics_names).index(self.main_metric_name)
baseline_perfs = self.model.evaluate(self.GENERATORS['val'], steps=self.GENERATORS['val'].steps)
self.baseline_performance = baseline_perfs[idx_metric_name]
elif self.main_metric_mode == 'min':
self.baseline_performance = np.Inf
else:
self.baseline_performance = -np.Inf
print('Baseline validation ' + self.main_metric_name + ' = ' + str(self.baseline_performance))
def _define_callbacks(self):
if self.debug_mode:
path_logger = self.path_data + 'logger-debug.csv'
append = False
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
append = self.continue_training
csv_logger = MyCSVLogger(path_logger, separator=',', append=append)
model_checkpoint_backup = MyModelCheckpoint(self.path_save_weights.replace('model-weights',
'backup-model-weights'),
monitor='val_' + self.main_metric.name,
baseline=self.baseline_performance, verbose=1, save_best_only=True,
save_weights_only=True, mode=self.main_metric_mode,
save_freq='epoch')
model_checkpoint = MyModelCheckpoint(self.path_save_weights,
monitor='val_' + self.main_metric.name, baseline=self.baseline_performance,
verbose=1, save_best_only=True, save_weights_only=True,
mode=self.main_metric_mode, save_freq='epoch')
patience_reduce_lr = min(7, 3 * self.GENERATORS['train'].n_subepochs_per_epoch)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=patience_reduce_lr, verbose=1,
mode='min', min_delta=0, cooldown=0, min_lr=0)
early_stopping = EarlyStopping(monitor='val_' + self.main_metric.name, min_delta=0, patience=15, verbose=0,
mode=self.main_metric_mode, baseline=self.baseline_performance,
restore_best_weights=True)
self.callbacks = [csv_logger, model_checkpoint_backup, model_checkpoint, early_stopping, reduce_lr_on_plateau]
def build_model(self):
self._weights_for_transfer_learning()
self._generate_architecture()
# Load weights if possible
try:
load_weights = True if os.path.exists(self.path_load_weights) else False
except TypeError:
load_weights = False
if load_weights:
self._load_model_weights()
else:
# save transferred weights as default, in case no better weights are found
self.model.save_weights(self.path_save_weights.replace('model-weights', 'backup-model-weights'))
self.model.save_weights(self.path_save_weights)
self._compile_model()
self._compute_baseline_performance()
self._define_callbacks()
def train_model(self):
# garbage collector
_ = gc.collect()
# use more verbose when debugging
verbose = 1 if self.debug_mode else 2
# train the model
self.model.fit(self.GENERATORS['train'], steps_per_epoch=self.GENERATORS['train'].steps,
validation_data=self.GENERATORS['val'], validation_steps=self.GENERATORS['val'].steps,
shuffle=False, use_multiprocessing=False, workers=self.n_cpus, epochs=self.n_epochs_max,
class_weight=self.class_weights, callbacks=self.callbacks, verbose=verbose)
class PredictionsGenerate(DeepLearning):
"""
Generates the predictions for each model.
Unscales the predictions.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False):
# Initialize parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.mode = 'model_testing'
# Define dictionaries attributes for data, generators and predictions
self.DATA_FEATURES_BATCH = {}
self.DATA_FEATURES_LEFTOVERS = {}
self.GENERATORS_BATCH = None
self.GENERATORS_LEFTOVERS = None
self.PREDICTIONS = {}
def _split_batch_leftovers(self):
# split the samples into two groups: what can fit into the batch size, and the leftovers.
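        # e.g. (illustrative numbers) with 1000 ids and n_ids_batch = 32, the first 992 ids are predicted
        # in full batches and the remaining 1000 % 32 = 8 ids are handled as leftovers.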
for fold in self.folds:
n_leftovers = len(self.DATA_FEATURES[fold].index) % self.n_ids_batch
if n_leftovers > 0:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold].iloc[:-n_leftovers]
self.DATA_FEATURES_LEFTOVERS[fold] = self.DATA_FEATURES[fold].tail(n_leftovers)
else:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold] # special case for syntax if no leftovers
if fold in self.DATA_FEATURES_LEFTOVERS.keys():
del self.DATA_FEATURES_LEFTOVERS[fold]
def _generate_outerfold_predictions(self):
# prepare unscaling
if self.target in self.targets_regression:
mean_train = self.DATA_FEATURES['train'][self.target + '_raw'].mean()
std_train = self.DATA_FEATURES['train'][self.target + '_raw'].std()
else:
mean_train, std_train = None, None
# Generate predictions
for fold in self.folds:
print('Predicting samples from fold ' + fold + '.')
print(str(len(self.DATA_FEATURES[fold].index)) + ' samples to predict.')
print('Predicting batches: ' + str(len(self.DATA_FEATURES_BATCH[fold].index)) + ' samples.')
pred_batch = self.model.predict(self.GENERATORS_BATCH[fold], steps=self.GENERATORS_BATCH[fold].steps,
verbose=1)
if fold in self.GENERATORS_LEFTOVERS.keys():
print('Predicting leftovers: ' + str(len(self.DATA_FEATURES_LEFTOVERS[fold].index)) + ' samples.')
pred_leftovers = self.model.predict(self.GENERATORS_LEFTOVERS[fold],
steps=self.GENERATORS_LEFTOVERS[fold].steps, verbose=1)
pred_full = np.concatenate((pred_batch, pred_leftovers)).squeeze()
else:
pred_full = pred_batch.squeeze()
print('Predicted a total of ' + str(len(pred_full)) + ' samples.')
# take the average between left and right predictions for paired organs
if self.organ + '_' + self.view in self.left_right_organs_views:
pred_full = np.mean(pred_full.reshape(-1, 2), axis=1)
# unscale predictions
if self.target in self.targets_regression:
pred_full = pred_full * std_train + mean_train
# format the dataframe
self.DATA_FEATURES[fold]['pred'] = pred_full
self.PREDICTIONS[fold] = self.DATA_FEATURES[fold]
self.PREDICTIONS[fold]['id'] = [ID.replace('.jpg', '') for ID in self.PREDICTIONS[fold]['id']]
def _generate_predictions(self):
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '_' + self.outer_fold + '.h5'
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._load_model_weights()
self._split_batch_leftovers()
# generate the generators
self.GENERATORS_BATCH = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_BATCH)
if self.DATA_FEATURES_LEFTOVERS is not None:
self.GENERATORS_LEFTOVERS = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_LEFTOVERS)
self._generate_outerfold_predictions()
def _format_predictions(self):
for fold in self.folds:
perf_fun = self.dict_metrics_sklearn[self.dict_main_metrics_names[self.target]]
perf = perf_fun(self.PREDICTIONS[fold][self.target + '_raw'], self.PREDICTIONS[fold]['pred'])
print('The ' + fold + ' performance is: ' + str(perf))
# format the predictions
self.PREDICTIONS[fold].index.name = 'column_names'
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][['id', 'outer_fold', 'pred']]
def generate_predictions(self):
self._generate_architecture()
self._generate_predictions()
self._format_predictions()
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold + '_'
+ self.outer_fold + '.csv', index=False)
class PredictionsConcatenate(Basics):
"""
Concatenates the predictions coming from the different cross validation folds.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None):
# Initialize parameters
Basics.__init__(self)
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# Define dictionaries attributes for data, generators and predictions
self.PREDICTIONS = {}
def concatenate_predictions(self):
for fold in self.folds:
for outer_fold in self.outer_folds:
Predictions_fold = pd.read_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'_' + outer_fold + '.csv')
if fold in self.PREDICTIONS.keys():
self.PREDICTIONS[fold] = pd.concat([self.PREDICTIONS[fold], Predictions_fold])
else:
self.PREDICTIONS[fold] = Predictions_fold
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'.csv', index=False)
class PredictionsMerge(Basics):
"""
Merges the predictions from all models into a unified dataframe.
"""
def __init__(self, target=None, fold=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.data_features = None
self.list_models = None
self.Predictions_df_previous = None
self.Predictions_df = None
def _load_data_features(self):
self.data_features = pd.read_csv(self.path_data + 'data-features_instances.csv',
usecols=self.id_vars + self.demographic_vars)
for var in self.id_vars:
self.data_features[var] = self.data_features[var].astype(str)
self.data_features.set_index('id', drop=False, inplace=True)
self.data_features.index.name = 'column_names'
def _preprocess_data_features(self):
# For the training set, each sample is predicted n_CV_outer_folds times, so prepare a larger dataframe
if self.fold == 'train':
df_all_folds = None
for outer_fold in self.outer_folds:
df_fold = self.data_features.copy()
df_all_folds = df_fold if outer_fold == self.outer_folds[0] else df_all_folds.append(df_fold)
self.data_features = df_all_folds
def _load_previous_merged_predictions(self):
if os.path.exists(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold +
'.csv'):
self.Predictions_df_previous = pd.read_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' +
self.target + '_' + self.fold + '.csv')
self.Predictions_df_previous.drop(columns=['eid', 'instance'] + self.demographic_vars, inplace=True)
def _list_models(self):
# generate list of predictions that will be integrated in the Predictions dataframe
self.list_models = glob.glob(self.path_data + 'Predictions_instances_' + self.target + '_*_' + self.fold +
'.csv')
# get rid of ensemble models and models already merged
self.list_models = [model for model in self.list_models if ('*' not in model)]
if self.Predictions_df_previous is not None:
self.list_models = \
[model for model in self.list_models
if ('pred_' + '_'.join(model.split('_')[2:-1]) not in self.Predictions_df_previous.columns)]
self.list_models.sort()
def preprocessing(self):
self._load_data_features()
self._preprocess_data_features()
self._load_previous_merged_predictions()
self._list_models()
def merge_predictions(self):
# merge the predictions
print('There are ' + str(len(self.list_models)) + ' models to merge.')
i = 0
# define subgroups to accelerate merging process
list_subgroups = list(set(['_'.join(model.split('_')[3:7]) for model in self.list_models]))
for subgroup in list_subgroups:
print('Merging models from the subgroup ' + subgroup)
models_subgroup = [model for model in self.list_models if subgroup in model]
Predictions_subgroup = None
# merge the models one by one
for file_name in models_subgroup:
i += 1
version = '_'.join(file_name.split('_')[2:-1])
if self.Predictions_df_previous is not None and \
'pred_' + version in self.Predictions_df_previous.columns:
print('The model ' + version + ' has already been merged before.')
else:
print('Merging the ' + str(i) + 'th model: ' + version)
# load csv and format the predictions
prediction = pd.read_csv(self.path_data + file_name)
print('raw prediction\'s shape: ' + str(prediction.shape))
for var in ['id', 'outer_fold']:
prediction[var] = prediction[var].apply(str)
prediction.rename(columns={'pred': 'pred_' + version}, inplace=True)
# merge data frames
if Predictions_subgroup is None:
Predictions_subgroup = prediction
elif self.fold == 'train':
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer',
on=['id', 'outer_fold'])
else:
prediction.drop(['outer_fold'], axis=1, inplace=True)
                        # not supported for pandas versions > 0.23.4 for now
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer', on=['id'])
# merge group predictions data frames
if self.fold != 'train':
Predictions_subgroup.drop(['outer_fold'], axis=1, inplace=True)
if Predictions_subgroup is not None:
if self.Predictions_df is None:
self.Predictions_df = Predictions_subgroup
elif self.fold == 'train':
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer',
on=['id', 'outer_fold'])
else:
                    # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer', on=['id'])
print('Predictions_df\'s shape: ' + str(self.Predictions_df.shape))
# garbage collector
gc.collect()
# Merge with the previously merged predictions
if (self.Predictions_df_previous is not None) & (self.Predictions_df is not None):
if self.fold == 'train':
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer',
on=['id', 'outer_fold'])
else:
self.Predictions_df.drop(columns=['outer_fold'], inplace=True)
                # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer', on=['id'])
self.Predictions_df_previous = None
elif self.Predictions_df is None:
print('No new models to merge. Exiting.')
print('Done.')
sys.exit(0)
# Reorder the columns alphabetically
pred_versions = [col for col in self.Predictions_df.columns if 'pred_' in col]
pred_versions.sort()
id_cols = ['id', 'outer_fold'] if self.fold == 'train' else ['id']
self.Predictions_df = self.Predictions_df[id_cols + pred_versions]
def postprocessing(self):
# get rid of useless rows in data_features before merging to keep the memory requirements as low as possible
self.data_features = self.data_features[self.data_features['id'].isin(self.Predictions_df['id'].values)]
# merge data_features and predictions
if self.fold == 'train':
print('Starting to merge a massive dataframe')
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id', 'outer_fold'])
else:
            # not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id'])
print('Merging done')
# remove rows for which no prediction is available (should be none)
subset_cols = [col for col in self.Predictions_df.columns if 'pred_' in col]
self.Predictions_df.dropna(subset=subset_cols, how='all', inplace=True)
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_df.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_df[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def save_merged_predictions(self):
print('Writing the merged predictions...')
self.Predictions_df.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' +
self.fold + '.csv', index=False)
class PredictionsEids(Basics):
"""
Computes the average age prediction across samples from different instances for every participant.
(Scaled back to instance 0)
"""
def __init__(self, target=None, fold=None, debug_mode=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.debug_mode = debug_mode
self.Predictions = None
self.Predictions_chunk = None
self.pred_versions = None
self.res_versions = None
self.target_0s = None
self.Predictions_eids = None
self.Predictions_eids_previous = None
self.pred_versions_previous = None
def preprocessing(self):
# Load predictions
self.Predictions = pd.read_csv(
self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold + '.csv')
self.Predictions.drop(columns=['id'], inplace=True)
self.Predictions['eid'] = self.Predictions['eid'].astype(str)
self.Predictions.index.name = 'column_names'
self.pred_versions = [col for col in self.Predictions.columns.values if 'pred_' in col]
# Prepare target values on instance 0 as a reference
        target_0s = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', self.target])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 28 14:02:04 2021
@author: hk_nien
"""
import re
import numpy as np
import pandas as pd
import scipy.interpolate
import matplotlib.pyplot as plt
from tools import set_xaxis_dateformat
def load_tvt_data():
"""Return DataFrame with index date (mid-week 12:00), num_test, num_pos, f_pos."""
records = []
with open('data/TvT.txt') as f:
for li in f.readlines():
if li.startswith('#') or len(li) < 2:
continue
# typical line: "22-03-2021 - 28-03-2021 5081 16 0.3"
fields = li.split()
dates = [
pd.to_datetime(fields[i], format='%d-%m-%Y')
for i in [0, 2]
]
n_test = int(fields[3])
n_pos = int(fields[4])
date_mid = dates[0] + (dates[1]-dates[0])/2 + pd.Timedelta('12 h')
records.append((date_mid, dates[0], dates[1], n_test, n_pos))
df = pd.DataFrame.from_records(
records, columns=['Date_mid', 'Date_a', 'Date_b', 'num_test', 'num_pos']
)
if df.iloc[-1]['Date_b'] < pd.to_datetime('now') - pd.to_timedelta('9 d, 15:15:00'):
print(
'** Warning: TvT data may be outdated. Update data/TvT.txt from '
'RIVM weekly report at '
'https://www.rivm.nl/coronavirus-covid-19/actueel/'
'wekelijkse-update-epidemiologische-situatie-covid-19-in-nederland .'
)
df = df.set_index('Date_mid')
df['f_pos'] = df['num_pos'] / df['num_test']
return df
def get_R_from_TvT():
"""Return DataFrame with R estimate from TvT data.
Return DataFrame:
- index: datetime index (12:00)
- R: R estimate (one per week)
- R_err: estimated R error (2sigma), one per week.
- R_interp: interpolated R values (daily)
"""
df = load_tvt_data()
date0 = df.index[0]
# ts: day number since start date
    ts = (df.index - date0) / pd.Timedelta('1 d')
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:50:49 2021
@author: peter
"""
import numpy as np
import pandas as pd
from base import BaseModel, GarchBase
from stats import loglikelihood_normal, loglikelihood_student_t
from weights import Beta
from helper_functions import create_matrix
from datetime import datetime, timedelta
import time
import scipy.stats as stats
class MIDAS(BaseModel):
def __init__(self, lag = 22, plot = True, *args):
self.lag = lag
self.plot = plot
self.args = args
def initialize_params(self, X):
"""
        Create the initial parameters.
        Return a sequence of 1.0 values of the required length.
Parameters
----------
X : DataFrame
Pandas dataframe that contains all the regressors.
Returns
-------
init_params: numpy.array
            Numpy array that contains the required number of initial parameters.
"""
self.init_params = np.linspace(1.0, 1.0, int(1.0 + X.shape[1] * 2.0))
return self.init_params
def model_filter(self, params, x):
"""
        Evaluate the model's equation for the given parameters.
Parameters
----------
params : numpy.array
            Numpy array that contains the required number of parameters.
x : Dictionary
Dictionary that contains all the lagged regressors.
Returns
-------
model : numpy.array
            Numpy array that contains the values implied by the model specification.
"""
model = params[0]
for i in range(1, len(x) + 1):
model += params[2 * i - 1] * Beta().x_weighted(x['X{num}'.format(num = i)], [1.0, params[2 * i]])
return model
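    # In equation form, model_filter evaluates a standard MIDAS regression:
    #     y_t = b_0 + sum_i b_i * B(x_{i,t-1}, ..., x_{i,t-lag}; theta_i)
    # where B(.) is the Beta lag-polynomial weighting implemented in weights.Beta (first shape
    # parameter fixed to 1.0), b_i = params[2i-1] and theta_i = params[2i].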
def loglikelihood(self, params, X, y):
"""
        Calculate the value minimized during estimation. Despite the method's name,
        this is the sum of squared residuals (a least-squares objective) rather than a loglikelihood.
Parameters
----------
params : numpy.array
            Numpy array that contains the required number of parameters.
X : DataFrame
            Pandas dataframe that contains all the regressors.
        y : pandas.Series or numpy.array
            Sequence that contains the dependent variable.
        Returns
        -------
        float
            Sum of squared residuals between y and the fitted model.
"""
X = create_matrix(X, self.lag)
return np.sum((y - self.model_filter(params, X)) ** 2)
def predict(self, X):
X = create_matrix(X, self.lag)
return self.model_filter(self.optimized_params, X)
def simulate(self, params = [2.0, 0.5, 5.0], lag = 12, num = 500):
y = np.zeros(num)
x = np.exp(np.cumsum(np.random.normal(0.5, 2, num) / 100))
alpha, beta, theta = params[0], params[1], params[2]
for i in range(num):
if i < lag:
y[i] = alpha
else:
y[i] = alpha + beta * Beta().x_weighted(x[i - lag : i][::-1].reshape((1, lag)), [1.0, theta])
return x, y
class MIDAS_sim(BaseModel):
def __init__(self, lag = 22, plot = True, *args):
self.lag = lag
self.plot = plot
self.args = args
def initialize_params(self, X):
self.init_params = np.linspace(1, 1, 3)
return self.init_params
def model_filter(self, params, X, y):
if isinstance(y, int) or isinstance(y, float):
T = y
else:
T = len(y)
model = np.zeros(T)
for i in range(T):
model[i] = params[0] + params[1] * Beta().x_weighted(X[i * self.lag : (i + 1) * self.lag].reshape((1, self.lag)), [1.0, params[2]])
return model
def loglikelihood(self, params, X, y):
return np.sum((y - self.model_filter(params, X, y)) ** 2)
def simulate(self, params = [0.1, 0.3, 4.0], num = 500, K = 22):
X = np.zeros(num * K)
y = np.zeros(num)
for i in range(num * K):
if i == 0:
X[i] = np.random.normal()
else:
X[i] = 0.9 * X[i - 1] + np.random.normal()
for i in range(num):
y[i] = params[0] + params[1] * Beta().x_weighted(X[i * K : (i + 1) * K].reshape((1, K)), [1.0, params[2]]) + np.random.normal(scale = 0.7**2)
return X, y
def create_sims(self, number_of_sims = 500, length = 500, K = 22, params = [0.1, 0.3, 4.0]):
lls, b0, b1, th, runtime = np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims)
for i in range(number_of_sims):
np.random.seed(i)
X, y = self.simulate(params = params, num = length, K = K)
start = time.time()
self.fit(['pos', 'pos', 'pos'], X, y)
runtime[i] = time.time() - start
lls[i] = self.opt.fun
b0[i], b1[i], th[i] = self.optimized_params[0], self.optimized_params[1], self.optimized_params[2]
return pd.DataFrame(data = {'LogLike': lls,
'Beta0': b0,
'Beta1': b1,
'Theta':th})
def forecasting(self, X, k = 10):
X_n = np.zeros(k * 22)
for i in range(k * 22):
if i == 0:
X_n[i] = 0.9 * X[-1] + np.random.normal()
else:
X_n[i] = 0.9 * X_n[i - 1] + np.random.normal()
try:
y_hat = self.model_filter(self.optimized_params, X_n, k)
        except AttributeError:
            # The model has not been fitted yet; fall back to user-supplied parameters
            # (assumed to be given as a whitespace-separated list of numbers, e.g. "0.1 0.3 4.0").
            params = [float(p) for p in input('Please give the parameters: ').split()]
            y_hat = self.model_filter(params, X_n, k)
        return X_n, y_hat
class GARCH(BaseModel):
def __init__(self, plot = True, *args):
self.plot = plot
self.args = args
def initialize_params(self, y):
self.init_params = np.asarray([0.0, 0.05, 0.02, 0.95])
return self.init_params
def model_filter(self, params, y):
sigma2 = np.zeros(len(y))
resid = y - params[0]
for i in range(len(y)):
if i == 0:
sigma2[i] = params[1] / (1 - params[2] - params[3])
else:
sigma2[i] = params[1] + params[2] * resid[i - 1] ** 2 + params[3] * sigma2[i - 1]
return sigma2
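    # model_filter implements the usual GARCH(1,1) recursion on the demeaned returns eps_t = y_t - mu:
    #     sigma2_t = omega + alpha * eps_{t-1}^2 + beta * sigma2_{t-1}
    # with params = [mu, omega, alpha, beta] and sigma2_0 initialized at the unconditional variance
    # omega / (1 - alpha - beta).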
def loglikelihood(self, params, y):
sigma2 = self.model_filter(params, y)
resid = y - params[0]
return loglikelihood_normal(resid, sigma2)
def simulate(self, params = [0.0, 0.2, 0.2, 0.6], num = 500):
y = np.zeros(num)
state = np.zeros(num)
for i in range(num):
if i == 0:
state[i] = params[1] / (1 - params[2] - params[3])
else:
state[i] = params[1] + params[2] * y[i - 1] * y[i - 1] + params[3] * state[i - 1]
y[i] = stats.norm.rvs(loc = params[0], scale = np.sqrt(state[i]))
return y, state
def predict(self, X):
return self.model_filter(self.optimized_params, X)
class T_GARCH(BaseModel):
def __init__(self, plot = True, *args):
self.plot = plot
self.args = args
def initialize_params(self, y):
        self.init_params = np.asarray([0.0, 0.1, 0.02, 0.95, 3.75])
return self.init_params
def model_filter(self, params, y):
sigma2 = np.zeros(len(y))
resid = y - params[0]
for i in range(len(y)):
if i == 0:
sigma2[i] = params[1] / (1 - params[2] - params[3])
else:
sigma2[i] = params[1] + params[2] * resid[i - 1] ** 2 + params[3] * sigma2[i - 1]
return sigma2
def loglikelihood(self, params, y):
sigma2 = self.model_filter(params, y)
resid = y - params[0]
nu = params[4]
return loglikelihood_student_t(resid, sigma2, nu)
def simulate(self, params = [0.0, 0.2, 0.2, 0.6, 3.0], num = 500):
y = np.zeros(num)
state = np.zeros(num)
for i in range(num):
if i == 0:
state[i] = params[1] / (1 - params[2] - params[3])
else:
state[i] = params[1] + params[2] * y[i - 1] * y[i - 1] + params[3] * state[i - 1]
            y[i] = stats.t.rvs(params[4], loc = params[0], scale = np.sqrt(state[i]))
return y, state
def predict(self, X):
return self.model_filter(self.optimized_params, X)
class GARCH_MIDAS(BaseModel):
def __init__(self, lag = 22, plot = True, *args):
self.lag = lag
self.plot = plot
self.args = args
def initialize_params(self, X):
"""
        Create the initial parameters and collect the column indexes
        of the monthly and daily regressors.
Parameters
----------
X : DataFrame
Pandas dataframe that contains all the regressors
Returns
-------
init_params: numpy.array
            Numpy array that contains the required number of initial parameters.
"""
# Empty array for the column indexes of daily regressors
daily_index = np.array([])
# Empty array for the column indexes of monthly regressors
monthly_index = np.array([])
# Initial GARCH parameters
garch_params = np.array([0.05, 0.05, 0.02, 0.95])
        # An array that will collect the required number of MIDAS parameters for the modeling.
midas_params = np.array([1.0])
for i in range(X.shape[1]):
            # Calculate the ratio of the number of unique observations to the total number of observations
ratio = X.iloc[:, i].unique().shape[0] / X.shape[0]
            # Let's assume that the ratio for monthly observations will be close to 12/365,
            # so I set the critical point to 0.05.
if ratio <= 0.05:
midas_params = np.append(midas_params, [1.0])
monthly_index = np.append(monthly_index, i)
else:
midas_params = np.append(midas_params, [1.0, 1.0])
daily_index = np.append(daily_index, i)
self.monthly = monthly_index
self.daily = daily_index
self.init_params = np.append(garch_params, midas_params)
return self.init_params
def model_filter(self, params, X, y):
"""
        Evaluate the model's equation, computing the short-term (g)
        and long-term (tau) variance components along the way.
Parameters
----------
params : numpy.array
            Numpy array that contains the required number of parameters.
X : DataFrame
Pandas dataframe that contains all the regressors
y : pandas.Series or numpy.array
            Sequence that contains the dependent variable.
Returns
-------
sigma2 : numpy.array
            Numpy array that contains the conditional variances implied by the specification.
"""
# Array of zeros with length of the dependent variable
self.g = np.zeros(len(y))
resid = y - params[0]
sigma2 = np.zeros(len(y))
        # Empty list to collect the row positions of the daily observations belonging to each month
plc = []
uncond_var = params[1] / (1 - params[2] - params[3])
# 'per' is an array of periods (monthly). For example [(2010, 1), ...]
per = X.index.to_period('M')
# 'uniq' contains the unique dates (monthly)
uniq = np.asarray(per.unique())
# Array of zeros with length of the number of unique monthly dates
self.tau = np.zeros(len(uniq))
for t in range(len(uniq)):
if t == 0:
plc.append(np.where((per >= uniq[t].strftime('%Y-%m')) & (per < uniq[t + 1].strftime('%Y-%m')))[0])
                # 'new_d' is an empty array when t equals zero. Daily regressors are collected in 'new_d',
                # so for the first month I assume no knowledge about the past.
new_d = np.array([])
elif t != len(uniq) - 1:
plc.append(np.where((per >= uniq[t].strftime('%Y-%m')) & (per < uniq[t + 1].strftime('%Y-%m')))[0])
# 'dd' contain the values of daily regressors from the previous period.
dd = X.iloc[plc[t - 1], self.daily].values
if len(dd) < self.lag:
                    # Create a 'pad' variable to build matrices of the same size. This is a crucial step
                    # because e.g. January has more observations than February, so I assume
                    # that each month has a length equal to the lag.
pad = np.zeros((self.lag - len(dd), dd.shape[1]))
new_d = np.vstack([dd[::-1], pad]).T
else:
                    # If there are more observations than the lag, keep only the last `lag` of them
new_d = dd[len(dd) - self.lag:][::-1].T
else:
plc.append(np.where(per >= uniq[t].strftime('%Y-%m'))[0])
dd = X.iloc[plc[t - 1], self.daily].values
if len(dd) < self.lag:
pad = np.zeros((self.lag - len(dd), dd.shape[1]))
new_d = np.vstack([dd[::-1], pad]).T
else:
new_d = dd[len(dd) - self.lag:][::-1].T
            # First, add the intercept and the monthly variables to tau
self.tau[t] = params[4] + np.dot(X.iloc[plc[t], self.monthly].values[0], params[5 : 5 + len(self.monthly)])
            # Finally, add the daily observations from period t-1, weighted with the Beta function
for j in range(len(new_d)):
x = new_d[j].reshape((1, self.lag))
                self.tau[t] += params[5 + len(self.monthly) + j] * Beta().x_weighted(x, [1.0, params[5 + len(self.monthly) + len(self.daily) + j]])
for i in plc[t]:
if i == 0:
self.g[i] = uncond_var
sigma2[i] = self.g[i] * self.tau[t]
else:
self.g[i] = uncond_var * (1 - params[2] - params[3]) + params[2] * ((resid[i-1] ** 2) / self.tau[t]) + params[3] * self.g[i - 1]
sigma2[i] = self.g[i] * self.tau[t]
return sigma2
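    # The conditional variance is decomposed as sigma2_it = g_i * tau_t: tau_t is the monthly (long-run)
    # MIDAS component built from the monthly regressors and the lagged daily regressors, and g_i is a
    # mean-reverting GARCH(1,1)-type short-run component driven by the tau-deflated squared residuals.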
def loglikelihood(self, params, X, y):
sigma2 = self.model_filter(params, X, y)
resid = y - params[0]
return loglikelihood_normal(resid, sigma2)
def predict(self, X, y):
return self.model_filter(self.optimized_params, X, y)
class MGARCH(BaseModel):
def __init__(self, lag = 12, plot = True, *args):
self.lag = lag
self.plot = plot
self.args = args
def initialize_params(self, X):
try:
X_len = X.shape[1]
except:
X_len = 1
garch_params = np.array([0.1, 0.85])
midas_params = np.linspace(1.0, 1.0, int(1.0 + X_len * 2.0))
self.init_params = np.append(garch_params, midas_params)
return self.init_params
def model_filter(self, params, X, y):
self.g = np.zeros(len(y))
self.tau = np.zeros(len(X))
sigma2 = np.zeros(len(y))
try:
X_len = X.shape[1]
except:
X_len = 1
alpha1, beta1 = params[0], params[1]
resid = y
uncond_var = np.mean(y ** 2)
for t in range(len(X) - 1):
if t == 0:
m = np.where(y.index < X.index[t + 1])[0]
else:
m = np.where((y.index >= X.index[t]) & (y.index < X.index[t + 1]))[0]
if t - self.lag < 0:
self.tau[t] = params[2]
for par in range(1, X_len + 1):
self.tau[t] += params[2 * par + 1] * Beta().x_weighted(X.iloc[ : t, par - 1][::-1].values.reshape((1, X.iloc[ : t, par - 1].shape[0])), [1.0, params[2 * par + 2]])
else:
self.tau[t] = params[2]
for par in range(1, X_len + 1):
self.tau[t] += params[2 * par + 1] * Beta().x_weighted(X.iloc[t - self.lag : t, par - 1][::-1].values.reshape((1, X.iloc[t - self.lag : t, par - 1].shape[0])), [1.0, params[2 * par + 2]])
for i in m:
if i == 0:
self.g[i] = uncond_var
else:
self.g[i] = uncond_var * (1 - alpha1 - beta1) + alpha1 * (resid[i - 1] ** 2) / self.tau[t] + beta1 * self.g[i - 1]
sigma2[i] = self.g[i] * self.tau[t]
return sigma2
def loglikelihood(self, params, X, y):
sigma2 = self.model_filter(params, X, y)
resid = y
return loglikelihood_normal(resid, sigma2)
def predict(self, X, y):
return self.model_filter(self.optimized_params, X, y)
def simulate(self, params = [0.0, 0.1, 0.2, 0.6, 0.4, 0.005, 5.0], lag = 12, num = 100):
rv = np.zeros(num)
tau = np.zeros(num)
g = np.zeros(num * 22)
sigma2 = np.zeros(num * 22)
y = np.zeros(num * 22)
mu, omega, alpha, beta, m, pszi, theta = params[0], params[1], params[2], params[3], params[4], params[5], params[6]
uncond_var = omega / (1 - alpha - beta)
for t in range(num):
if t - lag < 0:
rv[t] = np.sum(y[(t - 1 )* 22 : t * 22] ** 2)
tau[t] = m + pszi * Beta().x_weighted(rv[:t][::-1].reshape((1, rv[:t].shape[0])), [1.0, theta])
else:
rv[t] = np.sum(y[(t - 1 )* 22 : t * 22] ** 2)
tau[t] = m + pszi * Beta().x_weighted(rv[t - lag : t][::-1].reshape((1, rv[t - lag : t].shape[0])), [1.0, theta])
for i in range(t * 22, (t + 1) * 22):
if t == 0:
if i == 0:
g[i] = uncond_var
else:
g[i] = uncond_var * (1 - alpha - beta) + alpha * (y[i - 1] - mu) ** 2 + beta * g[i - 1]
sigma2[i] = g[i]
y[i] = stats.norm.rvs(loc = params[0], scale = np.sqrt(sigma2[i]))
else:
g[i] = uncond_var * (1 - alpha - beta) + alpha * ((y[i - 1] - mu) ** 2) / tau[t] + beta * g[i - 1]
sigma2[i] = g[i] * tau[t]
y[i] = stats.norm.rvs(loc = params[0], scale = np.sqrt(sigma2[i]))
return rv, tau, g, sigma2, y
class GARCH_MIDAS_sim(BaseModel):
def __init__(self, lag = 36, plot = True, *args):
self.lag = lag
self.plot = plot
self.args = args
def initialize_params(self, X):
self.init_params = np.array([0.05, 0.5, 0.5, 0.5, 0.5, 1.0])
return self.init_params
def model_filter(self, params, X, y):
self.tau = np.zeros(len(X))
self.g = np.zeros(len(y))
sigma2 = np.zeros(len(y))
I_t = int(len(y) / len(X))
mu = params[0]
alpha1 = params[1]
beta1 = params[2]
m = params[3]
theta = params[4]
w = params[5]
X_t = np.zeros((len(X), self.lag))
for t in range(len(X)):
if t < self.lag:
X_t[t] = np.hstack((X[ : t][::-1], np.zeros(self.lag - t)))
else:
X_t[t] = X[t - self.lag : t][::-1]
self.tau = np.exp(m + theta * Beta().x_weighted(X_t, [1.0, w]))
j = 0
for i in range(len(y)):
if i % I_t == 0:
j += 1
if i == 0:
self.g[i] = 1
else:
self.g[i] = (1 - alpha1 - beta1) + alpha1 * ((y[i - 1] - mu) ** 2) / self.tau[j - 1] + beta1 *self.g[i - 1]
sigma2[i] = self.g[i] * self.tau[j - 1]
return sigma2
def loglikelihood(self, params, X, y):
sigma2 = self.model_filter(params, X, y)
resid = y - params[0]
return loglikelihood_normal(resid, sigma2)
def simulate(self,
params = [0.0, 0.06, 0.91, 0.1, 0.3, 4.0, 0.9, 0.09],
num = 480,
lag = 36,
I_t = 22):
X = np.zeros(num)
tau = np.zeros(num)
g = np.zeros(num * I_t)
sigma2 = np.zeros(num * I_t)
r = np.zeros(num * I_t)
X_t = np.zeros((num, lag))
mu = params[0]
alpha1 = params[1]
beta1 = params[2]
m = params[3]
theta = params[4]
w = params[5]
fi = params[6]
sigma_fi = params[7]
j = 0
for i in range(num):
if i == 0:
X[i] = np.random.normal(0.0, sigma_fi)
else:
X[i] = fi * X[i - 1] + np.random.normal(0.0, sigma_fi)
for i in range(num):
if i < lag:
X_t[i] = np.hstack((X[ : i][::-1], np.zeros(lag - i)))
else:
X_t[i] = X[i - lag : i][::-1]
tau = np.exp(m + theta * Beta().x_weighted(X_t, [1.0, w]))
for i in range(num * I_t):
if i % I_t == 0:
j += 1
if i == 0:
g[i] = 1
else:
g[i] = 1 - alpha1 - beta1 + alpha1 * (r[i - 1]) ** 2 / tau[j - 1] + beta1 * g[i - 1]
sigma2[i] = g[i] * tau[j - 1]
r[i] = stats.norm.rvs(loc = mu, scale = np.sqrt(sigma2[i]))
return X, r, tau, g, sigma2
class Panel_GARCH(BaseModel):
def __init__(self, plot = True, dist = 'normal', *args):
self.plot = plot
self.dist = dist
self.args = args
def initialize_params(self, X):
if self.dist == 'normal':
self.init_params = np.array([0.4, 0.4])
elif self.dist == 'student-t':
self.init_params = np.array([0.4, 0.4, 4.0])
else:
raise ValueError("ValueError exception thrown")
return self.init_params
def model_filter(self, params, X):
sigma2 = np.zeros_like(X)
alpha, beta = params[0], params[1]
uncond_var = np.nanmean(X ** 2, axis = 0)
nans = X.isna().sum().values
X = X.values
for i in range(sigma2.shape[0]):
for j in range(sigma2.shape[1]):
if nans[j] == i:
sigma2[i][j] = uncond_var[j]
elif nans[j] < i:
sigma2[i][j] = uncond_var[j] * (1 - alpha - beta) + alpha * (X[i - 1][j] ** 2) + beta * sigma2[i - 1][j]
else:
pass
return sigma2
def loglikelihood(self, params, X):
sigma2 = self.model_filter(params, X)
if self.dist == 'normal':
lls = loglikelihood_normal(X, sigma2).sum()
elif self.dist == 'student-t':
lls = loglikelihood_student_t(X, sigma2, params[2]).sum()
return lls
def simulate(self, params = [0.06, 0.91], num = 100, length = 1000):
sigma2 = np.zeros((length, num))
r = np.zeros((length, num))
alpha, beta = params[0], params[1]
for t in range(length):
if t == 0:
sigma2[t] = 1.0
else:
sigma2[t] = 1 - alpha - beta + alpha * (r[t - 1] ** 2) + beta * sigma2[t - 1]
r[t] = np.random.normal(0.0, np.sqrt(sigma2[t]))
return sigma2, r
def forecast(self, X, H):
X_new = X
X_new.loc[X.shape[0]] = 0
sigma2 = self.model_filter(self.optimized_params, X_new)
sigma2 = sigma2 * np.sqrt(H)
return sigma2[-1]
class Panel_GARCH_CSA(BaseModel):
"""
Panel GARCH with cross sectional adjustment
$r_{it} = \sigma_{it} c_t \epsilon_{it}$
$\mu_i = \frac{1}{N} \sum_{i = 1}^N r_{it}^2$
$c_t = (1 - \phi) + \phi \sqrt{ \frac{1}{N} \sum_{i = 1}^N (\frac{r_{it-1}}{\sigma_{it-1} c_{t-1}} - \frac{1}{N} \sum_{i = 1}^N \frac{r_{it-1}}{\sigma_{it-1} c_{t-1}} )^2}$
$\sigma_{it}^2 = \mu_i (1 - \alpha - \beta) + \alpha \epsilon_{it-1}^2 + \beta \sigma_{it-1}^2$
"""
def __init__(self, plot = True, dist = 'normal', *args):
self.plot = plot
self.dist = dist
self.args = args
def initialize_params(self, X):
if self.dist == 'normal':
self.init_params = np.array([0.1, 0.5, 0.5])
elif self.dist == 'student-t':
self.init_params = np.array([0.1, 0.5, 0.5, 4.0])
return self.init_params
def model_filter(self, params, y):
c = np.zeros(y.shape[0])
sigma2 = np.zeros(y.shape)
T, N = y.shape
mu = np.nanmean(y ** 2, axis = 0)
y = y.values
phi, alpha, beta = params[0], params[1], params[2]
for t in range(T):
if t == 0:
c[t] = 1.0
for i in range(N):
if np.isnan(y[t][i]) == True:
sigma2[t][i] = np.nan
else:
sigma2[t][i] = mu[i]
else:
c[t] = (1 - phi) + phi * np.nanstd(y[t - 1] / (np.sqrt(sigma2[t - 1]) * c[t - 1]))
for i in range(N):
if np.isnan(y[t][i]) == True:
if np.isnan(y[t - 1][i]) == True:
sigma2[t][i] = np.nan
else:
sigma2[t][i] = mu[i]
else:
if np.isnan(sigma2[t - 1][i]) == False:
sigma2[t][i] = mu[i] * (1 - alpha - beta) + alpha * (y[t - 1][i] / (np.sqrt(sigma2[t - 1][i]) * c[t - 1])) ** 2 + beta * sigma2[t - 1][i]
else:
sigma2[t][i] = mu[i]
return sigma2, c
def loglikelihood(self, params, y):
sigma2, _ = self.model_filter(params, y)
lls = 0
sigma2 = sigma2.T
for i in range(y.shape[1]):
idx = np.where(np.isnan(sigma2[i]) == False)[0]
sig = sigma2[i][idx]
xx = y.iloc[idx, i].values
if len(sig) == 0.0:
lls += 0.0
else:
if self.dist == 'normal':
lls += loglikelihood_normal(xx, sig)
elif self.dist == 'student-t':
lls += loglikelihood_student_t(xx, sig, params[3])
return lls
def simulate(self, params = [0.1, 0.2, 0.6], num = 100, length = 500):
c = np.zeros(length)
sigma2 = np.zeros((length, num))
ret = np.zeros((length, num))
phi, alpha, beta = params[0], params[1], params[2]
for t in range(length):
if t == 0:
c[t] = 1.0
sigma2[t] = 1.0
else:
c[t] = (1 - phi) + phi * np.std(ret[t - 1] / (sigma2[t - 1] * c[t - 1]))
mu = np.mean(ret[ : t] ** 2, axis = 0)
sigma2[t] = mu * (1 - alpha - beta) + alpha * (ret[t - 1] / (sigma2[t - 1] * c[t - 1])) ** 2 + beta * sigma2[t - 1]
ret[t] = stats.norm.rvs(loc = 0.0, scale = np.sqrt(sigma2[t]))
return ret, sigma2, c
def forecast(self, y):
row_nul = pd.DataFrame([[0]*y.shape[1]], columns = y.columns)
y = y.append(row_nul)
sigma2, _ = self.model_filter(self.optimized_params, y)
forecast = sigma2[-1]
forecast[np.where(forecast == 0)[0]] = np.nan
return forecast
class Panel_MIDAS(BaseModel):
def __init__(self, lag = 12, plot = True, exp = True, *args):
self.lag = lag
self.plot = plot
self.exp = exp
self.args = args
def initialize_params(self, X):
self.init_params = np.linspace(1, 1, int(1.0 + X.shape[1] * 2.0))
return self.init_params
def model_filter(self, params, X):
X = create_matrix(X, self.lag)
model = params[0]
for i in range(1, len(X) + 1):
model += params[2 * i - 1] * Beta().x_weighted(X['X{num}'.format(num = i)], [1.0, params[2 * i]])
if self.exp == True:
return np.exp(model)
else:
return model
def loglikelihood(self, params, X, y):
try:
y_len, y_col = y.shape
except:
y_len, y_col = y.shape[0], 1
y_nan = y.isna().sum().values
self.tau_t = np.zeros(y_len)
tau = self.model_filter(params, X)
T = X.shape[0]
j = 0
for i in range(T - 1):
if i == 0:
index = y[y.index < X.index[i + 1]].index
else:
index = y[(y.index >= X.index[i]) & (y.index < X.index[i + 1])].index
mat = np.linspace(tau[i], tau[i], index.shape[0])
self.tau_t[j : j + index.shape[0]] = mat
j += index.shape[0]
lls = 0
for i in range(y_col):
if y_nan[i] >= y_len:
lls += 0
else:
lls += loglikelihood_normal(y.iloc[y_nan[i]:, i].values, self.tau_t[y_nan[i]:])
return lls
def simulate(self, params = [0.1, 0.3, 4.0], num = 500, K = 12, panel = 100):
X = np.zeros(num)
tau = np.zeros(num)
r = np.zeros((num * 22, panel))
j = 0
month = []
m_dates = []
y_dates = []
for t in range(num):
if t == 0:
X[t] = np.random.normal()
else:
X[t] = 0.9 * X[t - 1] + np.random.normal()
for t in range(1, num + 1):
if t < K + 1:
tau[t - 1] = np.exp(params[0] + params[1] * Beta().x_weighted(X[:t][::-1].reshape((1, X[:t].shape[0])), [1.0, params[2]]))
else:
tau[t - 1] = np.exp(params[0] + params[1] * Beta().x_weighted(X[t - K : t][::-1].reshape((1, K)), [1.0, params[2]]))
r[(t - 1) * 22 : t * 22] = np.random.normal(scale = np.sqrt(tau[t - 1]), size = (22, panel))
for i in range(num):
month.append(i % 12)
for i in month:
if i == 0:
j += 1
m_dates.append(datetime(2010 + j, 1, 1))
else:
m_dates.append(datetime(2010 + j, 1 + i, 1))
for i in m_dates[:-1]:
for j in range(22):
y_dates.append(i + timedelta(j))
y = pd.DataFrame(data = r[:-22], index = y_dates)
X = pd.DataFrame(data = X, index = m_dates)
return X, y, tau
def create_sims(self, number_of_sims = 500, length = 100, K = 12, params = [0.1, 0.3, 4.0], panel = 200):
lls, b0, b1, th, runtime = np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims)
for i in range(number_of_sims):
np.random.seed(i)
X, y, _ = self.simulate(params = params, num = length, K = K, panel = panel)
start = time.time()
self.fit(['pos', 'pos', 'pos'], X, y)
runtime[i] = time.time() - start
lls[i] = self.opt.fun
b0[i], b1[i], th[i] = self.optimized_params[0], self.optimized_params[1], self.optimized_params[2]
print("{}st iteration's runTime: {} sec.\n".format(i + 1, round(runtime[i], 4)))
return pd.DataFrame(data = {'LogLike': lls,
'Beta0': b0,
'Beta1': b1,
'Theta':th})
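# Hedged usage sketch for Panel_MIDAS -- mirrors the call made in create_sims above and is not
# part of the original file. X is a monthly explanatory DataFrame and y a daily panel of returns;
# 'pos' presumably constrains the corresponding parameter to be positive:
# midas = Panel_MIDAS(lag=12, plot=False)
# midas.fit(['pos', 'pos', 'pos'], X, y)
# long_run_component = midas.model_filter(midas.optimized_params, X)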
class Panel_GARCH_MIDAS(object):
def __init__(self, lag = 12, plot = True, exp = True, *args):
self.lag = lag
self.exp = exp
self.plot = plot
self.args = args
def fit(self, restriction_midas, restriction_garch, X, y):
self.midas = Panel_MIDAS(lag = self.lag, plot = self.plot, exp = self.exp)
if self.plot == True:
print('Estimated parameters for the MIDAS equation:\n')
else:
pass
self.midas.fit(restriction_midas, X, y)
y_hat = self.calculate_y_hat(y, self.midas.tau_t)
self.garch = Panel_GARCH(plot = self.plot)
if self.plot == True:
print('\nEstimated parameters for the GARCH equation:\n')
else:
pass
self.garch.fit(restriction_garch, y_hat)
def calculate_y_hat(self, y, tau):
y_hat = np.zeros_like(y)
for i in range(y.shape[0]):
for j in range(y.shape[1]):
y_hat[i][j] = y.iloc[i, j] / np.sqrt(tau[i])
y_hat = pd.DataFrame(data = y_hat, index = y.index, columns = y.columns)
return y_hat
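    # Note: calculate_y_hat de-volatilizes the panel by the MIDAS long-run component,
    # so the second-stage Panel_GARCH above is estimated on r_it / sqrt(tau_t).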
def simulate(self, midas_params = [0.1, 0.3, 4.0], garch_params = [0.06, 0.8], num = 500, K = 12, panel = 100):
beta0, beta1, theta = midas_params[0], midas_params[1], midas_params[2]
alpha, beta = garch_params[0], garch_params[1]
X = np.zeros(num)
tau = np.zeros(num)
r = np.zeros((num * 22, panel))
g = np.zeros((num * 22, panel))
j = 0
month = []
m_dates = []
y_dates = []
for t in range(num):
if t == 0:
X[t] = np.random.normal()
else:
X[t] = 0.9 * X[t - 1] + np.random.normal()
for t in range(1, num + 1):
if t < K + 1:
tau[t - 1] = np.exp(beta0 + beta1 * Beta().x_weighted(X[:t][::-1].reshape((1, X[:t].shape[0])), [1.0, theta]))
else:
tau[t - 1] = np.exp(beta0 + beta1 * Beta().x_weighted(X[t - K : t][::-1].reshape((1, K)), [1.0, theta]))
for i in range((t - 1) * 22, t * 22):
if i == 0:
g[i] = np.ones(panel)
else:
g[i] = (1 - alpha - beta) + alpha * (r[i - 1] ** 2) / tau[t - 1] + beta * g[i - 1]
r[i] = np.random.normal(scale = np.sqrt(g[i] * tau[t - 1]), size = panel)
for i in range(num):
month.append(i % 12)
for i in month:
if i == 0:
j += 1
m_dates.append(datetime(2010 + j, 1, 1))
else:
m_dates.append(datetime(2010 + j, 1 + i, 1))
for i in m_dates[:-1]:
for j in range(22):
y_dates.append(i + timedelta(j))
y = pd.DataFrame(data = r[:-22], index = y_dates)
X = pd.DataFrame(data = X, index = m_dates)
return X, y, tau, g
def create_sims(self, number_of_sims = 500, length = 100, K = 12, midas_params = [0.1, 0.3, 4.0], garch_params = [0.06, 0.8]):
b0, b1, th, al, bt, runtime = np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims), np.zeros(number_of_sims)
for i in range(number_of_sims):
np.random.seed(i)
X, y, _, _ = self.simulate(midas_params = midas_params, garch_params = garch_params, num = length, K = K, panel = 100)
start = time.time()
self.fit(['pos', 'pos', 'pos'], ['01', '01'], X, y)
runtime[i] = time.time() - start
b0[i], b1[i], th[i], al[i], bt[i] = self.midas.optimized_params[0], self.midas.optimized_params[1], self.midas.optimized_params[2], self.garch.optimized_params[0], self.garch.optimized_params[1]
print("{}st iteration's runTime: {} sec.\n".format(i + 1, round(runtime[i], 4)))
return pd.DataFrame(data = {'Beta0': b0,
'Beta1': b1,
'Theta':th,
'Alpha': al,
'Beta': bt})
def forecast(self, y, H = 5, plotting = True):
from pandas.tseries.offsets import BDay
import matplotlib.pyplot as plt
forecast = np.zeros(H)
mu = np.mean(y ** 2)
alpha = self.garch.optimized_params[0]
beta = self.garch.optimized_params[1]
        y_hat = y / np.sqrt(self.midas.tau_t)  # de-volatilize consistently with calculate_y_hat
sigma2 = self.garch.model_filter(self.garch.optimized_params, y_hat)
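        # The loop below applies the standard multi-step GARCH recursion: the h-step forecast
        # mean-reverts from the last filtered variance sigma2[-1] towards mu at rate (alpha + beta),
        # and is then rescaled by the most recent MIDAS long-run component tau_t[-1].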
for i in range(1, H + 1):
forecast[i - 1] = (mu * (1 - (alpha + beta) ** (i - 1)) + sigma2[-1] * (alpha + beta) ** (i - 1)) * self.midas.tau_t[-1]
forc = np.zeros(len(y) + H)
forc[:-H] = sigma2 * self.midas.tau_t
forc[-H:] = forecast
        if isinstance(y, (pd.Series, pd.DataFrame)):
index = []
for i in range(len(y) + H):
if i < len(y):
index.append(y.index[i])
else:
index.append(y.index[-1] + BDay(i - len(y.index) + 1))
forecasted_series = pd.Series(data = forc, index = index)
if plotting == True:
plt.figure(figsize = (15, 5))
plt.plot(forecasted_series[forecasted_series.index <=
|
pd.to_datetime(y.index[-1])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle as pkl
from UserCentricMeasurements import *
from ContentCentricMeasurements import *
from CommunityCentricMeasurements import *
from TEMeasurements import *
from collections import defaultdict
import jpype
import json
import os
basedir = os.path.dirname(__file__)
class BaselineMeasurements(UserCentricMeasurements, ContentCentricMeasurements, TEMeasurements, CommunityCentricMeasurements):
def __init__(self,
dfLoc,
content_node_ids=[],
user_node_ids=[],
metaContentData=False,
metaUserData=False,
contentActorsFile=os.path.join(basedir, './baseline_challenge_data/filtUsers-baseline.pkl'),
contentFile=os.path.join(basedir, './baseline_challenge_data/filtRepos-baseline.pkl'),
topNodes=[],
topEdges=[],
previousActionsFile='',
community_dictionary='',
# community_dictionary=os.path.join(basedir, './baseline_challenge_data/baseline_challenge_community_dict.pkl'),
te_config=os.path.join(basedir, './baseline_challenge_data/te_params_baseline.json'),
platform='github',
use_java=True):
super(BaselineMeasurements, self).__init__()
self.platform = platform
try:
# check if input is a data frame
dfLoc.columns
df = dfLoc
        except AttributeError:
# if not it should be a csv file path
df =
|
pd.read_csv(dfLoc)
|
pandas.read_csv
|
import os
import pandas as pd
import numpy as np
from itertools import product, combinations
from graphpype.utils_stats import (compute_oneway_anova_fwe,
compute_pairwise_ttest_fdr)
from graphpype.utils_cor import return_corres_correl_mat
def isInAlphabeticalOrder(word):
return list(word) == sorted(word)
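# e.g. isInAlphabeticalOrder("abd") -> True, isInAlphabeticalOrder("bad") -> False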
def return_all_iter_cormats(cormat_path, iterables, iternames,
gm_mask_coords_file=0, gm_mask_labels_file=0,
mapflow_iterables=0, mapflow_iternames=0,
export_df=False):
"""
    gm_mask_coords_file is the coords common to all analyses
"""
print(product(*iterables))
all_iter_cormats = []
all_descriptors = []
assert isInAlphabeticalOrder(iternames), \
("Warning, iternames are not in alphabetical oroder, check the \
iterables order as well")
if gm_mask_coords_file != 0:
gm_mask_coords = np.loadtxt(gm_mask_coords_file)
print(gm_mask_coords)
if export_df:
writer = pd.ExcelWriter(os.path.join(cormat_path, "all_cormats.xls"))
for iter_obj in product(*iterables):
print(iter_obj)
assert len(iter_obj) == len(
iternames), "Error, different number of iternames and iterables"
iter_dir = "".join(["_" + zip_iter[0].strip() + "_" +
zip_iter[1].strip() for zip_iter in zip(iternames,
iter_obj)])
print(iter_dir)
if mapflow_iterables == 0:
cormat_file = os.path.join(
cormat_path, iter_dir, "compute_conf_cor_mat",
"Z_cor_mat_resid_ts.npy")
assert os.path.exists(cormat_file), \
("Warning, file {} could not be found".format(cormat_file))
cormat = np.load(cormat_file)
print(cormat.shape)
if gm_mask_coords_file != 0:
coords_file = os.path.join(
cormat_path, iter_dir, "extract_mean_ROI_ts",
"subj_coord_rois.txt")
coords = np.loadtxt(coords_file)
cormat, _ = return_corres_correl_mat(
cormat, coords, gm_mask_coords)
if export_df:
if gm_mask_labels_file:
labels = [line.strip()
for line in open(gm_mask_labels_file)]
else:
labels = list(range(cormat.shape[0]))
df =
|
pd.DataFrame(cormat, columns=labels, index=labels)
|
pandas.DataFrame
|
import os
import glob
import pandas as pd
import datetime as dt
from typing import Any, Iterator, Sequence, Optional, Union
from ..logger import get_logger
from .. import files, sql
from . import connection
logger = get_logger(__name__)
def upload_csv(
csv_path: str,
schema: str,
table: str,
*,
separator: str = ',',
bucket: str = 'gismart-analytics',
bucket_dir: str = 'dwh/temp',
columns: Optional[Sequence] = None,
delete_s3_after: bool = True,
secret_id: str = 'prod/redshift/analytics',
) -> None:
'''Upload csv file to S3 and copy to Redshift'''
if not columns:
columns = files.csv_columns(csv_path, separator=separator)
table_columns = f'{schema}.{table} ({",".join(columns)})'
with connection.get_redshift(secret_id) as redshift_locopy:
redshift_locopy.load_and_copy(
local_file=csv_path,
s3_bucket=bucket,
s3_folder=bucket_dir,
table_name=table_columns,
delim=separator,
copy_options=['IGNOREHEADER AS 1', 'REMOVEQUOTES'],
delete_s3_after=delete_s3_after,
compress=False,
)
filename = os.path.basename(csv_path)
logger.info(f'{filename} is uploaded to db')
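# Hedged usage sketch (the path, schema and table names below are illustrative only):
# upload_csv('/tmp/events.csv', schema='analytics', table='events', separator=';')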
def download_csv(
query: str,
data_dir: Optional[str] = None,
*,
separator: str = ',',
bucket: str = 'gismart-analytics',
bucket_dir: str = 'dwh/temp',
delete_s3_after: bool = True,
secret_id: str = 'prod/redshift/analytics',
) -> Sequence[str]:
'''Copy data from RedShift to S3 and download csv files up to 6.2 GB'''
if data_dir and not os.path.exists(data_dir):
os.makedirs(data_dir)
with connection.get_redshift(secret_id) as redshift_locopy:
redshift_locopy.unload_and_copy(
query=query,
s3_bucket=bucket,
s3_folder=bucket_dir,
export_path=False,
raw_unload_path=data_dir,
delimiter=separator,
delete_s3_after=delete_s3_after,
parallel_off=False,
unload_options=['CSV', 'HEADER', 'GZIP', 'PARALLEL ON', 'ALLOWOVERWRITE'],
)
logger.info('Data is downloaded to csv files')
filenames = glob.glob(os.path.join(data_dir or os.getcwd(), '*part_00.gz'))
return filenames
def upload_data(
data: pd.DataFrame,
csv_path: str,
schema: str,
table: str,
*,
separator: str = ',',
bucket: str = 'gismart-analytics',
bucket_dir: str = 'dwh/temp',
columns: Optional[Sequence] = None,
remove_csv: bool = False,
secret_id: str = 'prod/redshift/analytics',
) -> None:
'''Save data to csv and upload it to RedShift via S3'''
filename = os.path.basename(csv_path)
filedir = os.path.dirname(csv_path)
if not os.path.exists(filedir):
os.mkdir(filedir)
data.to_csv(csv_path, index=False, columns=columns)
logger.info(f'Data is saved to {filename}')
upload_csv(
csv_path=csv_path,
schema=schema,
table=table,
separator=separator,
bucket=bucket,
bucket_dir=bucket_dir,
columns=columns,
secret_id=secret_id,
)
if remove_csv:
os.remove(csv_path)
logger.info(f'{filename} is removed')
def download_data(
query: str,
*,
temp_dir: str = '/tmp',
separator: str = ',',
bucket: str = 'gismart-analytics',
bucket_dir: str = 'dwh/temp',
parse_dates: Optional[Sequence[str]] = None,
parse_bools: Optional[Sequence[str]] = None,
dtype: Optional[dict] = None,
chunking: bool = False,
secret_id: str = 'prod/redshift/analytics',
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
'''Download data from Redshift via S3'''
dtype = dtype or {}
parse_bools = parse_bools or []
temp_path = os.path.join(temp_dir, str(dt.datetime.now()))
filenames = download_csv(
query=query,
data_dir=temp_path,
separator=separator,
bucket=bucket,
bucket_dir=bucket_dir,
secret_id=secret_id,
)
chunks = _read_chunks(
filenames,
separator=separator,
parse_dates=parse_dates,
parse_bools=parse_bools,
dtype=dtype,
)
if chunking:
return chunks
else:
data =
|
pd.concat(chunks, ignore_index=True)
|
pandas.concat
|
"""
@brief test tree node (time=2s)
"""
import unittest
import pandas
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode.pip_helper import (
get_packages_list, package2dict, get_package_info,
PQPipError)
class TestPipHelper(ExtTestCase):
def test_exc(self):
exc = PQPipError('cmd', 'out', 'err')
msg = str(exc)
self.assertEqual([msg.replace('\n', '')], [
'CMD:cmdOUT:out[piperror]err'])
def test_pip_list(self):
li = get_packages_list()
dt = package2dict(li[0])
avoid = {'py_version'}
empty = []
for k, v in dt.items():
if k not in avoid:
                if v is None:
empty.append(k)
self.assertEmpty(empty)
self.assertNotEmpty(li)
def test_pip_show(self):
info = get_package_info("pandas")
if "version" not in str(info):
raise AssertionError(str(info))
info = get_package_info("sphinx")
if "version" not in str(info):
            raise AssertionError(str(info))
def test_pip_show_all(self):
info = get_package_info(start=0, end=2)
df = pandas.DataFrame(info)
self.assertNotEmpty(info)
if __name__ == "__main__":
info = get_package_info()
df =
|
pandas.DataFrame(info)
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs =
|
Series(v2)
|
pandas.Series
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
# gather
import pandas as pd
import io
import time
import zipfile
import zlib
import urllib.request
urllib.request.urlretrieve('http://geoportal1-ons.opendata.arcgis.com/datasets/48d0b49ff7ec4ad0a4f7f8188f6143e8_3.zip',
'constituencies_super_generalised_shapefile.zip')
with zipfile.ZipFile('constituencies_super_generalised_shapefile.zip', 'r') as zip_ref:
zip_ref.extractall('constituencies_super_generalised_shapefile')
petition_list = pd.read_csv(
'https://petition.parliament.uk/archived/petitions.csv?parliament=1&state=published')
url_list = petition_list['URL'].tolist()
count, start = 0, time.time()
signatures, mp, errors = [], [], []
for petition_url in url_list:
try:
response = pd.read_json(petition_url + '.json')
response = pd.DataFrame.from_dict(response.iloc[0, 0], orient='index')
created_at = response.loc['created_at', 0]
response = pd.DataFrame.from_dict(
response.loc['signatures_by_constituency', 0])
response['created'] = created_at
signatures.extend(
response[['ons_code', 'signature_count', 'created']].values.tolist())
mp.extend(
response[['ons_code', 'name', 'mp']].values.tolist())
except:
errors.append(petition_url)
count += 1
if count % 250 == 0:
print('{} files reached in {}s'.format(count, time.time() - start))
if len(errors) != 0:
print(errors)
signatures = pd.DataFrame(
signatures, columns=['ons_code', 'signature_count', 'date'])
signatures['date'] = pd.to_datetime(signatures['date'])
signatures = signatures.set_index('date').groupby(
    [pd.Grouper(freq='M'), 'ons_code']).sum().reset_index().sort_values(['ons_code', 'date'])
signatures['date'] = signatures.date.dt.to_period('M')
mp = pd.DataFrame(mp, columns=['ons_code', 'constituency', 'mp']).drop_duplicates(
'ons_code', keep='last')
mp = mp.replace('Ynys Môn', 'Ynys Mon')
population =
|
pd.read_excel(
'http://data.parliament.uk/resources/constituencystatistics/Population-by-age.xlsx', 'Data')
|
pandas.read_excel
|
import pandas as pd
import os
import re
import numpy as np
import pprint
import logging
'''
@Author: <NAME>
This script extracts voting members using the minutes of FOMC meetings, and then appends a manual verification for certain values.
'''
def main():
voter_df = get_voters()
get_errors(voter_df)
merge_error_correction(voter_df)
#merge_voting_members_with_alternatives()
def get_voters():
df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
df["Date"] = df["FOMC Meeting"].apply(lambda x:str(x).split(" ")[0])
df['FOMC Votes'] = df['FOMC Votes'].apply(lambda x:0 if np.isnan(x) else x)
df['date'] = pd.to_datetime(df["Date"])
df['start_date'] = df['date'] - pd.Timedelta('1 days')
df['start_date']=df['start_date'].dt.date
df['date']=df['date'].dt.date
df[['date','start_date']].head()
voter_df = pd.DataFrame()
for index,row in df.iterrows():
voters = []
num_voters = int(row['FOMC Votes'])
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['Date'])
if not os.path.exists(date_path):
print("Date not found")
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['start_date'])
if not os.path.exists(date_path):
print("Alternative date not found")
continue
else:
print('Process alternative date')
with open(date_path) as f:
broken = False
broken_starts = 0
broken_ends = 0
lines = f.readlines()
'''First Check For Broken Title'''
#print("CHECKING USING FRAGMENT HEURISTIC")
for line in lines[:200]:
if line.strip():
if broken_ends==0:
title_frag = re.match(r'^(?:PRESENT: |PRESENT. )?(?:Mr.|Ms.|Mt.|Mrs. )$',line.strip())
if title_frag:
if not broken:
broken = True
#print("Broken Begining")
#print(title_frag.group(0))
title_frag_string = str(title_frag.group(0)).replace("PRESENT: ","")
voters.append(title_frag_string)
broken_starts+=1
continue
if broken and broken_ends<len(voters):
name_fragment = re.match('^[A-Z][a-z][A-Za-z]*',line.strip())
if name_fragment:
voters[broken_ends] = voters[broken_ends]+" "+str(name_fragment.group(0))
broken_ends+=1
'''Check using Mr. Regex'''
if len(voters)==0:
#print("CHECKING TITLE REGEX")
for line in lines[:200]:
'''Then check for valid input'''
voter_line = re.findall(r'(?:Mr.|Ms.|Mrs.) [A-Z][a-zA-Z]*',line.strip())
if voter_line:
#print(voter_line)
voters.append(voter_line[0])
if len(voters)>=num_voters:
break
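            # e.g. re.findall(r'(?:Mr.|Ms.|Mrs.) [A-Z][a-zA-Z]*', 'Mr. Greenspan, Chairman')
            # returns ['Mr. Greenspan']; the unescaped '.' also tolerates OCR noise such as 'Mr,'.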
'''Check Last Name Regex'''
if len(voters) == 0:
#print("Checking POST-PRESENT-NAME HEURISTIC")
found_present = False
for line in lines[:200]:
if "PRESENT:" in line.strip() or "PRESENT." in line.strip():
found_present = True
present_line = line.split(",")[0].strip().replace("PRESENT","")
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',present_line)
if name_text:
voters.append(name_text.group(0))
continue
if found_present:
#print(line)
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',line.split(",")[0].strip())
if name_text:
voters.append(name_text.group(0))
if len(voters)>=num_voters:
break
#print('Date:{}'.format(row['Date']))
#print("Broken Status:{}".format(broken))
#print("Voter Number:{}".format(num_voters))
#print("Voters Found:{}".format(len(voters)))
#pprint.pprint(voters)
voter_df = voter_df.append({
"Date":row['FOMC Meeting'],
"voters_expected":num_voters,
"voters_observed":len(voters),
"Voters":voters if num_voters==len(voters) else None,
},ignore_index=True)
#print("="*50)
print(voter_df)
return voter_df
def get_errors(voter_df):
print(len(voter_df[voter_df["Voters"].isna()]))
voter_errors = voter_df[voter_df["Voters"].isna()].reset_index(drop=True)
voter_errors.to_csv("../output/voter_errors.csv",index=False)
def merge_error_correction(voter_df):
correction_df = pd.read_csv("../data/voter_corrections.csv")
correction_df['Date'] = pd.to_datetime(correction_df['Date'])
voter_df['Date'] = pd.to_datetime(voter_df['Date'])
voter_df = pd.concat([voter_df,correction_df])
voter_df = voter_df.drop_duplicates(['Date'], keep="last").sort_values(by="Date")
voter_df = voter_df[(voter_df['Date'].dt.year>1987)&(voter_df['Date'].dt.year<2010)]
voter_df.to_csv("../output/voting_members.csv",index=False)
def merge_voting_members_with_alternatives():
voting_df = pd.read_csv("../output/voting_members.csv")
alt_df =
|
pd.read_csv("../output/alternative_outcomes_and_corpus.csv")
|
pandas.read_csv
|
# This file is part of Patsy
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Exhaustive end-to-end tests of the top-level API.
import sys
import __future__
import six
import numpy as np
from nose.tools import assert_raises
from patsy import PatsyError
from patsy.design_info import DesignMatrix, DesignInfo
from patsy.eval import EvalEnvironment
from patsy.desc import ModelDesc, Term, INTERCEPT
from patsy.categorical import C
from patsy.contrasts import Helmert
from patsy.user_util import balanced, LookupFactor
from patsy.build import (design_matrix_builders,
build_design_matrices)
from patsy.highlevel import *
from patsy.util import (have_pandas,
have_pandas_categorical,
have_pandas_categorical_dtype,
pandas_Categorical_from_codes)
from patsy.origin import Origin
if have_pandas:
import pandas
def check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names): # pragma: no cover
assert np.allclose(rhs, expected_rhs_values)
assert rhs.design_info.column_names == expected_rhs_names
if lhs is not None:
assert np.allclose(lhs, expected_lhs_values)
assert lhs.design_info.column_names == expected_lhs_names
else:
assert expected_lhs_values is None
assert expected_lhs_names is None
if expect_full_designs:
if lhs is None:
new_rhs, = build_design_matrices([rhs.design_info], data)
else:
new_lhs, new_rhs = build_design_matrices([lhs.design_info,
rhs.design_info],
data)
assert np.allclose(new_lhs, lhs)
assert new_lhs.design_info.column_names == expected_lhs_names
assert np.allclose(new_rhs, rhs)
assert new_rhs.design_info.column_names == expected_rhs_names
else:
assert rhs.design_info.terms is None
assert lhs is None or lhs.design_info.terms is None
def dmatrix_pandas(formula_like, data={}, depth=0, return_type="matrix"):
return_type = "dataframe"
if isinstance(depth, int):
depth += 1
return dmatrix(formula_like, data, depth, return_type=return_type)
def dmatrices_pandas(formula_like, data={}, depth=0, return_type="matrix"):
return_type = "dataframe"
if isinstance(depth, int):
depth += 1
return dmatrices(formula_like, data, depth, return_type=return_type)
def t(formula_like, data, depth,
expect_full_designs,
expected_rhs_values, expected_rhs_names,
expected_lhs_values=None, expected_lhs_names=None): # pragma: no cover
if isinstance(depth, int):
depth += 1
def data_iter_maker():
return iter([data])
if (isinstance(formula_like, six.string_types + (ModelDesc, DesignInfo))
or (isinstance(formula_like, tuple)
and isinstance(formula_like[0], DesignInfo))
or hasattr(formula_like, "__patsy_get_model_desc__")):
if expected_lhs_values is None:
builder = incr_dbuilder(formula_like, data_iter_maker, depth)
lhs = None
(rhs,) = build_design_matrices([builder], data)
else:
builders = incr_dbuilders(formula_like, data_iter_maker, depth)
lhs, rhs = build_design_matrices(builders, data)
check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
else:
assert_raises(PatsyError, incr_dbuilders,
formula_like, data_iter_maker)
assert_raises(PatsyError, incr_dbuilder,
formula_like, data_iter_maker)
one_mat_fs = [dmatrix]
two_mat_fs = [dmatrices]
if have_pandas:
one_mat_fs.append(dmatrix_pandas)
two_mat_fs.append(dmatrices_pandas)
if expected_lhs_values is None:
for f in one_mat_fs:
rhs = f(formula_like, data, depth)
check_result(expect_full_designs, None, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
# We inline assert_raises here to avoid complications with the
# depth argument.
for f in two_mat_fs:
try:
f(formula_like, data, depth)
except PatsyError:
pass
else:
raise AssertionError
else:
for f in one_mat_fs:
try:
f(formula_like, data, depth)
except PatsyError:
pass
else:
raise AssertionError
for f in two_mat_fs:
(lhs, rhs) = f(formula_like, data, depth)
check_result(expect_full_designs, lhs, rhs, data,
expected_rhs_values, expected_rhs_names,
expected_lhs_values, expected_lhs_names)
def t_invalid(formula_like, data, depth, exc=PatsyError): # pragma: no cover
if isinstance(depth, int):
depth += 1
fs = [dmatrix, dmatrices]
if have_pandas:
fs += [dmatrix_pandas, dmatrices_pandas]
for f in fs:
try:
f(formula_like, data, depth)
except exc:
pass
else:
raise AssertionError
# Exercise all the different calling conventions for the high-level API
def test_formula_likes():
# Plain array-like, rhs only
t([[1, 2, 3], [4, 5, 6]], {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t((None, [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t(np.asarray([[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
t((None, np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"])
dm = DesignMatrix([[1, 2, 3], [4, 5, 6]], default_column_prefix="foo")
t(dm, {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"])
t((None, dm), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"])
# Plain array-likes, lhs and rhs
t(([1, 2], [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t(([[1], [2]], [[1, 2, 3], [4, 5, 6]]), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t((np.asarray([1, 2]), np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
t((np.asarray([[1], [2]]), np.asarray([[1, 2, 3], [4, 5, 6]])), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["x0", "x1", "x2"],
[[1], [2]], ["y0"])
x_dm = DesignMatrix([[1, 2, 3], [4, 5, 6]], default_column_prefix="foo")
y_dm = DesignMatrix([1, 2], default_column_prefix="bar")
t((y_dm, x_dm), {}, 0,
False,
[[1, 2, 3], [4, 5, 6]], ["foo0", "foo1", "foo2"],
[[1], [2]], ["bar0"])
# number of rows must match
t_invalid(([1, 2, 3], [[1, 2, 3], [4, 5, 6]]), {}, 0)
# tuples must have the right size
t_invalid(([[1, 2, 3]],), {}, 0)
t_invalid(([[1, 2, 3]], [[1, 2, 3]], [[1, 2, 3]]), {}, 0)
# plain Series and DataFrames
if have_pandas:
# Names are extracted
t(pandas.DataFrame({"x": [1, 2, 3]}), {}, 0,
False,
[[1], [2], [3]], ["x"])
t(pandas.Series([1, 2, 3], name="asdf"), {}, 0,
False,
[[1], [2], [3]], ["asdf"])
t((
|
pandas.DataFrame({"y": [4, 5, 6]})
|
pandas.DataFrame
|
#!/usr/bin/env python
# CREATE DATE: 11 March 2015
# AUTHOR: <NAME>
# Derived from run-performance-test.sh.
# This script runs a performance test on the crux toolkit. Its usage
# and output are described in ./performance.html. The MS2 file comes
# from the PAnDA paper by Hoopman et al. (JPR 2009). Extensive
# documentation of this particular data set is available from
# crux-projects/panda-data.
import sys
import subprocess
import os
import pandas as pd
# The location of the crux binary.
CRUX = "../../src/crux"
# Input files.
database = "worm+contaminants"
ms2 = "051708-worm-ASMS-10.ms2"
#############################################################################
# Run a command with error checking.
def runCommand(command, outputFileName):
# Skip the command if the output file already exists.
if (outputFileName != "") and (os.path.exists(outputFileName)):
sys.stderr.write("%s exists.\n" % outputFileName)
return
sys.stderr.write("RUN: %s\n" % command)
try:
returnCode = subprocess.call(command, shell=True)
if (returnCode != 0):
sys.stderr.write("Child was terminated by signal %d\n" % -returnCode)
sys.exit(1)
except OSError as e:
sys.stderr.write("Execution failed: %s\n" % e)
sys.exit(1)
#############################################################################
def createParameterFile(parameterFileName):
parameterFile = open(parameterFileName, "w")
# Enzymatic digestion rules.
parameterFile.write("enzyme=trypsin\n")
parameterFile.write("search_enzyme_number=1\n")
parameterFile.write("digestion=full-digest\n")
parameterFile.write("num_enzyme_termini=2\n")
parameterFile.write("missed-cleavages=0\n")
parameterFile.write("allowed_missed_cleavage=0\n")
# Minimums
parameterFile.write("minimum_peaks=10\n")
parameterFile.write("min-peaks=10\n")
# Precursor selection rules.
parameterFile.write("precursor-window=3\n")
parameterFile.write("precursor-window-type=mass\n")
parameterFile.write("peptide_mass_tolerance=3\n")
parameterFile.write("peptide_mass_units=0\n") # 0=amu, 1=mmu, 2=ppm
# Precursor mass type.
parameterFile.write("isotopic-mass=mono\n")
parameterFile.write("mass_type_parent=1\n") # 1=monoisotopic
# Fragment mass type. Tides uses only monoisotopic.
parameterFile.write("fragment-mass=mono\n")
parameterFile.write("mass_type_fragment=1\n") # 1=monoisotopic
# Decoys.
parameterFile.write("decoy-format=peptide-reverse\n")
parameterFile.write("decoy_search=1\n") # 1 = concatenated decoy search
parameterFile.write("concat=T\n")
parameterFile.write("keep-terminal-aminos=C\n") # No corresponding Comet param
# Report the top 5 matches.
parameterFile.write("top-match=5\n")
parameterFile.write("num_results=6\n")
parameterFile.write("num_output_lines=5\n")
# Precursor removal.
parameterFile.write("remove-precursor-peak=T\n")
parameterFile.write("remove-precursor-tolerance=15\n")
parameterFile.write("remove_precursor_peak=1\n")
parameterFile.write("remove_precursor_tolerance=15\n")
# Flanking peaks.
parameterFile.write("use-flanking-peaks=F\n")
parameterFile.write("theoretical_fragment_ions=1\n") # 0 = flanks; 1 = no flanks
parameterFile.write("use-neutral-loss-peaks=F\n")
# Fragment m/z discretization.
parameterFile.write("mz-bin-offset=0.68\n")
parameterFile.write("mz-bin-width=1.0005079\n")
parameterFile.write("fragment_bin_offset=0.68\n")
parameterFile.write("fragment_bin_tol=1.0005079\n")
# Peptide mass range.
parameterFile.write("min-mass=200\n")
parameterFile.write("max-mass=7200\n")
parameterFile.write("digest_mass_range=200 7200\n")
# Other Crux parameters.
parameterFile.write("compute-sp=T\n")
parameterFile.write("overwrite=T\n")
parameterFile.write("peptide-list=T\n")
# Comet parameters
parameterFile.write("output_pepxmlfile=0\n")
parameterFile.write("add_C_cysteine=57.021464\n")
# parameterFile.write("num_threads=1\n") # Multithreaded sometimes dumps core.
parameterFile.write("max_fragment_charge=2\n")
parameterFile.write("isotope_error=0\n")
parameterFile.write("use_A_ions=0\n")
parameterFile.write("use_B_ions=1\n")
parameterFile.write("use_C_ions=0\n")
parameterFile.write("use_X_ions=0\n")
parameterFile.write("use_Y_ions=1\n")
parameterFile.write("use_Z_ions=0\n")
parameterFile.write("use_NL_ions=0\n")
parameterFile.write("variable_mod01=0.0 X 0 3\n")
parameterFile.write("variable_mod02=0.0 X 0 3\n")
parameterFile.write("[COMET_ENZYME_INFO]\n")
parameterFile.write("0. No_enzyme 0 - -\n")
parameterFile.write("1. Trypsin 1 KR P\n")
parameterFile.close()
#############################################################################
def extractData(inputFileName, columnName, outputFileName):
# dataset = pd.read_csv(inputFileName, sep='\t')
# data_frame = pd.DataFrame(dataset)
data_frame = pd.read_csv(inputFileName, sep='\t')
data_frame = data_frame[[columnName]]
data_frame.to_csv(outputFileName, sep='\t', index=False)
#############################################################################
def runSearch(outputDirectory, searchName, searchParam, database,
psmFile, scoreColumn, confidenceParam):
runCommand("%s %s --output-dir %s --parameter-file %s %s %s %s"
% (CRUX, searchName, outputDirectory, parameterFileName,
searchParam, ms2, database),
psmFile)
confidenceFile = "%s/assign-confidence.target.txt" % outputDirectory
runCommand("%s assign-confidence --output-dir %s %s %s" %
(CRUX, outputDirectory, confidenceParam, psmFile), confidenceFile)
qFile = "%s/%s.q.txt" % (outputDirectory, searchName)
extractData(confidenceFile, "tdc q-value", qFile)
percolatorFile = "%s/percolator.target.psms.txt" % outputDirectory
runCommand("%s percolator --output-dir %s %s"
% (CRUX, outputDirectory, psmFile), percolatorFile)
qFile = "%s/%s.percolator.q.txt" % (outputDirectory, searchName)
extractData(percolatorFile, "percolator q-value", qFile)
fourColFile = "%s/%s.target.four-col.txt" % (outputDirectory, searchName)
dataset =
|
pd.read_csv(psmFile, sep='\t')
|
pandas.read_csv
|
import pandas as pd
import os
import requests
import json
from requests.adapters import HTTPAdapter
import uuid
def get_boundary_by_uid(uid, bmap_key):
bmap_boundary_url = 'https://map.baidu.com/?newmap=1&reqflag=pcmap&biz=1&from=webmap&da_par=direct&pcevaname=pc4.1&qt=ext&uid=' + uid + '&c=340&ext_ver=new&tn=B_NORMAL_MAP&nn=0&auth=fw9wVDQUyKS7%3DQ5eWeb5A21KZOG0NadNuxHNBxBBLBHtxjhNwzWWvy1uVt1GgvPUDZYOYIZuEt2gz4yYxGccZcuVtPWv3GuxNt%3DkVJ0IUvhgMZSguxzBEHLNRTVtlEeLZNz1%40Db17dDFC8zv7u%40ZPuxtfvSulnDjnCENTHEHH%40NXBvzXX3M%40J2mmiJ4Y&ie=utf-8&l=19&b=(12679382.095,2565580.38;12679884.095,2565907.38)&t=1573133634785'
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=3))
s.mount('https://', HTTPAdapter(max_retries=3))
data = s.get(url=bmap_boundary_url, timeout=5, headers={"Connection": "close"})
data = data.text
print(bmap_boundary_url, data)
data = json.loads(data)
content = data['content']
if not 'geo' in content:
print('geo is not in content')
return None
geo = content['geo']
i = 0
strsss = ''
for jj in str(geo).split('|')[2].split('-')[1].split(','):
jj = str(jj).strip(';')
if i % 2 == 0:
strsss = strsss + str(jj) + ','
else:
strsss = strsss + str(jj) + ';'
i = i + 1
return strsss.strip(";")
def transform_coordinate_batch(coordinates, bmap_key):
req_url = 'http://api.map.baidu.com/geoconv/v1/?coords='+coordinates+'&from=6&to=5&ak=' + bmap_key
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=3))
s.mount('https://', HTTPAdapter(max_retries=3))
data = s.get(req_url, timeout=5, headers={"Connection": "close"}) # , proxies=proxies
data = data.text
data = json.loads(data)
coords = ''
if data['status'] == 0:
result = data['result']
if len(result) > 0:
for res in result:
lng = res['x']
lat = res['y']
coords = coords + ";" + str(lng) + "," + str(lat)
return coords.strip(";")
def get_boundary(csv_file_path, bmap_key):
    print('CSV file path:', csv_file_path)
csv_file = pd.read_csv(csv_file_path, encoding='utf_8_sig')
data_csv = {}
uids, boundarys = [], []
for i in range(len(csv_file)):
uid = ''
try:
uid = csv_file['uid'][i]
uids.append(uid)
coordinates = get_boundary_by_uid(uid, bmap_key)
if coordinates is not None:
coords = transform_coordinate_batch(coordinates, bmap_key)
                print('Boundary returned successfully:', uid + ',' + coords)
boundarys.append(coords)
else:
boundarys.append(' ')
except Exception:
uids.append(uid)
boundarys.append(' ')
data_csv['uid'] = uids
data_csv['boundary'] = boundarys
df =
|
pd.DataFrame(data_csv)
|
pandas.DataFrame
|
# AUTOGENERATED! DO NOT EDIT! File to edit: queries.ipynb (unless otherwise specified).
__all__ = ['optimize_floats', 'optimize_ints', 'optimize_objects', 'df_optimize', 'connect_db', 'update_radcom',
'update_stel', 'update_mosaico', 'update_base', 'read_stel', 'read_radcom', 'read_mosaico', 'read_base']
# Cell
import requests
from decimal import *
from typing import *
from gazpacho import Soup
from rich.progress import track
from pathlib import Path
from unidecode import unidecode
import pandas as pd
import pandas_read_xml as pdx
import pyodbc
import re
import xml.etree.ElementTree as et
from zipfile import ZipFile
import collections
from fastcore.utils import listify
from fastcore.foundation import L
from fastcore.test import *
from .constants import *
from pyarrow import ArrowInvalid
getcontext().prec = 5
# Cell
def optimize_floats(df: pd.DataFrame, exclude = None) -> pd.DataFrame:
floats = df.select_dtypes(include=["float64"]).columns.tolist()
floats = [c for c in floats if c not in listify(exclude)]
df[floats] = df[floats].apply(pd.to_numeric, downcast="float")
return df
def optimize_ints(df: pd.DataFrame, exclude=None) -> pd.DataFrame:
ints = df.select_dtypes(include=["int64"]).columns.tolist()
ints = [c for c in ints if c not in listify(exclude)]
df[ints] = df[ints].apply(pd.to_numeric, downcast="integer")
return df
def optimize_objects(df: pd.DataFrame, datetime_features: List[str], exclude=None) -> pd.DataFrame:
for col in df.select_dtypes(include=["object"]).columns.tolist():
if col not in datetime_features:
if col in listify(exclude): continue
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
dtype = "category"
else:
dtype = "string"
df[col] = df[col].astype(dtype)
else:
df[col] = pd.to_datetime(df[col]).dt.date
return df
def df_optimize(df: pd.DataFrame, datetime_features: List[str] = [], exclude = None):
return optimize_floats(optimize_ints(optimize_objects(df, datetime_features, exclude), exclude), exclude)
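# Hedged usage sketch (the frame below is illustrative): df_optimize downcasts numeric columns
# and converts low-cardinality object columns to 'category' (high-cardinality ones to 'string'):
# small = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": ["x", "x", "y"]})
# small = df_optimize(small)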
# Cell
def connect_db():
"""Conecta ao Banco ANATELBDRO01 e retorna o 'cursor' (iterador) do Banco pronto para fazer iterações"""
conn = pyodbc.connect(
"Driver={ODBC Driver 17 for SQL Server};"
"Server=ANATELBDRO01;"
"Database=SITARWEB;"
"Trusted_Connection=yes;"
"MultipleActiveResultSets=True;",
timeout=TIMEOUT,
)
return conn
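# Hedged usage sketch (requires network access to the internal ANATELBDRO01 server):
# conn = connect_db()
# radcom = pd.read_sql_query(RADCOM, conn)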
# Internal Cell
def row2dict(row):
"""Receives a json row and return the dictionary from it"""
return {k: v for k, v in row.items()}
def dict2cols(df, reject=()):
"""Recebe um dataframe com dicionários nas células e extrai os dicionários como colunas
Opcionalmente ignora e exclue as colunas em reject
"""
for column in df.columns:
if column in reject:
df.drop(column, axis=1, inplace=True)
continue
if type(df[column].iloc[0]) == collections.OrderedDict:
try:
new_df = pd.DataFrame(df[column].apply(row2dict).tolist())
df = pd.concat([df, new_df], axis=1)
df.drop(column, axis=1, inplace=True)
except AttributeError:
continue
return df
def parse_plano_basico(row, cols=COL_PB):
"""Receives a json row and filter the column in `cols`"""
return {k: row[k] for k in cols}
def scrape_dataframe(id_list):
df = pd.DataFrame()
for id_ in track(id_list, description="Baixando informações complementares da Web"):
html = requests.get(ESTACAO.format(id_))
df = df.append(pd.read_html(Soup(html.text).find("table").html)[0])
df.rename(columns={'NumFistel': 'Fistel',
'Num Serviço': 'Num_Serviço'}, inplace=True)
return df[["Status", "Entidade", "Fistel", "Frequência", "Classe", 'Num_Serviço', 'Município', 'UF']]
# Internal Cell
def clean_merge(pasta, df):
df = df.copy()
COLS = [c for c in df.columns if "_x" in c]
for col in COLS:
col_x = col
col_y = col.split("_")[0] + "_y"
if df[col_x].count() > df[col_y].count():
a, b = col_x, col_y
else:
a, b = col_y, col_x
df.loc[df[a].isna(), a] = df.loc[df[a].isna(), b]
df.drop(b, axis=1, inplace=True)
df.rename({a: a[:-2]}, axis=1, inplace=True)
df.loc[df.Latitude_Transmissor == "", "Latitude_Transmissor"] = df.loc[
df.Latitude_Transmissor == "", "Latitude_Estação"
]
df.loc[df.Longitude_Transmissor == "", "Longitude_Transmissor"] = df.loc[
df.Longitude_Transmissor == "", "Longitude_Estação"
]
df.loc[df.Latitude_Transmissor.isna(), "Latitude_Transmissor"] = df.loc[
df.Latitude_Transmissor.isna(), "Latitude_Estação"
]
df.loc[df.Longitude_Transmissor.isna(), "Longitude_Transmissor"] = df.loc[
df.Longitude_Transmissor.isna(), "Longitude_Estação"
]
df.drop(["Latitude_Estação", "Longitude_Estação"], axis=1, inplace=True)
df.rename(
columns={
"Latitude_Transmissor": "Latitude",
"Longitude_Transmissor": "Longitude",
},
inplace=True,
)
municipios = Path(f"{pasta}/municípios.fth")
if not municipios.exists():
municipios = Path(f"{pasta}/municípios.xlsx")
if not municipios.exists():
            raise FileNotFoundError(f"The municipality table municípios.fth | municípios.xlsx is required in the folder {pasta}")
m = pd.read_excel(municipios, engine='openpyxl')
else:
m = pd.read_feather(municipios)
m.loc[
m.Município == "Sant'Ana do Livramento", "Município"
] = "Santana do Livramento"
m["Município"] = (
m.Município.apply(unidecode).str.lower().str.replace("'", " ")
)
m["UF"] = m.UF.str.lower()
df["Coordenadas_do_Município"] = False
df["Latitude"] = df.Latitude.str.replace(",", ".")
df["Longitude"] = df.Longitude.str.replace(",", ".")
df["Frequência"] = df.Frequência.str.replace(",", ".")
df.loc[df["Município"] == "Poxoréo", "Município"] = "Poxoréu"
df.loc[df["Município"] == "Couto de Magalhães", "Município"] = "Couto Magalhães"
df['Município'] = df.Município.astype('string')
criteria = ((df.Latitude == "") | (df.Latitude.isna()) | (df.Longitude == '') | (df.Longitude.isna())) & df.Município.isna()
df = df[~criteria]
for row in df[((df.Latitude == "") | (df.Latitude.isna()) | (df.Longitude == '') | (df.Longitude.isna()))].itertuples():
try:
left = unidecode(row.Município).lower()
m_coord = (
m.loc[
(m.Município == left) & (m.UF == row.UF.lower()),
["Latitude", "Longitude"],
]
.values.flatten()
.tolist()
)
df.loc[row.Index, "Latitude"] = m_coord[0]
df.loc[row.Index, "Longitude"] = m_coord[1]
df.loc[row.Index, "Coordenadas_do_Município"] = True
except ValueError:
print(left, row.UF, m_coord)
continue
freq_nans = df[df.Frequência.isna()].Id.tolist()
if freq_nans:
complement_df = scrape_dataframe(freq_nans)
df.loc[
df.Frequência.isna(), ["Status", "Entidade", "Fistel", "Frequência", "Classe",
'Num_Serviço', 'Município', 'UF']
] = complement_df.values
for r in df[(df.Entidade.isna()) | (df.Entidade == '')].itertuples():
df.loc[r.Index, 'Entidade'] = ENTIDADES.get(r.Fistel, '')
df.loc[df["Número_da_Estação"] == "", "Número_da_Estação"] = -1
df["Latitude"] = df["Latitude"].astype("float")
df["Longitude"] = df["Longitude"].astype("float")
df["Frequência"] = df.Frequência.astype("float")
df.loc[df.Serviço == 'OM', 'Frequência'] = df.loc[df.Serviço == 'OM', 'Frequência'].apply(lambda x: Decimal(x) / Decimal(1000))
df["Frequência"] = df.Frequência.astype("float")
df['Validade_RF'] = df.Validade_RF.astype('string').str.slice(0,10)
df.loc[df['Num_Ato'] == '', 'Num_Ato'] = -1
df['Num_Ato'] = df.Num_Ato.astype('string')
df['Num_Serviço'] = df.Num_Serviço.astype('category')
return df_optimize(df, exclude=['Frequência'])
# Internal Cell
def read_estações(path):
def extrair_ato(row):
if not isinstance(row, str):
row = listify(row)[::-1]
for d in row:
if not isinstance(d, dict):
continue
if (d.get("@TipoDocumento") == "Ato") and (
d.get("@Razao") == "Autoriza o Uso de Radiofrequência"
):
return d["@NumDocumento"], d["@DataDOU"][:10]
else:
return "", ""
return "", ""
es = pdx.read_xml(path, ["estacao_rd"])
dfs = []
for i in range(es.shape[0]):
df = pd.DataFrame(es["row"][i]).replace("", pd.NA)
df = dict2cols(df)
df.columns = [unidecode(c).lower().replace("@", "") for c in df.columns]
dfs.append(df)
df = pd.concat(dfs)
df = df[df.state.str.contains("-C1$|-C2$|-C3$|-C4$|-C7|-C98$")].reset_index(drop=True)
docs = L(df.historico_documentos.apply(extrair_ato).tolist())
df = df.loc[:, COL_ESTACOES]
df["Num_Ato"] = docs.itemgot(0).map(str)
df["Data_Ato"] = docs.itemgot(1).map(str)
df.columns = NEW_ESTACOES
df['Validade_RF'] = df.Validade_RF.astype('string').str.slice(0,10)
df["Data_Ato"] = df.Data_Ato.str.slice(0,10)
for c in df.columns:
df.loc[df[c] == '', c] = pd.NA
return df
def read_plano_basico(path):
pb = pdx.read_xml(path, ["plano_basico"])
dfs = []
for i in range(pb.shape[0]):
df = pd.DataFrame(pb["row"][i]).replace("", pd.NA)
df = dict2cols(df)
df.columns = [unidecode(c).lower().replace("@", "") for c in df.columns]
dfs.append(df)
df = pd.concat(dfs)
df = df.loc[df.pais == "BRA", COL_PB].reset_index(drop=True)
for c in df.columns:
df.loc[df[c] == '', c] = pd.NA
df.columns = NEW_PB
df.sort_values(["Id", "Canal"], inplace=True)
ENTIDADES.update({r.Fistel : r.Entidade for r in df.itertuples() if str(r.Entidade) == '<NA>'})
df = df.groupby("Id", as_index=False).first() # remove duplicated with NaNs
df.dropna(subset=['Status'], inplace=True)
df = df[df.Status.str.contains("-C1$|-C2$|-C3$|-C4$|-C7|-C98$")].reset_index(drop=True)
return df
#deprecated
def read_historico(path):
regex = r"\s([a-zA-Z]+)=\'{1}([\w\-\ :\.]*)\'{1}"
with ZipFile(path) as xmlzip:
with xmlzip.open("documento_historicos.xml", "r") as xml:
xml_list = xml.read().decode().split("\n")[2:-1]
dict_list = []
for item in xml_list:
matches = re.finditer(regex, item, re.MULTILINE)
dict_list.append(dict(match.groups() for match in matches))
df = pd.DataFrame(dict_list)
df = df[
(df.tipoDocumento == "Ato") & (df.razao == "Autoriza o Uso de Radiofrequência")
].reset_index()
df = df.loc[:, ["id", "numeroDocumento", "orgao", "dataDocumento"]]
df.columns = ["Id", "Num_Ato", "Órgao", "Data_Ato"]
df["Data_Ato"] = pd.to_datetime(df.Data_Ato)
return df.sort_values("Data_Ato").groupby("Id").last().reset_index()
# Cell
def update_radcom(pasta):
"""Atualiza a tabela local retornada pela query `RADCOM`"""
with console.status(
"[cyan]Lendo o Banco de Dados de Radcom...", spinner="earth"
) as status:
try:
conn = connect_db()
df = pd.read_sql_query(RADCOM, conn)
df['Unidade'] = 'MHz'
df.loc[df.Situação.isna(), 'Situação'] = 'M'
df = df_optimize(df, exclude=['Frequência'])
try:
df.to_feather(f"{pasta}/radcom.fth")
except ArrowInvalid:
Path(f"{pasta}/radcom.fth").unlink()
df.to_excel(f"{pasta}/radcom.xlsx", engine='openpyxl', index=False)
except pyodbc.OperationalError:
status.console.log(
"Não foi possível abrir uma conexão com o SQL Server. Esta conexão somente funciona da rede cabeada!"
)
def update_stel(pasta):
"""Atualiza a tabela local retornada pela query `STEL`"""
with console.status(
"[magenta]Lendo o Banco de Dados do STEL. Processo Lento, aguarde...",
spinner="moon",
) as status:
try:
conn = connect_db()
df = pd.read_sql_query(STEL, conn)
df['Validade_RF'] = df.Validade_RF.astype('str').str.slice(0,10)
df['Num_Serviço'] = df.Num_Serviço.astype('category')
df.loc[df.Unidade == 'kHz', 'Frequência'] = df.loc[df.Unidade == 'kHz', 'Frequência'].apply(lambda x: Decimal(x) / Decimal(1000))
df.loc[df.Unidade == 'GHz', 'Frequência'] = df.loc[df.Unidade == 'GHz', 'Frequência'].apply(lambda x: Decimal(x) * Decimal(1000))
df['Frequência'] = df.Frequência.astype('float')
df.loc[df.Unidade == 'kHz', 'Unidade'] = 'MHz'
df = df_optimize(df, exclude=['Frequência'])
try:
df.to_feather(f"{pasta}/stel.fth")
except ArrowInvalid:
Path(f"{pasta}/stel.fth").unlink()
df.to_excel(f"{pasta}/stel.xlsx", engine='openpyxl', index=False)
except pyodbc.OperationalError:
status.console.log(
"Não foi possível abrir uma conexão com o SQL Server. Esta conexão somente funciona da rede cabeada!"
)
def update_mosaico(pasta):
"""Atualiza a tabela local do Mosaico. É baixado e processado arquivos xml zipados da página pública do Spectrum E"""
with console.status(
"[blue]Baixando as Estações do Mosaico...", spinner="shark"
) as status:
file = requests.get(ESTACOES)
with open(f"{pasta}/estações.zip", "wb") as estações:
estações.write(file.content)
with console.status(
"[blue]Baixando o Plano Básico das Estações...", spinner="weather"
) as status:
file = requests.get(PLANO_BASICO)
with open(f"{pasta}/Canais.zip", "wb") as plano_basico:
plano_basico.write(file.content)
console.print(":package: [blue]Consolidando as bases de dados...")
estações = read_estações(f"{pasta}/estações.zip")
plano_basico = read_plano_basico(f"{pasta}/Canais.zip")
df = estações.merge(plano_basico, on="Id", how="left")
df['Número_da_Estação'] = df['Número_da_Estação'].fillna(-1)
df['Número_da_Estação'] = df['Número_da_Estação'].astype('int')
df = clean_merge(pasta, df)
try:
df.reset_index(drop=True).to_feather(f"{pasta}/mosaico.fth")
except ArrowInvalid:
Path(f"{pasta}/mosaico.fth").unlink()
with pd.ExcelWriter(f"{pasta}/mosaico.xlsx") as workbook:
df.reset_index(drop=True).to_excel(workbook, sheet_name='Sheet1', engine="openpyxl", index=False)
console.print("Kbô :vampire:")
return df
def update_base(pasta, up_stel=False, up_radcom=False, up_mosaico=False):
"""Wrapper que atualiza opcionalmente lê e atualiza as três bases indicadas anteriormente, as combina e salva o arquivo consolidado na pasta `pasta`"""
stel = read_stel(pasta, up_stel).loc[:, TELECOM]
radcom = read_radcom(pasta, up_radcom)
radcom.rename(columns={"Número da Estação": "Número_da_Estação"}, inplace=True)
mosaico = read_mosaico(pasta, up_mosaico)
radcom["Num_Serviço"] = '231'
radcom["Status"] = "RADCOM"
radcom['Classe_Emissão'] = ''
radcom['Largura_Emissão'] = ''
radcom["Classe"] = radcom.Fase.str.strip() + "-" + radcom.Situação.str.strip()
radcom["Entidade"] = radcom.Entidade.str.rstrip().str.lstrip()
radcom["Num_Ato"] = '-1'
radcom["Data_Ato"] = ""
radcom["Validade_RF"] = ""
radcom = radcom.loc[:, RADIODIFUSAO]
radcom = df_optimize(radcom, exclude=['Frequência'])
stel['Status'] = 'L'
stel["Num_Ato"] = '-1'
stel["Data_Ato"] = ""
stel['Entidade'] = stel.Entidade.str.rstrip().str.lstrip()
stel = df_optimize(stel, exclude=['Frequência'])
mosaico = mosaico.loc[:, RADIODIFUSAO]
mosaico['Classe_Emissão'] = ''
mosaico['Largura_Emissão'] = ''
mosaico = df_optimize(mosaico, exclude=['Frequência'])
rd = mosaico.append(radcom)
rd = rd.append(stel).sort_values("Frequência").reset_index(drop=True)
rd['Num_Serviço'] = rd.Num_Serviço.astype('int')
rd = df_optimize(rd, exclude=['Frequência'])
console.print(":trophy: [green]Base Consolidada. Salvando os arquivos...")
try:
rd.to_feather(f"{pasta}/base.fth")
except ArrowInvalid:
Path(f"{pasta}/base.fth").unlink()
with pd.ExcelWriter(f"{pasta}/base.xlsx") as workbook:
rd.to_excel(workbook, sheet_name='Sheet1', engine="openpyxl", index=False)
return rd
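# Minimal usage sketch (not part of the original module and not called anywhere):
# refresh everything into a local working folder and inspect the consolidated table.
# The default folder name "dados" is hypothetical.
def _example_refresh(pasta="dados"):
    base = update_base(pasta, up_stel=True, up_radcom=True, up_mosaico=True)
    console.print(f"Consolidated base: {base.shape[0]} rows x {base.shape[1]} columns")
    return base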
# Cell
def read_stel(pasta, update=False):
    """Reads the locally saved STEL database. Optionally refreshes it from the ANATELBDRO01 database when `update = True` or when the local file does not exist"""
if update:
update_stel(pasta)
file = Path(f"{pasta}/stel.fth")
try:
stel = pd.read_feather(file)
except (ArrowInvalid, FileNotFoundError):
file = Path(f"{pasta}/stel.xlsx")
try:
stel = pd.read_excel(file, engine="openpyxl")
except FileNotFoundError:
            stel = read_stel(pasta, True)
return stel
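# read_stel above and read_radcom/read_mosaico below repeat the same fallback chain:
# feather cache first, then the xlsx copy, then a refresh from the source. The helper
# below is a refactoring sketch of that pattern (an illustration, not code used by this
# module); it assumes the update_* functions keep their current signatures.
def _read_cached(pasta, name, update_func, update=False):
    """Reads `{pasta}/{name}.fth`, falling back to `.xlsx`, refreshing via `update_func` when needed (sketch)."""
    if update:
        update_func(pasta)
    try:
        return pd.read_feather(Path(f"{pasta}/{name}.fth"))
    except (ArrowInvalid, FileNotFoundError):
        try:
            return pd.read_excel(f"{pasta}/{name}.xlsx", engine="openpyxl")
        except FileNotFoundError:
            return _read_cached(pasta, name, update_func, update=True)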
def read_radcom(pasta, update=False):
    """Reads the locally saved RADCOM database. Optionally refreshes it from the ANATELBDRO01 database when `update = True` or when the local file does not exist"""
if update:
update_radcom(pasta)
file = Path(f"{pasta}/radcom.fth")
try:
radcom = pd.read_feather(file)
except (ArrowInvalid, FileNotFoundError):
file = Path(f"{pasta}/radcom.xlsx")
try:
radcom =
|
pd.read_excel(file, engine="openpyxl")
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.metrics import r2_score
import statsmodels.api as sm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
xtr = pd.read_csv('xtr.csv').values
ytr = pd.read_csv('ytr.csv').values.reshape(len(xtr) ,)
xte =
|
pd.read_csv('xte.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
def setup(year,hist,hist_year,operating_horizon,perfect_foresight):
# year = 0
# hist = 0
# hist_year = 2010
#read generator parameters into DataFrame
df_gen = pd.read_csv('CA_data_file/generators.csv',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('CA_data_file/paths.csv',header=0)
#calendar
df_calendar = pd.read_excel('CA_data_file/calendar.xlsx',header=0)
#list zones
zones = ['PGE_valley', 'PGE_bay', 'SCE', 'SDGE']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zones]
df_load = df_load.loc[year*8760:year*8760+8759]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
    rv = df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
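    # Equivalent vectorized form of the reserve calculation above (illustrative only):
    # each hourly reserve requirement is 4% of the total zonal load, i.e.
    #   df_reserves = pd.DataFrame({'reserves': df_load.sum(axis=1).values * 0.04})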
##daily hydropower availability
df_hydro_PGE = pd.read_csv('Hydro_setup/CA_dispatchable_PGE.csv',header=0)
df_hydro_SCE = pd.read_csv('Hydro_setup/CA_dispatchable_SCE.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'CAISO']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
wind_caps = pd.read_excel('CA_data_file/wind_caps.xlsx')
##time series solar for each TAC
df_solar = pd.read_csv('../Stochastic_engine/Synthetic_solar_power/solar_power_sim.csv',header=0)
df_solar = df_solar.loc[year*8760:year*8760+8759]
df_solar = df_solar.reset_index()
solar_caps = pd.read_excel('CA_data_file/solar_caps.xlsx')
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_imports66 = pd.read_csv('Path_setup/CA_dispatchable_66.csv',header=0)
df_imports61 = pd.read_csv('Path_setup/CA_dispatchable_61.csv',header=0)
df_imports45 = pd.read_csv('Path_setup/CA_dispatchable_45.csv',header=0)
df_imports46 = pd.read_csv('Path_setup/CA_dispatchable_46.csv',header=0)
df_imports42 = pd.read_csv('Path_setup/CA_dispatchable_42.csv',header=0)
df_imports24 = pd.read_csv('Path_setup/CA_dispatchable_24.csv',header=0)
##hourly time series of exports by zone
df_exports24 = pd.read_csv('Path_setup/CA_exports24.csv',header=0)
df_exports42 = pd.read_csv('Path_setup/CA_exports42.csv',header=0)
df_exports45 = pd.read_csv('Path_setup/CA_exports45.csv',header=0)
df_exports66 = pd.read_csv('Path_setup/CA_exports66.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must =
|
pd.read_excel('CA_data_file/must_run.xlsx',header=0)
|
pandas.read_excel
|
import pandas as pd
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler, normalize, MinMaxScaler
import matplotlib.pyplot as plt
from collections import defaultdict, Counter
import urllib.request as request
import json
import os
from scipy.sparse import csr_matrix as csr_matrix
from matplotlib import cm
from tempfile import TemporaryFile
import torch
import torch_geometric
import argparse
from utils import makepath
## Replace function to rename countries for uniformity
def replace_(country_list, replace_dict):
nnn = country_list.copy()
#nnn[i] = a.replace('&','and')
for k, v in replace_dict.items():
for i,a in enumerate(nnn):
if a == k:
nnn[i] = v
return nnn
def makeplot(edge_list, countries_attributes, country_dict, output_dir):
G = nx.Graph() # Define Graph here
G = G.to_undirected()
G.add_weighted_edges_from(edge_list)
pos = nx.spring_layout(G)
A = nx.adjacency_matrix(G)
A = A.todense()
# attr_names = countries_profile.columns[2:]
attr_dict = get_node_attributes(countries_attributes, country_dict)
# attr_dict = set_node_attributes(scaled_data, attr_names)
nx.set_node_attributes(G, attr_dict)
plt.figure(figsize=(20, 12))
nx.draw(G, pos, node_size=400, with_labels=True, edge_color='#C0C0C0')
plt.savefig(output_dir + 'graph_raw.png')
plt.show()
return
# Import data between countries into tuples of countries and edges
def make_directed_edges(data, compare_dict):
data = data.copy()
edges = []
for i in range(len(data)):
c = (compare_dict[str(data.iloc[i,1])], compare_dict[str(data.iloc[i,2])],
round(data.iloc[i,3],2))
edges.append(c)
#edges = sorted(iedges)
return edges
def check_cyclic_edges(edge_list, remove_edges = False):
self_edges = []
new_edge_list = []
idx = []
for i in range(len(edge_list)):
if (edge_list[i][0] == edge_list[i][1]):
#print(edge_list[i])
self_edges.append(edge_list[i])
idx.append(i)
else:
new_edge_list.append(edge_list[i])
if remove_edges:
return new_edge_list, self_edges
else:
return edge_list, self_edges
# Function to make a dictionary of nodes and attributes
def get_node_attributes(attributes, dict_):
attr_names = attributes.columns[1:]
attr_dict = {}
for i in range(len(attributes)):
attr_dict[dict_[attributes.loc[i][0]]] = {attr_names[j]: k for j, k in enumerate(attributes.loc[i][1:])}
return attr_dict
def income_level_dict(income_grp, country_dict):
groups = income_grp.iloc[:,1]
classes = list(set(groups))
c_dict = {}
for c in classes:
l = income_grp[groups== c].iloc[:,0]
c_dict[c] = [country_dict[a] for a in l]
return c_dict
# Function to make a dictionary of nod# Function to make a dictionary of nodes and attributes
def get_node_attributes(attributes, dict_):
attr_names = attributes.columns[1:]
attr_dict = {}
for i in range(len(attributes)):
attr_dict[dict_[attributes.loc[i][0]]] = {attr_names[j]: k for j, k in enumerate(attributes.loc[i][1:])}
return attr_dict
## Read data of countries import and exports with partner countries from directory
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--raw_data_dir", default = '../data', type = str, required = False)
parser.add_argument("--output_dir", default = '../data/processed', type = str, required = False)
parser.add_argument("-makeplot", type = bool, default = True, help = "Plot graph")
args = parser.parse_args()
input_dir = args.raw_data_dir
comtradeurl = os.path.join(input_dir, "comtrade_data")
makepath(args.output_dir)
print("Processing data...")
replace_dict = np.load(input_dir + '/countries_rename.npy', allow_pickle=True).item() # Get dict items from npy file
frames = []
for name in os.listdir(comtradeurl):
a = pd.read_csv(os.path.join(comtradeurl, name))
a = a[['Trade Flow','Reporter','Partner','Trade Value (US$)']]
frames.append(a)
trade =
|
pd.concat(frames, ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
    "Load the raw data"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
    "Split out the validation set by time"
    # Convert test-set timestamps to standard datetime strings
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    # Convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    # Process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    # Process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    # Compute the rank of the item category inside predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    # Count the categories shared by item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    # Count of the categories that differ
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    del trainSet['predict_category']; del testSet['predict_category']
    "Split the datasets"
    # Test set: features from the 23rd-24th, labels on the 25th
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    # Validation set: features from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    # Training set: features 21st-22nd with labels on the 23rd; 20th-21st labelled on the 22nd; 19th-20th labelled on the 21st; 18th-19th labelled on the 20th
    # Label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    # Feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
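# Summary of the sliding-window scheme returned above (restating the comments inside
# splitData; all dates are September 2018): labels for day N come from features built
# on days N-2 and N-1. The mapping below is purely illustrative and is not used
# anywhere else in this script.
_WINDOW_SCHEME = {
    'test':     {'feature_days': ('23', '24'), 'label_day': '25'},
    'validate': {'feature_days': ('22', '23'), 'label_day': '24'},
    'train1':   {'feature_days': ('21', '22'), 'label_day': '23'},
    'train2':   {'feature_days': ('20', '21'), 'label_day': '22'},
    'train3':   {'feature_days': ('19', '20'), 'label_day': '21'},
    'train4':   {'feature_days': ('18', '19'), 'label_day': '20'},
}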
def modelXgb(train,test):
    "XGBoost model"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    # Based on the Pearson correlation coefficient, drop attributes whose correlation is below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # Model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # Training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # Prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
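# The commented-out drop lists above were chosen, per the note in modelXgb, by removing
# attributes whose Pearson correlation with the label falls below -0.2. The helper below
# is a sketch of how such a filter could be computed with pandas; it is an illustration,
# not the selection code the author actually ran.
def low_corr_features(train, label='is_trade', threshold=-0.2):
    corr = train.select_dtypes(include=[np.number]).corr()[label]
    return [col for col, value in corr.items() if col != label and value < threshold]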
def get_item_feat(data,dataFeat):
    "Feature extraction for item"
result = pd.DataFrame(dataFeat['item_id'])
    result = result.drop_duplicates(['item_id'],keep='first')
    "1. Count how many times the item appears"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "2. Count how many times the item was bought historically"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
    result = pd.merge(result,feat,on=['item_id'],how='left')
    "3. Conversion-rate feature for the item"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
    result['item_buy_ratio'] = buy_ratio
    "4. Count how many times the item was not bought historically"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
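# get_item_feat above and the near-identical helpers below all follow the same four-step
# recipe (occurrence count, purchase count, conversion ratio, non-purchase count) keyed on
# one or two id columns. The generic version below is a refactoring sketch only; the rest
# of this script keeps the original one-function-per-key style.
def get_count_feat(dataFeat, keys, prefix):
    result = dataFeat[keys].drop_duplicates(keys, keep='first')
    grouped = dataFeat.groupby(keys)['is_trade']
    result = pd.merge(result, grouped.count().reset_index(name=prefix + '_count'), on=keys, how='left')
    result = pd.merge(result, grouped.sum().reset_index(name=prefix + '_buy_count'), on=keys, how='left')
    result[prefix + '_buy_ratio'] = list(map(lambda x, y: -1 if y == 0 else x / y,
                                             result[prefix + '_buy_count'], result[prefix + '_count']))
    result[prefix + '_not_buy_count'] = result[prefix + '_count'] - result[prefix + '_buy_count']
    return result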
def get_user_feat(data,dataFeat):
    "Feature extraction for user"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
    "Feature extraction for context"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
    "Feature extraction for shop"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
    "Feature extraction for context_timestamp"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
    "Feature extraction for item_brand"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
    "Feature extraction for item_city"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
    "Feature extraction for user_gender"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
    "Feature extraction for user_occupation"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
    "Feature extraction for context_page"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
    "Feature extraction for shop_review_num_level"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['shop_review_num_level_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"2.统计shop_review_num_level历史被购买的次数"
dataFeat['shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"3.统计shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_review_num_level_buy_count,result.shop_review_num_level_count))
result['shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计shop_review_num_level历史未被够买的次数"
result['shop_review_num_level_not_buy_count'] = result['shop_review_num_level_count'] - result['shop_review_num_level_buy_count']
return result
def get_item_category_list_2_feat(data,dataFeat):
    "Feature extraction for item_category_list_2"
result = pd.DataFrame(dataFeat['item_category_list_2'])
result = result.drop_duplicates(['item_category_list_2'],keep='first')
"1.统计item_category_list_2出现次数"
dataFeat['item_category_list_2_count'] = dataFeat['item_category_list_2']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['item_category_list_2_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"2.统计item_category_list_2历史被购买的次数"
dataFeat['item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"3.统计item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_category_list_2_buy_count,result.item_category_list_2_count))
result['item_category_list_2_buy_ratio'] = buy_ratio
"4.统计item_category_list_2历史未被够买的次数"
result['item_category_list_2_not_buy_count'] = result['item_category_list_2_count'] - result['item_category_list_2_buy_count']
return result
def get_user_item_feat(data,dataFeat):
    "Feature extraction for user-item"
result = pd.DataFrame(dataFeat[['user_id','item_id']])
result = result.drop_duplicates(['user_id','item_id'],keep='first')
"1.统计user-item出现次数"
dataFeat['user_item_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_count',aggfunc='count').reset_index()
del dataFeat['user_item_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"2.统计user-item历史被购买的次数"
dataFeat['user_item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_buy_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"3.统计user-item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_buy_count,result.user_item_count))
result['user_item_buy_ratio'] = buy_ratio
"4.统计user-item历史未被够买的次数"
result['user_item_not_buy_count'] = result['user_item_count'] - result['user_item_buy_count']
return result
def get_user_shop_feat(data,dataFeat):
    "Feature extraction for user-shop"
result = pd.DataFrame(dataFeat[['user_id','shop_id']])
result = result.drop_duplicates(['user_id','shop_id'],keep='first')
"1.统计user-shop出现次数"
dataFeat['user_shop_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_count',aggfunc='count').reset_index()
del dataFeat['user_shop_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"2.统计user-shop历史被购买的次数"
dataFeat['user_shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"3.统计user-shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_buy_count,result.user_shop_count))
result['user_shop_buy_ratio'] = buy_ratio
"4.统计user-shop历史未被够买的次数"
result['user_shop_not_buy_count'] = result['user_shop_count'] - result['user_shop_buy_count']
return result
def get_user_context_feat(data,dataFeat):
    "Feature extraction for user-context"
result = pd.DataFrame(dataFeat[['user_id','context_id']])
result = result.drop_duplicates(['user_id','context_id'],keep='first')
"1.统计user-context出现次数"
dataFeat['user_context_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_count',aggfunc='count').reset_index()
del dataFeat['user_context_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"2.统计user-context历史被购买的次数"
dataFeat['user_context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_buy_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"3.统计user-context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_buy_count,result.user_context_count))
result['user_context_buy_ratio'] = buy_ratio
"4.统计user-context历史未被够买的次数"
result['user_context_not_buy_count'] = result['user_context_count'] - result['user_context_buy_count']
return result
def get_user_timestamp_feat(data,dataFeat):
    "Feature extraction for user-context_timestamp"
result = pd.DataFrame(dataFeat[['user_id','context_timestamp']])
result = result.drop_duplicates(['user_id','context_timestamp'],keep='first')
"1.统计user-context_timestamp出现次数"
dataFeat['user_context_timestamp_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['user_context_timestamp_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"2.统计user-context_timestamp历史被购买的次数"
dataFeat['user_context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_timestamp_buy_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"3.统计user-context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_timestamp_buy_count,result.user_context_timestamp_count))
result['user_context_timestamp_buy_ratio'] = buy_ratio
"4.统计user-context_timestamp历史未被够买的次数"
result['user_context_timestamp_not_buy_count'] = result['user_context_timestamp_count'] - result['user_context_timestamp_buy_count']
return result
def get_user_item_brand_feat(data,dataFeat):
    "Feature extraction for user-item_brand"
result = pd.DataFrame(dataFeat[['user_id','item_brand_id']])
result = result.drop_duplicates(['user_id','item_brand_id'],keep='first')
"1.统计user-item_brand_id出现次数"
dataFeat['user_item_brand_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_brand_id_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"2.统计user-item_brand_id历史被购买的次数"
dataFeat['user_item_brand_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_brand_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"3.统计user-item_brand_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_brand_id_buy_count,result.user_item_brand_id_count))
result['user_item_brand_id_buy_ratio'] = buy_ratio
"4.统计user-item_brand_id历史未被够买的次数"
result['user_item_brand_id_not_buy_count'] = result['user_item_brand_id_count'] - result['user_item_brand_id_buy_count']
return result
def get_user_user_gender_feat(data,dataFeat):
    "Feature extraction for user-user_gender"
result = pd.DataFrame(dataFeat[['user_id','user_gender_id']])
result = result.drop_duplicates(['user_id','user_gender_id'],keep='first')
"1.统计user-user_gender_id出现次数"
dataFeat['user_user_gender_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_gender_id_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"2.统计user-user_gender_id历史被购买的次数"
dataFeat['user_user_gender_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_gender_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"3.统计user-user_gender_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_gender_id_buy_count,result.user_user_gender_id_count))
result['user_user_gender_id_buy_ratio'] = buy_ratio
"4.统计user-user_gender_id历史未被够买的次数"
result['user_user_gender_id_not_buy_count'] = result['user_user_gender_id_count'] - result['user_user_gender_id_buy_count']
return result
def get_user_item_city_feat(data,dataFeat):
    "Feature extraction for user-item_city"
result = pd.DataFrame(dataFeat[['user_id','item_city_id']])
result = result.drop_duplicates(['user_id','item_city_id'],keep='first')
"1.统计user-item_city_id出现次数"
dataFeat['user_item_city_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_city_id_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"2.统计user-item_city_id历史被购买的次数"
dataFeat['user_item_city_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_city_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"3.统计user-item_city_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_city_id_buy_count,result.user_item_city_id_count))
result['user_item_city_id_buy_ratio'] = buy_ratio
"4.统计user-item_city_id历史未被够买的次数"
result['user_item_city_id_not_buy_count'] = result['user_item_city_id_count'] - result['user_item_city_id_buy_count']
return result
def get_user_context_page_feat(data,dataFeat):
    "Feature extraction for user-context_page"
result = pd.DataFrame(dataFeat[['user_id','context_page_id']])
result = result.drop_duplicates(['user_id','context_page_id'],keep='first')
"1.统计user-context_page_id出现次数"
dataFeat['user_context_page_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_count',aggfunc='count').reset_index()
del dataFeat['user_context_page_id_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"2.统计user-context_page_id历史被购买的次数"
dataFeat['user_context_page_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_page_id_buy_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"3.统计user-context_page_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_page_id_buy_count,result.user_context_page_id_count))
result['user_context_page_id_buy_ratio'] = buy_ratio
"4.统计user-context_page_id历史未被够买的次数"
result['user_context_page_id_not_buy_count'] = result['user_context_page_id_count'] - result['user_context_page_id_buy_count']
return result
def get_user_user_occupation_feat(data,dataFeat):
    "Feature extraction for user-user_occupation"
result = pd.DataFrame(dataFeat[['user_id','user_occupation_id']])
result = result.drop_duplicates(['user_id','user_occupation_id'],keep='first')
"1.统计user-user_occupation_id出现次数"
dataFeat['user_user_occupation_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_occupation_id_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"2.统计user-user_occupation_id历史被购买的次数"
dataFeat['user_user_occupation_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_occupation_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"3.统计user-user_occupation_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_occupation_id_buy_count,result.user_user_occupation_id_count))
result['user_user_occupation_id_buy_ratio'] = buy_ratio
"4.统计user-user_occupation_id历史未被够买的次数"
result['user_user_occupation_id_not_buy_count'] = result['user_user_occupation_id_count'] - result['user_user_occupation_id_buy_count']
return result
def get_user_shop_review_num_level_feat(data,dataFeat):
    "Feature extraction for user-shop_review_num_level"
result = pd.DataFrame(dataFeat[['user_id','shop_review_num_level']])
result = result.drop_duplicates(['user_id','shop_review_num_level'],keep='first')
"1.统计user-shop_review_num_level出现次数"
dataFeat['user_shop_review_num_level_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['user_shop_review_num_level_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"2.统计user-shop_review_num_level历史被购买的次数"
dataFeat['user_shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"3.统计user-shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_review_num_level_buy_count,result.user_shop_review_num_level_count))
result['user_shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计user-shop_review_num_level历史未被够买的次数"
result['user_shop_review_num_level_not_buy_count'] = result['user_shop_review_num_level_count'] - result['user_shop_review_num_level_buy_count']
return result
def get_user_item_category_list_2_feat(data,dataFeat):
    "Feature extraction for user-item_category_list_2"
result = pd.DataFrame(dataFeat[['user_id','item_category_list_2']])
result = result.drop_duplicates(['user_id','item_category_list_2'],keep='first')
"1.统计user-item_category_list_2出现次数"
dataFeat['user_item_category_list_2_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['user_item_category_list_2_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"2.统计user-item_category_list_2历史被购买的次数"
dataFeat['user_item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['user_id','item_category_list_2'],how='left')
"3.统计user-item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_category_list_2_buy_count,result.user_item_category_list_2_count))
result['user_item_category_list_2_buy_ratio'] = buy_ratio
"4.统计user-item_category_list_2历史未被够买的次数"
result['user_item_category_list_2_not_buy_count'] = result['user_item_category_list_2_count'] - result['user_item_category_list_2_buy_count']
return result
def merge_feat(data,dataFeat):
    "Merge all of the features"
    # Build the per-entity features
item = get_item_feat(data,dataFeat)
user = get_user_feat(data,dataFeat)
context = get_context_feat(data,dataFeat)
shop = get_shop_feat(data,dataFeat)
timestamp = get_timestamp_feat(data,dataFeat)
item_brand = get_item_brand_feat(data,dataFeat)
user_gender = get_user_gender_feat(data,dataFeat)
item_city = get_item_city_feat(data,dataFeat)
context_page = get_context_page_feat(data,dataFeat)
user_occupation = get_user_occupation_feat(data,dataFeat)
shop_review_num_level = get_shop_review_num_level_feat(data,dataFeat)
item_category_list_2 = get_item_category_list_2_feat(data,dataFeat)
    # Interaction features
user_item = get_user_item_feat(data,dataFeat)
user_shop = get_user_shop_feat(data,dataFeat)
user_context = get_user_context_feat(data,dataFeat)
user_timestamp = get_user_timestamp_feat(data,dataFeat)
user_item_brand = get_user_item_brand_feat(data,dataFeat)
user_user_gender = get_user_user_gender_feat(data,dataFeat)
user_item_city = get_user_item_city_feat(data,dataFeat)
user_context_page = get_user_context_page_feat(data,dataFeat)
user_user_occupation = get_user_user_occupation_feat(data,dataFeat)
user_shop_review_num_level = get_user_shop_review_num_level_feat(data,dataFeat)
user_item_category_list_2 = get_user_item_category_list_2_feat(data,dataFeat)
    # Merge the features in
data = pd.merge(data,item,on='item_id',how='left')
data = pd.merge(data,user,on='user_id',how='left')
data = pd.merge(data,context,on='context_id',how='left')
data = pd.merge(data,timestamp,on='context_timestamp',how='left')
data = pd.merge(data,shop,on='shop_id',how='left')
data = pd.merge(data,item_brand,on='item_brand_id',how='left')
data =
|
pd.merge(data,user_gender,on='user_gender_id',how='left')
|
pandas.merge
|
import numpy as np
import pandas as pd
from rdt import get_demo
def test_get_demo():
demo = get_demo()
assert list(demo.columns) == [
'last_login', 'email_optin', 'credit_card', 'age', 'dollars_spent'
]
assert len(demo) == 5
assert list(demo.isna().sum(axis=0)) == [1, 1, 1, 0, 1]
def test_get_demo_many_rows():
demo = get_demo(10)
login_dates = pd.Series([
'2021-06-26', '2021-02-10', 'NaT', '2020-09-26', '2020-12-22', '2019-11-27',
'2002-05-10', '2014-10-04', '2014-03-19', '2015-09-13'
], dtype='datetime64[ns]')
email_optin = [False, False, False, True, np.nan, np.nan, False, True, False, False]
credit_card = [
'VISA', 'VISA', 'AMEX', np.nan, 'DISCOVER', 'AMEX', 'AMEX', 'DISCOVER', 'DISCOVER', 'VISA'
]
age = [29, 18, 21, 45, 32, 50, 93, 75, 39, 66]
dollars_spent = [99.99, np.nan, 2.50, 25.00, 19.99, 52.48, 39.99, 4.67, np.nan, 23.28]
expected = pd.DataFrame({
'last_login': login_dates,
'email_optin': email_optin,
'credit_card': credit_card,
'age': age,
'dollars_spent': dollars_spent
})
|
pd.testing.assert_frame_equal(demo, expected)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
from tqdm import tqdm
from ..binarize import to_binary
from cana.boolean_node import BooleanNode
# set up variables
n_inputs = 2**2
n_rules = 2**(2**3)
df_dict = []
for rule in tqdm(range(n_rules)):
canal = {} # becomes row of dataframe
# to_binary returns list of strings of binary digits
arr = to_binary(rule, digits=8)
# use CANA to compute canalization
bn = BooleanNode.from_output_list(outputs=arr, name=rule)
ks = bn.input_symmetry()
kr = bn.input_redundancy()
sym0, sym1, sym2 = bn.input_symmetry(mode='input')
red0, red1, red2 = bn.input_redundancy(mode='input')
# update the dictionary with the PI values
canal['rule'] = rule
canal['kr*'] = kr
canal['ks*'] = ks
canal['r(0)'] = red0
canal['r(1)'] = red1
canal['r(2)'] = red2
canal['s(0)'] = sym0
canal['s(1)'] = sym1
canal['s(2)'] = sym2
df_dict.append(canal)
# write out the dataframe
df =
|
pd.DataFrame(df_dict)
|
pandas.DataFrame
|
########################################################################################################
# data_sql.py - Data pull from json, clean it up and upload to SQL
# by <NAME>
#
# This Python script pulls the metadata (link) from the following three json data sources:-
# 1. https://api.weather.gov/points/31.7276,-110.8754
# 2. https://api.weather.gov/points/32.395,-110.6911
# 3. https://api.weather.gov/points/32.4186,-110.7383
#
# The links pulled (json data) from the above three json responses are
# the grid data links that are used to pull all the weather related data for the three campgrounds:-
# 1. https://api.weather.gov/gridpoints/TWC/91,26
# 2. https://api.weather.gov/gridpoints/TWC/101,54
# 3. https://api.weather.gov/gridpoints/TWC/100,56
#
# From the above grid data, 4 dataframes are created. The challenge was pulling the data from the
# above json links and converting the date-time columns to a datetime format that can be used
# for the SQL upload and for the graphs. Temperatures also need to be converted to degF and wind
# speeds to miles per hour:-
# 1. Campground information DF with information like lat, lon, elevation,
# meta url, grid url, forest url, campsite url, fire danger and map code.
# 2. One for each campground (bs_grid_df, rc_grid_df, sc_grid_df). These DFs
# have columns (temp in degF, temp time, wind speed, wind speed time, wind gust,
# wind gust time, prob precipitation, prob precip time, qty precip, qty precip time).
#
# SQLAlchemy was used to create 4 tables in Postgres SQL and then the above 4 DataFrames were uploaded
# to Postgres SQL. The table names in SQL are:
# 1. camp_wx
# 2. cg_bog_spring
# 3. cg_rose_canyon
# 4. cg_spencer_canyon
#
# This script was converted from data_sql.ipynb
##########################################################################################################
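# The upload step described above is not shown in this excerpt; the sketch below is an
# assumption of how the four DataFrames could be written with SQLAlchemy + pandas
# `to_sql`, using the table names listed in the header. The engine URI is supplied by
# the caller, mirroring `update_db(uri)` below. It is an illustration, not the original
# author's upload code, and it is not called anywhere in this script.
def _upload_frames_sketch(uri, camp_wx, cg_bog_spring, cg_rose_canyon, cg_spencer_canyon):
    from sqlalchemy import create_engine  # local import so the sketch is self-contained
    engine = create_engine(uri)
    frames = {
        "camp_wx": camp_wx,
        "cg_bog_spring": cg_bog_spring,
        "cg_rose_canyon": cg_rose_canyon,
        "cg_spencer_canyon": cg_spencer_canyon,
    }
    for table_name, frame in frames.items():
        # replace the table on every refresh; the original script may handle this differently
        frame.to_sql(table_name, engine, if_exists="replace", index=False)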
# %%
# ------------------------
# Dependencies and Setup
# ------------------------
import pandas as pd
import json
import requests
import numpy as np
import datetime
from datetime import timedelta
from splinter import Browser
from bs4 import BeautifulSoup
def update_db(uri):
# %%
# --------------------------------------------------------------------
# Bog Spring CAMPGROUND
# --------------------------------------------------------------------
# ---------------------------------------------
# Pull Grid Data URL From Metadata url for
# ---------------------------------------------
bs_url = "https://api.weather.gov/points/31.7276,-110.8754"
response_bs = requests.get(bs_url)
data_bs = response_bs.json()
data_bs
grid_data_bs = data_bs["properties"]["forecastGridData"]
grid_data_bs
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for BogSprings Campground
# ------------------------------------------------------------------------
bs_forcast_url = grid_data_bs
response_bs_forecast = requests.get(bs_forcast_url)
data_bs_forecast = response_bs_forecast.json()
data_bs_forecast
lat_bs = data_bs_forecast["geometry"]["coordinates"][0][0][1]
lat_bs
lng_bs = data_bs_forecast["geometry"]["coordinates"][0][0][0]
lng_bs
elevation_bs = data_bs_forecast["properties"]["elevation"]["value"]
elevation_bs
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
bs_df = pd.DataFrame({"id": 1,
"campground": "Bog Springs",
"lat": [lat_bs],
"lon": [lng_bs],
"elevation": [elevation_bs],
"nws_meta_url": [bs_url],
"nws_grid_url": [grid_data_bs],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25732&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746637.jpg",
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3393.5714340164473!2d-110.87758868361043!3d31.72759998130141!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6970db0a5e44d%3A0x1b48084e4d6db970!2sBog%20Springs%20Campground!5e0!3m2!1sen!2sus!4v1626560932236!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
bs_df
# %%
# -------------------------------------------------------------------------------------------------
    # Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp = []
for i in data_bs_forecast["properties"]["temperature"]["values"]:
temp.append(i)
temp_df = pd.DataFrame(temp)
temp_df
# Temperature conversion to Degree Fahrenheit
temp_df['degF'] = (temp_df['value'] * 9 / 5) + 32
temp_df
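    # Worked example of the conversion above (the gridpoint values are assumed to be in
    # Celsius, which is what the formula implies): 20 degC -> (20 * 9 / 5) + 32 = 68 degF.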
# validTime Column split to date and time for Temperature
date_temp = temp_df['validTime'].str.split('T', n=1, expand=True)
time_temp = date_temp[1].str.split('+', n=1, expand=True)
time_temp
temp_df['date_temp'] = date_temp[0]
temp_df['time_temp'] = time_temp[0]
# Combine date and time with a space in between the two
temp_df['date_time_temp'] = temp_df['date_temp'] + ' ' + temp_df['time_temp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# temp_df['date_time_temp'] = pd.to_datetime(temp_df['date_time_temp'])
# Pull all the data for today + 3 days
time_delta_temp = datetime.datetime.strptime(temp_df['date_temp'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_df['times_temp'] = time_delta_temp.strftime("%Y-%m-%d")
temp_df = temp_df.loc[temp_df['date_temp'] < temp_df['times_temp']]
temp_df
# temp_df.dtypes
# =================== Wind Speed Data ======================
wind_speed = []
for i in data_bs_forecast["properties"]["windSpeed"]["values"]:
wind_speed.append(i)
windSpeed_df = pd.DataFrame(wind_speed)
windSpeed_df
# Converting KM/hour to Miles/hour
windSpeed_df['miles/hour'] = windSpeed_df['value'] * 0.621371
windSpeed_df
# validTime Column split to date and time for Wind Speed
date_ws = windSpeed_df['validTime'].str.split('T', n=1, expand=True)
time_ws = date_ws[1].str.split('+', n=1, expand=True)
time_ws
windSpeed_df['date_ws'] = date_ws[0]
windSpeed_df['time_ws'] = time_ws[0]
# Combine date and time with a space in between the two
windSpeed_df['date_time_ws'] = windSpeed_df['date_ws'] + ' ' + windSpeed_df['time_ws']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# windSpeed_df['date_time_ws'] = pd.to_datetime(windSpeed_df['date_time_ws'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_df['date_ws'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_df['times_ws'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_df = windSpeed_df.loc[windSpeed_df['date_ws'] < windSpeed_df['times_ws']]
windSpeed_df
# windSpeed_df.dtypes
# =================== Wind Gust Data ======================
wind_gust = []
for i in data_bs_forecast["properties"]["windGust"]["values"]:
wind_gust.append(i)
wind_gust_df = pd.DataFrame(wind_gust)
wind_gust_df
# Converting KM/hour to Miles/hour
wind_gust_df['m/h'] = wind_gust_df['value'] * 0.621371
wind_gust_df
# # validTime Column split to date and time for Wind Gusts
date_wg = wind_gust_df['validTime'].str.split('T', n=1, expand=True)
time_wg = date_wg[1].str.split('+', n=1, expand=True)
time_wg
wind_gust_df['date_wg'] = date_wg[0]
wind_gust_df['time_wg'] = time_wg[0]
# Combine date and time with a space in between the two
wind_gust_df['date_time_wg'] = wind_gust_df['date_wg'] + ' ' + wind_gust_df['time_wg']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# wind_gust_df['date_time_wg'] = pd.to_datetime(wind_gust_df['date_time_wg'])
wind_gust_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_df['date_wg'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_df['times_wg'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_df = wind_gust_df.loc[wind_gust_df['date_wg'] < wind_gust_df['times_wg']]
wind_gust_df
# wind_gust_df.dtypes
# =================== Probability of Precipitation Data ======================
prob_precip = []
for i in data_bs_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip.append(i)
prob_precip_df = pd.DataFrame(prob_precip)
prob_precip_df
# # validTime Column split to date and time for Probability Precipitation
date_pp = prob_precip_df['validTime'].str.split('T', n=1, expand=True)
time_pp = date_pp[1].str.split('+', n=1, expand=True)
time_pp
prob_precip_df['date_pp'] = date_pp[0]
prob_precip_df['time_pp'] = time_pp[0]
# Combine date and time with a space in between the two
prob_precip_df['date_time_pp'] = prob_precip_df['date_pp'] + ' ' + prob_precip_df['time_pp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# prob_precip_df['date_time_pp'] = pd.to_datetime(prob_precip_df['date_time_pp'])
prob_precip_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_df['date_pp'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_df['times_pp'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_df = prob_precip_df.loc[prob_precip_df['date_pp'] < prob_precip_df['times_pp']]
prob_precip_df
# prob_precip_df.dtypes
# =================== Quantity of Precipitation Data ======================
qty_precip = []
for i in data_bs_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip.append(i)
qty_precip_df = pd.DataFrame(qty_precip)
qty_precip_df
# validTime Column split to date and time for Quantity of Precipitation
date_qp = qty_precip_df['validTime'].str.split('T', n=1, expand=True)
time_qp = date_qp[1].str.split('+', n=1, expand=True)
time_qp
qty_precip_df['date_qp'] = date_qp[0]
qty_precip_df['time_qp'] = time_qp[0]
# Combine date and time with a space in between the two
qty_precip_df['date_time_qp'] = qty_precip_df['date_qp'] + ' ' + qty_precip_df['time_qp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# qty_precip_df['date_time_qp'] = pd.to_datetime(qty_precip_df['date_time_qp'])
qty_precip_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_df['date_qp'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_df['times_qp'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_df = qty_precip_df.loc[qty_precip_df['date_qp'] < qty_precip_df['times_qp']]
qty_precip_df
# qty_precip_df.dtypes
# =================== Create DataFrame with all the above data for Bog Spring Campground ======================
bs_grid_df = pd.DataFrame({"id":1,
"campground": "Bog Springs",
"forecasted_temperature_degF": temp_df['degF'],
"forecastTime_temperature": temp_df['date_time_temp'],
"forecasted_windSpeed_miles_per_h": windSpeed_df['miles/hour'],
"forecastTime_windSpeed": windSpeed_df['date_time_ws'],
"forecasted_windGust_miles_per_h": wind_gust_df['m/h'],
"forecastTime_windGust": wind_gust_df['date_time_wg'],
"forecasted_probabilityOfPrecipitation": prob_precip_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_df['date_time_pp'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_df['date_time_qp'],
})
bs_grid_df
# bs_grid_df.dtypes
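# Caveat (note added for clarity, not from the source): the component frames above
# have different lengths and indices, so pandas aligns them by index label and
# fills gaps with NaN. If positional alignment were wanted instead, each frame
# could be reset first, e.g.:
#   temp_df = temp_df.reset_index(drop=True)
#   windSpeed_df = windSpeed_df.reset_index(drop=True)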
# %%
# --------------------------------------------------------------------
# ROSE CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
rc_url = "https://api.weather.gov/points/32.395,-110.6911"
response_rc = requests.get(rc_url)
data_rc = response_rc.json()
data_rc
grid_data_rc = data_rc["properties"]["forecastGridData"]
grid_data_rc
# %%
# ------------------------------------------------------------------------
# Pull Latitude, Longitude and Elevation data for Rose Canyon Campground
# ------------------------------------------------------------------------
rc_forecast_url = grid_data_rc
response_rc_forecast = requests.get(rc_forecast_url)
data_rc_forecast = response_rc_forecast.json()
data_rc_forecast
lat_rc = data_rc_forecast["geometry"]["coordinates"][0][0][1]
lat_rc
lng_rc = data_rc_forecast["geometry"]["coordinates"][0][0][0]
lng_rc
elevation_rc = data_rc_forecast["properties"]["elevation"]["value"]
elevation_rc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
rc_df = pd.DataFrame({"id": 2,
"campground": "Rose Canyon",
"lat": [lat_rc],
"lon": [lng_rc],
"elevation": [elevation_rc],
"nws_meta_url": [rc_url],
"nws_grid_url": [grid_data_rc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25698&actid=29",
"campsite_url": "https://cdn.recreation.gov/public/2019/06/20/00/19/232284_beeddff5-c966-49e2-93a8-c63c1cf21294_700.jpg",
# "nws_meta_json":[data_rc],
# "nws_grid_json": [data_rc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.97130566869!2d-110.70672358360277!3d32.39313088108983!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6400421614087%3A0xb6cfb84a4b05c95b!2sRose%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560965073!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
rc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_rc = []
for i in data_rc_forecast["properties"]["temperature"]["values"]:
temp_rc.append(i)
temp_rc_df = pd.DataFrame(temp_rc)
temp_rc_df
# Temperature conversion to Degree Fahrenheit
temp_rc_df['degF_rc'] = (temp_rc_df['value'] * 9 / 5) + 32
temp_rc_df
# validTime Column split to date and time for Temperature
date_temp_rc = temp_rc_df['validTime'].str.split('T', n=1, expand=True)
time_temp_rc = date_temp_rc[1].str.split('+', n=1, expand=True)
time_temp_rc
temp_rc_df['date_temp_rc'] = date_temp_rc[0]
temp_rc_df['time_temp_rc'] = time_temp_rc[0]
# Combine date and time with a space in between the two
temp_rc_df['date_time_temp_rc'] = temp_rc_df['date_temp_rc'] + ' ' + temp_rc_df['time_temp_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# temp_rc_df['date_time_temp_rc'] = pd.to_datetime(temp_rc_df['date_time_temp_rc'])
# Pull all the data for today + 3 days
time_delta_temp_rc = datetime.datetime.strptime(temp_rc_df['date_temp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_rc_df['times_temp_rc'] = time_delta_temp_rc.strftime("%Y-%m-%d")
temp_rc_df = temp_rc_df.loc[temp_rc_df['date_temp_rc'] < temp_rc_df['times_temp_rc']]
temp_rc_df
temp_rc_df.dtypes
# =================== Wind Speed Data ======================
wind_speed_rc = []
for i in data_rc_forecast["properties"]["windSpeed"]["values"]:
wind_speed_rc.append(i)
windSpeed_rc_df = pd.DataFrame(wind_speed_rc)
windSpeed_rc_df
# Converting KM/hour to Miles/hour
windSpeed_rc_df['miles/hour_rc'] = windSpeed_rc_df['value'] * 0.621371
windSpeed_rc_df
# validTime Column split to date and time for wind Speed
date_ws_rc = windSpeed_rc_df['validTime'].str.split('T', n=1, expand=True)
time_ws_rc = date_ws_rc[1].str.split('+', n=1, expand=True)
time_ws_rc
windSpeed_rc_df['date_ws_rc'] = date_ws_rc[0]
windSpeed_rc_df['time_ws_rc'] = time_ws_rc[0]
# Combine date and time with a space in between the two
windSpeed_rc_df['date_time_ws_rc'] = windSpeed_rc_df['date_ws_rc'] + ' ' + windSpeed_rc_df['time_ws_rc']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# windSpeed_rc_df['date_time_ws_rc'] = pd.to_datetime(windSpeed_rc_df['date_time_ws_rc'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_rc_df['date_ws_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_rc_df['times_ws_rc'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_rc_df = windSpeed_rc_df.loc[windSpeed_rc_df['date_ws_rc'] < windSpeed_rc_df['times_ws_rc']]
windSpeed_rc_df
# windSpeed_rc_df.dtypes
# =================== Wind Gust Data ======================
wind_gust_rc = []
for i in data_rc_forecast["properties"]["windGust"]["values"]:
wind_gust_rc.append(i)
wind_gust_rc_df =
|
pd.DataFrame(wind_gust_rc)
|
pandas.DataFrame
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module contains class PandasDataframe.
PandasDataframe is a parent abstract class for any dataframe class
for pandas storage format.
"""
from collections import OrderedDict
import numpy as np
import pandas
import datetime
from pandas.core.indexes.api import ensure_index, Index, RangeIndex
from pandas.core.dtypes.common import is_numeric_dtype, is_list_like
from pandas._libs.lib import no_default
from typing import List, Hashable, Optional, Callable, Union, Dict
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.parsers import (
find_common_type_cat as find_common_type,
)
from modin.core.dataframe.base.dataframe.dataframe import ModinDataframe
from modin.core.dataframe.base.dataframe.utils import (
Axis,
JoinType,
)
from modin.pandas.indexing import is_range_like
from modin.pandas.utils import is_full_grab_slice, check_both_not_none
from modin.logging import LoggerMetaClass
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
"""
Lazily propagate metadata for the ``PandasDataframe``.
This decorator first adds the minimum required reindexing operations
to each partition's queue of functions to be lazily applied for
each PandasDataframe in the arguments by applying the function
run_f_on_minimally_updated_metadata. The decorator also sets the
flags for deferred metadata synchronization on the function result
if necessary.
Parameters
----------
apply_axis : str, default: None
The axes on which to apply the reindexing operations to the `self._partitions` lazily.
Case None: No lazy metadata propagation.
Case "both": Add reindexing operations on both axes to partition queue.
Case "opposite": Add reindexing operations complementary to given axis.
Case "rows": Add reindexing operations on row axis to partition queue.
axis_arg : int, default: -1
The index or column axis.
transpose : bool, default: False
Boolean for if a transpose operation is being used.
Returns
-------
Wrapped Function.
"""
def decorator(f):
from functools import wraps
@wraps(f)
def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
for obj in (
[self]
+ [o for o in args if isinstance(o, PandasDataframe)]
+ [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
+ [
d
for o in args
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
+ [
d
for _, o in kwargs.items()
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
):
if apply_axis == "both":
if obj._deferred_index and obj._deferred_column:
obj._propagate_index_objs(axis=None)
elif obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif apply_axis == "opposite":
if "axis" not in kwargs:
axis = args[axis_arg]
else:
axis = kwargs["axis"]
if axis == 0 and obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif axis == 1 and obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif apply_axis == "rows":
obj._propagate_index_objs(axis=0)
result = f(self, *args, **kwargs)
if apply_axis is None and not transpose:
result._deferred_index = self._deferred_index
result._deferred_column = self._deferred_column
elif apply_axis is None and transpose:
result._deferred_index = self._deferred_column
result._deferred_column = self._deferred_index
elif apply_axis == "opposite":
if axis == 0:
result._deferred_index = self._deferred_index
else:
result._deferred_column = self._deferred_column
elif apply_axis == "rows":
result._deferred_column = self._deferred_column
return result
return run_f_on_minimally_updated_metadata
return decorator
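# Usage sketch (illustrative; mirrors the decorated methods defined below, e.g. `mask`):
#
#     @lazy_metadata_decorator(apply_axis="both")
#     def some_frame_op(self, ...):
#         ...
#
# Before the wrapped method runs, any deferred set_axis calls are pushed onto the
# partitions' call queues; afterwards the deferred flags are propagated to the
# result according to `apply_axis` and `transpose`.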
class PandasDataframe(object, metaclass=LoggerMetaClass):
"""
An abstract class that represents the parent class for any pandas storage format dataframe class.
This class provides interfaces to run operations on dataframe partitions.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = None
_query_compiler_cls = PandasQueryCompiler
# These properties flag whether or not we are deferring the metadata synchronization
_deferred_index = False
_deferred_column = False
@property
def __constructor__(self):
"""
Create a new instance of this object.
Returns
-------
PandasDataframe
"""
return type(self)
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
):
self._partitions = partitions
self._index_cache = ensure_index(index)
self._columns_cache = ensure_index(columns)
if row_lengths is not None and len(self.index) > 0:
# An empty frame can have 0 rows but a nonempty index. If the frame
# does have rows, the number of rows must equal the size of the
# index.
num_rows = sum(row_lengths)
if num_rows > 0:
ErrorMessage.catch_bugs_and_request_email(
num_rows != len(self._index_cache),
"Row lengths: {} != {}".format(num_rows, len(self._index_cache)),
)
ErrorMessage.catch_bugs_and_request_email(
any(val < 0 for val in row_lengths),
"Row lengths cannot be negative: {}".format(row_lengths),
)
self._row_lengths_cache = row_lengths
if column_widths is not None and len(self.columns) > 0:
# An empty frame can have 0 column but a nonempty column index. If
# the frame does have columns, the number of columns must equal the
# size of the columns.
num_columns = sum(column_widths)
if num_columns > 0:
ErrorMessage.catch_bugs_and_request_email(
num_columns != len(self._columns_cache),
"Column widths: {} != {}".format(
num_columns, len(self._columns_cache)
),
)
ErrorMessage.catch_bugs_and_request_email(
any(val < 0 for val in column_widths),
"Column widths cannot be negative: {}".format(column_widths),
)
self._column_widths_cache = column_widths
self._dtypes = dtypes
self._filter_empties()
@property
def _row_lengths(self):
"""
Compute the row partitions lengths if they are not cached.
Returns
-------
list
A list of row partitions lengths.
"""
if self._row_lengths_cache is None:
if len(self._partitions.T) > 0:
self._row_lengths_cache = [
obj.length() for obj in self._partitions.T[0]
]
else:
self._row_lengths_cache = []
return self._row_lengths_cache
@property
def _column_widths(self):
"""
Compute the column partitions widths if they are not cached.
Returns
-------
list
A list of column partitions widths.
"""
if self._column_widths_cache is None:
if len(self._partitions) > 0:
self._column_widths_cache = [obj.width() for obj in self._partitions[0]]
else:
self._column_widths_cache = []
return self._column_widths_cache
@property
def _axes_lengths(self):
"""
Get a pair of row partitions lengths and column partitions widths.
Returns
-------
list
The pair of row partitions lengths and column partitions widths.
"""
return [self._row_lengths, self._column_widths]
@property
def dtypes(self):
"""
Compute the data types if they are not cached.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
if self._dtypes is None:
self._dtypes = self._compute_dtypes()
return self._dtypes
def _compute_dtypes(self):
"""
Compute the data types via TreeReduce pattern.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
def dtype_builder(df):
return df.apply(lambda col: find_common_type(col.values), axis=0)
map_func = self._build_treereduce_func(0, lambda df: df.dtypes)
reduce_func = self._build_treereduce_func(0, dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
dtypes = self.tree_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
else:
dtypes = pandas.Series([])
# reset name to None because we use "__reduced__" internally
dtypes.name = None
return dtypes
_index_cache = None
_columns_cache = None
def _validate_set_axis(self, new_labels, old_labels):
"""
Validate the possibility of replacement of old labels with the new labels.
Parameters
----------
new_labels : list-like
The labels to replace with.
old_labels : list-like
The labels to replace.
Returns
-------
list-like
The validated labels.
"""
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, "
+ f"new values have {new_len} elements"
)
return new_labels
def _get_index(self):
"""
Get the index from the cache object.
Returns
-------
pandas.Index
An index object containing the row labels.
"""
return self._index_cache
def _get_columns(self):
"""
Get the columns from the cache object.
Returns
-------
pandas.Index
An index object containing the column labels.
"""
return self._columns_cache
def _set_index(self, new_index):
"""
Replace the current row labels with new labels.
Parameters
----------
new_index : list-like
The new row labels.
"""
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
self.synchronize_labels(axis=0)
def _set_columns(self, new_columns):
"""
Replace the current column labels with new labels.
Parameters
----------
new_columns : list-like
The new column labels.
"""
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
if self._dtypes is not None:
self._dtypes.index = new_columns
self.synchronize_labels(axis=1)
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
@property
def axes(self):
"""
Get index and columns that can be accessed with an `axis` integer.
Returns
-------
list
List with two values: index and columns.
"""
return [self.index, self.columns]
def _compute_axis_labels(self, axis: int, partitions=None):
"""
Compute the labels for specific `axis`.
Parameters
----------
axis : int
Axis to compute labels along.
partitions : np.ndarray, optional
A 2D NumPy array of partitions from which labels will be grabbed.
If not specified, partitions will be taken from `self._partitions`.
Returns
-------
pandas.Index
Labels for the specified `axis`.
"""
if partitions is None:
partitions = self._partitions
return self._partition_mgr_cls.get_indices(
axis, partitions, lambda df: df.axes[axis]
)
def _filter_empties(self):
"""Remove empty partitions from `self._partitions` to avoid triggering excess computation."""
if len(self.axes[0]) == 0 or len(self.axes[1]) == 0:
# This is the case for an empty frame. We don't want to completely remove
# all metadata and partitions so for the moment, we won't prune if the frame
# is empty.
# TODO: Handle empty dataframes better
return
self._partitions = np.array(
[
[
self._partitions[i][j]
for j in range(len(self._partitions[i]))
if j < len(self._column_widths) and self._column_widths[j] != 0
]
for i in range(len(self._partitions))
if i < len(self._row_lengths) and self._row_lengths[i] != 0
]
)
self._column_widths_cache = [w for w in self._column_widths if w != 0]
self._row_lengths_cache = [r for r in self._row_lengths if r != 0]
def synchronize_labels(self, axis=None):
"""
Set the deferred axes variables for the ``PandasDataframe``.
Parameters
----------
axis : int, default: None
The deferred axis.
0 for the index, 1 for the columns.
"""
if axis is None:
self._deferred_index = True
self._deferred_column = True
elif axis == 0:
self._deferred_index = True
else:
self._deferred_column = True
def _propagate_index_objs(self, axis=None):
"""
Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.
Adds `set_axis` function to call-queue of each partition from `self._partitions`
to apply new axis.
Parameters
----------
axis : int, default: None
The axis to apply to. If it's None applies to both axes.
"""
self._filter_empties()
if axis is None or axis == 0:
cum_row_lengths = np.cumsum([0] + self._row_lengths)
if axis is None or axis == 1:
cum_col_widths = np.cumsum([0] + self._column_widths)
if axis is None:
def apply_idx_objs(df, idx, cols):
return df.set_axis(idx, axis="index", inplace=False).set_axis(
cols, axis="columns", inplace=False
)
self._partitions = np.array(
[
[
self._partitions[i][j].add_to_apply_calls(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._deferred_index = False
self._deferred_column = False
elif axis == 0:
def apply_idx_objs(df, idx):
return df.set_axis(idx, axis="index", inplace=False)
self._partitions = np.array(
[
[
self._partitions[i][j].add_to_apply_calls(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._deferred_index = False
elif axis == 1:
def apply_idx_objs(df, cols):
return df.set_axis(cols, axis="columns", inplace=False)
self._partitions = np.array(
[
[
self._partitions[i][j].add_to_apply_calls(
apply_idx_objs,
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._deferred_column = False
else:
ErrorMessage.catch_bugs_and_request_email(
axis is not None and axis not in [0, 1]
)
@lazy_metadata_decorator(apply_axis=None)
def mask(
self,
row_labels: Optional[List[Hashable]] = None,
row_positions: Optional[List[int]] = None,
col_labels: Optional[List[Hashable]] = None,
col_positions: Optional[List[int]] = None,
) -> "PandasDataframe":
"""
Lazily select columns or rows from given indices.
Parameters
----------
row_labels : list of hashable, optional
The row labels to extract.
row_positions : list-like of ints, optional
The row positions to extract.
col_labels : list of hashable, optional
The column labels to extract.
col_positions : list-like of ints, optional
The column positions to extract.
Returns
-------
PandasDataframe
A new PandasDataframe from the mask provided.
Notes
-----
If both `row_labels` and `row_positions` are provided, a ValueError is raised.
The same rule applies for `col_labels` and `col_positions`.
"""
if check_both_not_none(row_labels, row_positions):
raise ValueError(
"Both row_labels and row_positions were provided - please provide only one of row_labels and row_positions."
)
if check_both_not_none(col_labels, col_positions):
raise ValueError(
"Both col_labels and col_positions were provided - please provide only one of col_labels and col_positions."
)
indexers = []
for axis, indexer in enumerate((row_positions, col_positions)):
if is_range_like(indexer):
if indexer.step == 1 and len(indexer) == len(self.axes[axis]):
# By this function semantics, `None` indexer is a full-axis access
indexer = None
elif indexer is not None and not isinstance(indexer, pandas.RangeIndex):
# Pure python's range is not fully compatible with a list of ints,
# converting it to ``pandas.RangeIndex``` that is compatible.
indexer = pandas.RangeIndex(
indexer.start, indexer.stop, indexer.step
)
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=not (indexer is None or
|
is_list_like(indexer)
|
pandas.core.dtypes.common.is_list_like
|
import os
import subprocess
from multiprocessing import Process
import time
import pandas as pd
import prctl
import resource
import logging
logger = logging.getLogger(__name__)
qlogger = logging.getLogger("QEMU-" + __name__)
class Trigger:
def __init__(self, trigger_address, trigger_hitcounter):
"""
Define attributes for trigger
"""
self.address = trigger_address
self.hitcounter = trigger_hitcounter
class Fault:
def __init__(self,
fault_address: int,
fault_type: int,
fault_model: int,
fault_lifespan: int,
fault_mask: int,
trigger_address: int,
trigger_hitcounter: int):
"""
Define attributes for fault types
"""
self.trigger = Trigger(trigger_address, trigger_hitcounter)
self.address = fault_address
self.type = fault_type
self.model = fault_model
self.lifespan = fault_lifespan
self.mask = fault_mask
def write_to_fifo(self, fifo):
"Write data to the config fifo, which sends binary data"
numbytes = fifo.write(self.address.to_bytes(8, byteorder='big'))
numbytes = numbytes + fifo.write(self.type.to_bytes(8, byteorder='big'))
numbytes = numbytes + fifo.write(self.model.to_bytes(8, byteorder='big'))
numbytes = numbytes + fifo.write(self.lifespan.to_bytes(8, byteorder='big'))
numbytes = numbytes + fifo.write(self.mask.to_bytes(16, byteorder='big'))
numbytes = numbytes + fifo.write(self.trigger.address.to_bytes(8, byteorder='big'))
numbytes = numbytes + fifo.write(self.trigger.hitcounter.to_bytes(8, byteorder='big'))
fifo.flush()
return numbytes
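    # Wire format note (derived from the writes above): seven big-endian fields --
    # address, type, model and lifespan (8 bytes each), mask (16 bytes), then
    # trigger address and trigger hitcounter (8 bytes each), i.e. 64 bytes per
    # fault record.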
def write_to_fifo_new(self, fifo):
out = "\n$$[Fault]\n"
out = out + "% {:d} | {:d} | {:d} | {:d} | {:d} | {:d} | ".format(self.address, self.type, self.model, self.lifespan, self.trigger.address, self.trigger.hitcounter)
tmp = self.mask - pow(2, 64)
if tmp < 0:
tmp = 0
out = out + " {:d} {:d} \n".format(tmp, self.mask - tmp)
out = out + "$$[Fault_Ende]\n"
tmp = fifo.write(out)
fifo.flush()
return tmp
def run_qemu(controll,
config,
data,
qemu_monitor_fifo,
qemu_path,
kernel_path,
plugin_path,
machine,
qemu_output,
index,
qemu_custom_paths=None):
"""
This function calls qemu with the required arguments.
"""
ps = None
try:
prctl.set_name("qemu{}".format(index))
prctl.set_proctitle("qemu_for_{}".format(index))
t0 = time.time()
qlogger.debug("start qemu for exp {}".format(index))
if qemu_output is True:
output = "-d plugin"
else:
output = " "
if qemu_custom_paths is None:
qemu_custom_paths = " "
qemustring = "{3!s} -plugin {5!s},arg=\"{0!s}\",arg=\"{1!s}\",arg=\"{2!s}\" {6!s} {7!s} -M {8!s} -monitor none -kernel {4!s}".format(controll, config, data, qemu_path, kernel_path, plugin_path, output, qemu_custom_paths, machine)
ps = subprocess.Popen(qemustring, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while ps.poll() is None:
tmp = ps.stdout.read()
if qemu_output is True:
f = open("log_{}.txt".format(index), 'wt', encoding='utf-8')
f.write(tmp.decode('utf-8'))
qlogger.debug(tmp.decode('utf-8'))
qlogger.info("Ended qemu for exp {}! Took {}".format(index,
time.time()-t0)
)
except KeyboardInterrupt:
ps.kill()
logger.warning("Terminate QEMU {}".format(index))
def readout_tbinfo(line):
"""
Builds the dict for tb info from line provided by qemu
"""
split = line.split('|')
tb = {}
tb['id'] = int(split[0], 0)
tb['size'] = int(split[1], 0)
tb['ins_count'] = int(split[2], 0)
tb['num_exec'] = int(split[3], 0)
tb['assembler'] = split[4].replace('!!', '\n')
return tb
def diff_tbinfo(tblist, goldenrun_tblist):
"""
    Diff tblist against the golden run's tblist. Converted to a pandas DataFrame
    for performance; a naive implementation is too slow for larger datasets.
    The golden run is concatenated twice so that it cancels out and only the
    diff relative to df1 remains.
"""
df1 = pd.DataFrame(tblist)
df2 = goldenrun_tblist
diff = pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
tblist_dif = diff.to_dict('records')
return tblist_dif
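# Why the golden run is concatenated twice (illustrative example, not from the
# source): every golden-run row then appears at least twice, so
# drop_duplicates(keep=False) removes all golden rows together with any df1 rows
# equal to them, leaving only rows unique to the faulted run.
#   df1 = pd.DataFrame({'id': [1, 2, 3]})
#   df2 = pd.DataFrame({'id': [2, 3]})
#   pd.concat([df1, df2, df2]).drop_duplicates(keep=False)   # keeps only id == 1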
def readout_tbexec(line, tbexeclist, tbinfo, goldenrun):
"""
Builds the dict for tb exec from line provided by qemu
"""
split = line.split('|')
# generate list element
execdic = {}
execdic['tb'] = int(split[0], 0)
execdic['pos'] = int(split[1], 0)
return execdic
def build_filters(tbinfogolden):
"""
Build for each tb in tbinfo a filter
"""
filter_return = []
"""Each assembler string"""
for tb in tbinfogolden['assembler']:
tb_filter = []
"""remove first split, as it is empty"""
split = tb.split('[ ')
"""For each line"""
for sp in split[1:]:
"""select address"""
s = sp.split(']')
"""Add to filter"""
tb_filter.append(int("0x"+s[0].strip(), 0))
"""Sort addresses"""
tb_filter.sort()
"""Reverse list so that last element is first"""
tb_filter.reverse()
"""Append to filter list"""
filter_return.append(tb_filter)
"""Filter list for length of filter, so that the longest one is tested first"""
filter_return.sort(key=len)
filter_return.reverse()
return filter_return
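# Example of the assembler layout this parser expects (hypothetical, inferred
# from readout_tbinfo and the splitting above): strings such as
#   "[ 0000100c ] movs r0, #0\n[ 0000100e ] bx lr"
# yield the per-instruction addresses (the "0x" prefix is added here), which
# become one filter list per translation block, sorted in descending order.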
def recursive_filter(tbexecpd, tbinfopd, index, filt):
"""
Search if each element in filt exists in tbexec after index
"""
"""Make sure we do not leave Pandas frame"""
if not ((index >= 0) and index < len(tbexecpd)):
return [False, tbexecpd, tbinfopd]
"""Select element to test"""
tb = tbexecpd.loc[index]
"""Make sure it is part of filter"""
if (tb['tb'] == filt[0]):
if len(filt) == 1:
"""Reached start of original tb"""
return [True, tbexecpd, tbinfopd]
else:
"""pop filter element and increase index in tbexec pandas frame"""
fi = filt.pop(0)
index = index + 1
"""Call recursively"""
[flag, tbexecpd, tbinfopd] = recursive_filter(tbexecpd, tbinfopd, index, filt)
index = index - 1
"""If true, we have a match"""
if flag is True:
"""Invalidate element in tb exec list"""
tbexecpd.at[index, 'tb'] = -1
tbexecpd.at[index, 'tb-1'] = -1
"""Search tb in tb info"""
idx = tbinfopd.index[tbinfopd['id'] == fi]
for ind in idx:
"""Only invalidate if tb only contains one element, as these are artefacts of singlestep"""
if tbinfopd.at[ind, 'ins_count'] == 1:
tbinfopd.at[ind, 'num_exec'] = tbinfopd.at[ind, 'num_exec'] - 1
return [flag, tbexecpd, tbinfopd]
else:
return [False, tbexecpd, tbinfopd]
def decrease_tb_info_element(tb_id, number, tbinfopd):
"""Find all matches to the tb id"""
idx = tbinfopd.index[tbinfopd['id'] == tb_id]
"""Decrement all matches by number of occurrence in tb exec"""
for i in idx:
tbinfopd.at[i, 'num_exec'] = tbinfopd.at[i, 'num_exec'] - number
def filter_function(tbexecpd, filt, tbinfopd):
"""Find all possible matches for first element of filter"""
idx = tbexecpd.index[(tbexecpd['tb'] == filt[0])]
for f in filt[1:]:
"""Increment to next possible match position"""
idx = idx + 1
"""Find all possible matches for next filter value"""
tmp = tbexecpd.index[(tbexecpd['tb']) == f]
"""Find matching indexes between both indexes"""
idx = idx.intersection(tmp)
"""We now will step through the filter backwards"""
filt.reverse()
for f in filt[1:]:
"""Decrement positions"""
idx = idx - 1
for i in idx:
"""Invalidate all positions"""
tbexecpd.at[i, 'tb'] = -1
tbexecpd.at[i, 'tb-1'] = -1
"""Decrement artefacts in tb info list"""
        decrease_tb_info_element(f, len(idx), tbinfopd)
def filter_tb(tbexeclist, tbinfo, tbexecgolden, tbinfogolden, id_num):
"""
First create filter list, then find start of filter, then call recursive filter
"""
filters = build_filters(tbinfogolden)
tbexecpd = tbexeclist
"""Sort and re-index tb exec list"""
tbexecpd.sort_values(by=['pos'], ascending=False, inplace=True)
tbexecpd.reset_index(drop=True, inplace=True)
tbexecpd['tb-1'] = tbexecpd['tb'].shift(periods=-1, fill_value=0)
"""Generate pandas frame for tbinfo"""
tbinfopd = pd.DataFrame(tbinfo)
for filt in filters:
"""Only if filter has more than one element"""
if len(filt) > 1:
"""Perform search and invalidation of found matches"""
filter_function(tbexecpd, filt, tbinfopd)
diff = len(tbexecpd)
""" Search found filter matches """
idx = tbexecpd.index[tbexecpd['tb-1'] == -1]
"""Drop them from table"""
tbexecpd.drop(idx, inplace=True)
"""Drop temporary column"""
tbexecpd.drop(columns=['tb-1'], inplace=True)
"""Reverse list, because it is given reversed from qemu"""
tbexecpd.sort_values(by=['pos'], inplace=True)
""" Fix broken position index"""
tbexecpd.reset_index(drop=True, inplace=True)
tbexecpd['pos'] = tbexecpd.index
"""Again reverse list to go back to original orientation"""
tbexecpd = tbexecpd.iloc[::-1]
logger.debug("worker {} length diff of tbexec {}".format(id_num, diff - len(tbexecpd)))
diff = len(tbinfopd)
"""Search each tb info, that was completely removed from tbexec list"""
idx = tbinfopd.index[tbinfopd['num_exec'] <= 0]
"""Drop the now not relevant tbinfo elements"""
tbinfopd.drop(idx, inplace=True)
logger.debug("worker {} Length diff of tbinfo {}".format(id_num, diff - len(tbinfopd)))
return [tbexecpd, tbinfopd.to_dict('records')]
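# Summary of the filtering above (descriptive note): filters are built from the
# golden run's translation blocks, matching single-step artefacts in the faulted
# run's tbexec list are invalidated and dropped, and tbinfo entries whose
# execution count falls to zero are removed as well.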
def diff_tbexec(tbexeclist, goldenrun_tbexeclist):
"""
    Diff tbexeclist against the golden run's tbexeclist. Converted to a pandas
    DataFrame for performance; a naive implementation is too slow for larger
    datasets. The golden run is concatenated twice so that it cancels out and
    only the diff relative to df1 remains.
"""
df1 = tbexeclist
df2 = goldenrun_tbexeclist
diff = pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
tbexeclist_diff = diff.to_dict('records')
return tbexeclist_diff
def readout_meminfo(line):
"""
Builds the dict for memory info from line provided by qemu
"""
split = line.split('|')
mem = {}
mem['ins'] = int(split[0], 0)
mem['size'] = int(split[1], 0)
mem['address'] = int(split[2], 0)
mem['direction'] = int(split[3], 0)
mem['counter'] = int(split[4], 0)
mem['tbid'] = 0
return mem
def diff_meminfo(meminfolist, goldenrun_meminfolist):
"""
    Diff meminfo against the golden run's meminfo. Converted to a pandas
    DataFrame for performance; a naive implementation is too slow for larger
    datasets. The golden run is concatenated twice so that it cancels out and
    only the diff relative to df1 remains.
"""
df1 = pd.DataFrame(meminfolist)
df2 = goldenrun_meminfolist
diff = pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
meminfolist_diff = diff.to_dict('records')
return meminfolist_diff
def connect_meminfo_tb(meminfolist, tblist):
for meminfo in meminfolist:
for tbinfo in tblist:
if meminfo['ins'] > tbinfo['id'] and meminfo['ins'] < tbinfo['id'] + tbinfo['size']:
meminfo['tbid'] = tbinfo['id']
break
def readout_memdump(line, memdumplist, memdumpdict, memdumptmp):
"""
This function will readout the lines. If it receives memorydump, it
means a new configured dump will be transmitted. Therefore the
dictionary is initialised. If only B: is received, Binary data is
transmitted. If Dump end is received, the dump is finished and needs
to be appended to the dic, as multiple dumps are possible. If
memorydump end is received, the current memorydump is finished and
is added to the memdumplist
"""
if '[memorydump]' in line:
split = line.split(']:')
info = split[1]
split = info.split('|')
memdumpdict['address'] = int(split[0], 0)
memdumpdict['len'] = int(split[1], 0)
memdumpdict['numdumps'] = int(split[2], 0)
memdumpdict['dumps'] = []
if 'B:' in line:
split = line.split('B: ')
binary = split[1].split(' ')
for b in binary:
memdumptmp.append(int(b, 0))
if '[Dump end]' in line:
memdumpdict['dumps'].append(memdumptmp)
memdumptmp = []
if '[memorydump end]' in line:
memdumplist.append(memdumpdict)
memdumpdict = {}
return [memdumplist, memdumpdict, memdumptmp]
def readout_arm_registers(line):
split = line.split('|')
armregisters = {}
armregisters['pc'] = int(split[0])
armregisters['tbcounter'] = int(split[1])
armregisters['r0'] = int(split[2])
armregisters['r1'] = int(split[3])
armregisters['r2'] = int(split[4])
armregisters['r3'] = int(split[5])
armregisters['r4'] = int(split[6])
armregisters['r5'] = int(split[7])
armregisters['r6'] = int(split[8])
armregisters['r7'] = int(split[9])
armregisters['r8'] = int(split[10])
armregisters['r9'] = int(split[11])
armregisters['r10'] = int(split[12])
armregisters['r11'] = int(split[13])
armregisters['r12'] = int(split[14])
armregisters['r13'] = int(split[15])
armregisters['r14'] = int(split[16])
armregisters['r15'] = int(split[17])
armregisters['xpsr'] = int(split[18])
return armregisters
def readout_riscv_registers(line):
split = line.split('|')
riscvregister = {}
riscvregister['pc'] = int(split[0])
riscvregister['tbcounter'] = int(split[1])
riscvregister['x0'] = int(split[2])
riscvregister['x1'] = int(split[3])
riscvregister['x2'] = int(split[4])
riscvregister['x3'] = int(split[5])
riscvregister['x4'] = int(split[6])
riscvregister['x5'] = int(split[7])
riscvregister['x6'] = int(split[8])
riscvregister['x7'] = int(split[9])
riscvregister['x8'] = int(split[10])
riscvregister['x9'] = int(split[11])
riscvregister['x10'] = int(split[12])
riscvregister['x11'] = int(split[13])
riscvregister['x12'] = int(split[14])
riscvregister['x13'] = int(split[15])
riscvregister['x14'] = int(split[16])
riscvregister['x15'] = int(split[17])
riscvregister['x16'] = int(split[18])
riscvregister['x17'] = int(split[19])
riscvregister['x18'] = int(split[20])
riscvregister['x19'] = int(split[21])
riscvregister['x20'] = int(split[22])
riscvregister['x21'] = int(split[23])
riscvregister['x22'] = int(split[24])
riscvregister['x23'] = int(split[25])
riscvregister['x24'] = int(split[26])
riscvregister['x25'] = int(split[27])
riscvregister['x26'] = int(split[28])
riscvregister['x27'] = int(split[29])
riscvregister['x28'] = int(split[30])
riscvregister['x29'] = int(split[31])
riscvregister['x30'] = int(split[32])
riscvregister['x31'] = int(split[33])
riscvregister['x32'] = int(split[34])
return riscvregister
def readout_tb_faulted(line):
split = line.split('|')
tbfaulted = {}
tbfaulted['faultaddress'] = int(split[0], 0)
tbfaulted['assembly'] = (split[1].replace('!!', '\n'))
return tbfaulted
def diff_arm_registers(armregisterlist, goldenarmregisterlist):
df1 = pd.DataFrame(armregisterlist)
df2 = goldenarmregisterlist
diff = pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
armregister_diff = diff.to_dict('records')
return armregister_diff
def diff_tables(table, goldentable):
"""
This function expects a table and its golden table as pandas dataframe
"""
return pd.concat([table, goldentable, goldentable]).drop_duplicates(keep=False)
def convert_pd_frame_to_list(table):
return table.to_dict('records')
def readout_data(pipe,
index,
q,
faultlist,
goldenrun_data,
q2=None,
qemu_post=None,
qemu_pre_data=None):
"""
    This function continuously reads data from the data pipe and builds the
    internal representation, which is then collected by the process that writes
    the HDF5 file.
"""
state = 'None'
tblist = []
tbexeclist = []
pdtbexeclist = None
memlist = []
memdumpdict = {}
memdumplist = []
memdumptmp = []
registerlist = []
tbfaultedlist = []
tbinfo = 0
tbexec = 0
meminfo = 0
memdump = 0
endpoint = 0
mem = 0
regtype = None
tbfaulted = 0
while(1):
line = pipe.readline()
if '$$$' in line:
line = line[3:]
if '[Endpoint]' in line:
split = line.split(']:')
endpoint = int(split[1], 0)
elif '[TB Information]' in line:
state = 'tbinfo'
tbinfo = 1
elif '[TB Exec]' in line:
state = 'tbexec'
tbexec = 1
elif '[Mem Information]' in line:
tbexeclist.reverse()
state = 'meminfo'
meminfo = 1
elif '[Memdump]' in line:
state = 'memdump'
memdump = 1
elif '[END]' in line:
state = 'none'
logger.info("Data received now on post processing for Experiment {}".format(index))
tmp = 0
if tbexec == 1:
if pdtbexeclist is not None:
tmp = pd.DataFrame(tbexeclist)
pdtbexeclist = pd.concat([pdtbexeclist, tmp],
ignore_index=True)
else:
pdtbexeclist = pd.DataFrame(tbexeclist)
tmp = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if q2 is not None:
q2.put(tmp)
if goldenrun_data is not None:
[pdtbexeclist, tblist] = filter_tb(pdtbexeclist,
tblist,
goldenrun_data['tbexec'],
goldenrun_data['tbinfo'],
index)
if tbinfo == 1 and meminfo == 1:
connect_meminfo_tb(memlist, tblist)
output = {}
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if q2 is not None:
q2.put(tmp)
if tmp > mem:
mem = tmp
if tbinfo == 1:
if goldenrun_data is not None:
output['tbinfo'] = diff_tbinfo(tblist,
goldenrun_data['tbinfo'])
else:
output['tbinfo'] = tblist
if tbexec == 1:
if goldenrun_data is not None:
output['tbexec'] = diff_tbexec(pdtbexeclist,
goldenrun_data['tbexec'])
else:
output['tbexec'] = convert_pd_frame_to_list(pdtbexeclist)
if meminfo == 1:
if goldenrun_data is not None:
output['meminfo'] = diff_meminfo(memlist,
goldenrun_data['meminfo'])
else:
output['meminfo'] = memlist
if goldenrun_data is not None:
if regtype == 'arm':
output['armregisters'] = diff_arm_registers(registerlist, goldenrun_data['armregisters'])
if regtype == 'riscv':
output['riscvregisters'] = diff_arm_registers(registerlist, goldenrun_data['riscvregisters'])
else:
if regtype == 'arm':
output['armregisters'] = registerlist
if regtype == 'riscv':
output['riscvregisters'] = registerlist
if tbfaulted == 1:
output['tbfaulted'] = tbfaultedlist
output['index'] = index
output['faultlist'] = faultlist
output['endpoint'] = endpoint
if memdump == 1:
output['memdumplist'] = memdumplist
tmp = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if q2 is not None:
q2.put(tmp)
if tmp > mem:
mem = tmp
if callable(qemu_post):
output = qemu_post(qemu_pre_data, output)
q.put(output)
tmp = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if q2 is not None:
q2.put(tmp)
if tmp > mem:
mem = tmp
break
elif '[Arm Registers]' in line:
state = 'armregisters'
regtype = 'arm'
elif '[RiscV Registers]' in line:
state = 'riscvregisters'
regtype = 'riscv'
elif '[TB Faulted]' in line:
state = 'tbfaulted'
tbfaulted = 1
else:
logger.warning("Command in exp {} not understood {}".format(index, line))
state = 'None'
elif '$$' in line:
line = line[2:]
if 'tbinfo' in state:
tblist.append(readout_tbinfo(line))
elif 'tbexec' in state:
tbexeclist.append(readout_tbexec(line, tbexeclist,
tblist, goldenrun_data))
if len(tbexeclist) > 10000:
if pdtbexeclist is None:
pdtbexeclist =
|
pd.DataFrame(tbexeclist)
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
from pandas.core import common as com
from pandas.util import testing as tm
class TestCaching:
def test_slice_consolidate_invalidate_item_cache(self):
# this is chained assignment, but will 'work'
with option_context('chained_assignment', None):
# #3970
df = DataFrame({"aa": np.arange(5), "bb": [2.2] * 5})
# Creates a second float block
df["cc"] = 0.0
# caches a reference to the 'bb' series
df["bb"]
# repr machinery triggers consolidation
repr(df)
# Assignment to wrong series
df['bb'].iloc[0] = 0.17
df._clear_item_cache()
tm.assert_almost_equal(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
cont = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
for do_ref in [False, False]:
df = DataFrame({'a': cont,
"b": cont[3:] + cont[:3],
'c': np.arange(7)})
# ref the cache
if do_ref:
df.loc[0, "c"]
# set it
df.loc[7, 'c'] = 1
assert df.loc[0, 'c'] == 0.0
assert df.loc[7, 'c'] == 1.0
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame({'A': [600, 600, 600]},
index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
# loop through df to update out
six = Timestamp('5/7/2014')
eix = Timestamp('5/9/2014')
for ix, row in df.iterrows():
out.loc[six:eix, row['C']] = out.loc[six:eix, row['C']] + row['D']
tm.assert_frame_equal(out, expected)
tm.assert_series_equal(out['A'], expected['A'])
# try via a chain indexing
# this actually works
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
for ix, row in df.iterrows():
v = out[row['C']][six:eix] + row['D']
out[row['C']][six:eix] = v
tm.assert_frame_equal(out, expected)
|
tm.assert_series_equal(out['A'], expected['A'])
|
pandas.util.testing.assert_series_equal
|
"""Main module
# Resources
- Reference google sheets:
- Source data: https://docs.google.com/spreadsheets/d/1jzGrVELQz5L4B_-DqPflPIcpBaTfJOUTrVJT5nS_j18/edit#gid=1335629675
- Source data (old): https://docs.google.com/spreadsheets/d/17hHiqc6GKWv9trcW-lRnv-MhZL8Swrx2/edit#gid=1335629675
- Output example: https://docs.google.com/spreadsheets/d/1uroJbhMmOTJqRkTddlSNYleSKxw4i2216syGUSK7ZuU/edit?userstoinvite=<EMAIL>&actionButton=1#gid=435465078
"""
import json
import os
import pickle
import sys
from copy import copy
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, OrderedDict
from uuid import uuid4
import pandas as pd
try:
import ArgumentParser
except ModuleNotFoundError:
from argparse import ArgumentParser
from vsac_wrangler.config import CACHE_DIR, DATA_DIR, PROJECT_ROOT
from vsac_wrangler.definitions.constants import FHIR_JSON_TEMPLATE
from vsac_wrangler.google_sheets import get_sheets_data
from vsac_wrangler.vsac_api import get_ticket_granting_ticket, get_value_sets
from vsac_wrangler.interfaces._cli import get_parser
# USER1: This is an actual ID to a valid user in palantir, who works on our BIDS team.
PROJECT_NAME = 'RP-4A9E27'
PALANTIR_ENCLAVE_USER_ID_1 = 'a39723f3-dc9c-48ce-90ff-06891c29114f'
PARSE_ARGS = get_parser().parse_args() # for convenient access later
OUTPUT_NAME = 'palantir-three-file' # currently this is the only value used
SOURCE_NAME = 'vsac'
def get_runtime_provenance() -> str:
"""Get provenance info related to this runtime operation"""
return f'oids from {PARSE_ARGS.input_path} => VSAC trad API => 3-file dir {get_out_dir()}'
def format_label(label, verbose_prefix=False) -> str:
"""Adds prefix and trims whitespace"""
label = label.strip()
prefix = 'VSAC' if not verbose_prefix else get_runtime_provenance()
return f'[{prefix}] {label}'
def get_out_dir(output_name=OUTPUT_NAME, source_name=SOURCE_NAME) -> str:
date_str = datetime.now().strftime('%Y.%m.%d')
out_dir = os.path.join(DATA_DIR, output_name, source_name, date_str, 'output')
return out_dir
# to-do: Shared lib for this stuff?
# noinspection DuplicatedCode
def _save_csv(df: pd.DataFrame, filename, output_name=OUTPUT_NAME, source_name=SOURCE_NAME, field_delimiter=','):
"""Side effects: Save CSV"""
out_dir = get_out_dir(output_name=output_name, source_name=source_name)
os.makedirs(out_dir, exist_ok=True)
output_format = 'csv' if field_delimiter == ',' else 'tsv' if field_delimiter == '\t' else 'txt'
outpath = os.path.join(out_dir, f'{filename}.{output_format}')
df.to_csv(outpath, sep=field_delimiter, index=False)
def _datetime_palantir_format() -> str:
"""Returns datetime str in format used by palantir data enclave
e.g. 2021-03-03T13:24:48.000Z (milliseconds allowed, but not common in observed table)"""
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + 'Z'
def save_json(value_sets, output_structure, json_indent=4) -> List[Dict]:
"""Save JSON"""
# Populate JSON objs
d_list: List[Dict] = []
for value_set in value_sets:
value_set2 = {}
if output_structure == 'fhir':
value_set2 = vsac_to_fhir(value_set)
elif output_structure == 'vsac':
value_set2 = vsac_to_vsac(value_set)
elif output_structure == 'atlas':
raise NotImplementedError('For "atlas" output-structure, output-format "json" not yet implemented.')
d_list.append(value_set2)
# Save file
for d in d_list:
if 'name' in d:
valueset_name = d['name']
else:
valueset_name = d['Concept Set Name']
valueset_name = valueset_name.replace('/', '|')
filename = valueset_name + '.json'
filepath = os.path.join(DATA_DIR, filename)
with open(filepath, 'w') as fp:
if json_indent:
json.dump(d, fp, indent=json_indent)
else:
json.dump(d, fp)
return d_list
# TODO: repurpose this to use VSAC format
# noinspection DuplicatedCode
def vsac_to_fhir(value_set: Dict) -> Dict:
"""Convert VSAC JSON dict to FHIR JSON dict"""
# TODO: cop/paste FHIR_JSON_TEMPLATE literally here instead and use like other func
d: Dict = copy(FHIR_JSON_TEMPLATE)
d['id'] = int(value_set['valueSet.id'][0])
d['text']['div'] = d['text']['div'].format(value_set['valueSet.description'][0])
d['url'] = d['url'].format(str(value_set['valueSet.id'][0]))
d['name'] = value_set['valueSet.name'][0]
d['title'] = value_set['valueSet.name'][0]
d['status'] = value_set['valueSet.status'][0]
d['description'] = value_set['valueSet.description'][0]
d['compose']['include'][0]['system'] = value_set['valueSet.codeSystem'][0]
d['compose']['include'][0]['version'] = value_set['valueSet.codeSystemVersion'][0]
concepts = []
d['compose']['include'][0]['concept'] = concepts
return d
# TODO:
def vsac_to_vsac(v: Dict, depth=2) -> Dict:
"""Convert VSAC JSON dict to OMOP JSON dict
This is the format @DaveraGabriel specified by looking at the VSAC web interface."""
# Attempt at regexp
# Clinical Focus: Asthma conditions which suggest applicability of NHLBI NAEPP EPR3 Guidelines for the Diagnosis and
# Management of Asthma (2007) and the 2020 Focused Updates to the Asthma Management Guidelines),(Data Element Scope:
# FHIR Condition.code),(Inclusion Criteria: SNOMEDCT concepts in "Asthma SCT" and ICD10CM concepts in "Asthma
# ICD10CM" valuesets.),(Exclusion Criteria: none)
# import re
# regexer = re.compile('\((.+): (.+)\)') # fail
# regexer = re.compile('\((.+): (.+)\)[,$]')
# found = regexer.match(value_sets['ns0:Purpose'])
# x1 = found.groups()[0]
purposes = v['ns0:Purpose'].split('),')
d = {
"Concept Set Name": v['@displayName'],
"Created At": 'vsacToOmopConversion:{}; vsacRevision:{}'.format(
datetime.now().strftime('%Y/%m/%d'),
v['ns0:RevisionDate']),
"Created By": v['ns0:Source'],
# "Created By": "https://github.com/HOT-Ecosystem/ValueSet-Converters",
"Intention": {
"Clinical Focus": purposes[0].split('(Clinical Focus: ')[1],
"Inclusion Criteria": purposes[2].split('(Inclusion Criteria: ')[1],
"Data Element Scope": purposes[1].split('(Data Element Scope: ')[1],
"Exclusion Criteria": purposes[3].split('(Exclusion Criteria: ')[1],
},
"Limitations": {
"Exclusion Criteria": "",
"VSAC Note": None, # VSAC Note: (exclude if null)
},
"Provenance": {
"Steward": "",
"OID": "",
"Code System(s)": [],
"Definition Type": "",
"Definition Version": "",
}
}
# TODO: use depth to make this either nested JSON, or, if depth=1, concatenate
# ... all intention sub-fields into a single string, etc.
if depth == 1:
d['Intention'] = ''
elif depth < 1 or depth > 2:
raise RuntimeError(f'vsac_to_vsac: depth parameter valid range: 1-2, but depth of {depth} was requested.')
return d
def get_vsac_csv(
value_sets: List[OrderedDict], google_sheet_name=None, field_delimiter=',', code_delimiter='|', filename='vsac_csv'
) -> pd.DataFrame:
"""Convert VSAC hiearchical XML in a VSAC-oriented tabular file"""
rows = []
for value_set in value_sets:
code_system_codes = {}
name = value_set['@displayName']
purposes = value_set['ns0:Purpose'].split('),')
purposes2 = []
for p in purposes:
i1 = 1 if p.startswith('(') else 0
i2 = -1 if p[len(p) - 1] == ')' else len(p)
purposes2.append(p[i1:i2])
concepts = value_set['ns0:ConceptList']['ns0:Concept']
concepts = concepts if type(concepts) == list else [concepts]
for concept in concepts:
code = concept['@code']
code_system = concept['@codeSystemName']
if code_system not in code_system_codes:
code_system_codes[code_system] = []
code_system_codes[code_system].append(code)
for code_system, codes in code_system_codes.items():
row = {
'name': name,
'nameVSAC': '[VSAC] ' + name,
'oid': value_set['@ID'],
'codeSystem': code_system,
'limitations': purposes2[3],
'intention': '; '.join(purposes2[0:3]),
'provenance': '; '.join([
'Steward: ' + value_set['ns0:Source'],
'OID: ' + value_set['@ID'],
'Code System(s): ' + ','.join(list(code_system_codes.keys())),
'Definition Type: ' + value_set['ns0:Type'],
'Definition Version: ' + value_set['@version'],
'Accessed: ' + str(datetime.now())[0:-7]
]),
}
if len(codes) < 2000:
row['codes'] = code_delimiter.join(codes)
else:
row['codes'] = code_delimiter.join(codes[0:1999])
if len(codes) < 4000:
row['codes2'] = code_delimiter.join(codes[2000:])
else:
row['codes2'] = code_delimiter.join(codes[2000:3999])
row['codes3'] = code_delimiter.join(codes[4000:])
row2 = {}
for k, v in row.items():
row2[k] = v.replace('\n', ' - ') if type(v) == str else v
row = row2
rows.append(row)
# Create/Return DF & Save CSV
df = pd.DataFrame(rows)
_save_csv(df, filename=filename, source_name=google_sheet_name, field_delimiter=field_delimiter)
return df
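# Note on the codes/codes2/codes3 columns above: long code lists are split into
# chunks of roughly 2000 codes per column, presumably to keep individual
# spreadsheet cells below size limits (assumption, not stated in the source).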
def get_ids_for_palantir3file(value_sets: pd.DataFrame) -> Dict[str, int]:
oid_enclave_code_set_id_map_csv_path = os.path.join(PROJECT_ROOT, 'data', 'cset.csv')
oid_enclave_code_set_id_df = pd.read_csv(oid_enclave_code_set_id_map_csv_path)
missing_oids = set(value_sets['@ID']) - set(oid_enclave_code_set_id_df['oid'])
if len(missing_oids) > 0:
google_sheet_url = PARSE_ARGS.google_sheet_url
        next_internal_id = oid_enclave_code_set_id_df.internal_id.max() + 1
        new_ids = [next_internal_id + i for i in range(len(missing_oids))]
missing_recs = pd.DataFrame(data={
'source_id_field': ['oid' for i in range(0, len(missing_oids))],
'oid': [oid for oid in missing_oids],
'ccsr_code': [None for i in range(0, len(missing_oids))],
'internal_id': new_ids,
'internal_source': [google_sheet_url for i in range(0, len(missing_oids))],
'cset_source': ['VSAC' for i in range(0, len(missing_oids))],
'grouped_by_bids': [None for i in range(0, len(missing_oids))],
'concept_id': [None for i in range(0, len(missing_oids))],
})
oid_enclave_code_set_id_df =
|
pd.concat([oid_enclave_code_set_id_df, missing_recs])
|
pandas.concat
|
import pandas as pd
import pickle
alter_list =
|
pd.read_pickle("../input/alter_lists.pkl")
|
pandas.read_pickle
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:16:48 2018
@author: cenv0574
"""
import os
import pandas as pd
import numpy as np
import atra.utils
from ras_method import ras_method
import subprocess
import warnings
warnings.filterwarnings('ignore')
data_path= atra.utils.load_config()['paths']['data']
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
def est_trade_value(x,output_new,sector):
    if (sector != 'other1') and (sector != 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'VA'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
# x['gdp'] = x.gdp*(sec_output.loc[sec_output.region==x.reg1].values[0][2])
return x
def indind_iotable(sup_table,use_table,sectors):
# GET VARIABLES
x = np.array(sup_table.sum(axis=0)) # total production on industry level
g = np.array(sup_table.sum(axis=1)) # total production on product level
F = use_table.iloc[:16,16:].sum(axis=1)
#Numpify
Sup_array = np.asarray(sup_table.iloc[:len(sectors),:len(sectors)]) # numpy array if supply matrix
Use_array = np.asarray(use_table.iloc[:len(sectors),:len(sectors)]) # numpy array of use matrix
g_diag_inv = np.linalg.inv(np.diag(g)) # inverse of g (and diagolinized)
x_diag_inv = np.linalg.inv(np.diag(x)) # inverse of x (and diagolinized)
# Calculate the matrices
B = np.dot(Use_array,x_diag_inv) # B matrix (U*x^-1)
D = np.dot(Sup_array.T,g_diag_inv) # D matrix (V*g^-1)
I_i = np.identity((len(x))) # Identity matrix for industry-to-industry
# Inverse for industry-to-industry
A_ii = np.dot(D,B)
F_ii = np.dot(D,F)/1e6
IDB_inv = np.linalg.inv((I_i-np.dot(D,B))) # (I-DB)^-1
# And canclulate sum of industries
ind = np.dot(IDB_inv,np.dot(D,F)/1e6) # (I-DB)^-1 * DF
IO = pd.concat([pd.DataFrame(np.dot(A_ii,np.diag(ind))),pd.DataFrame(F_ii)],axis=1)
IO.columns = list(use_table.columns[:17])
IO.index = list(use_table.columns[:16])
VA = np.array(list(ind)+[0])-np.array(IO.sum(axis=0))
VA[-1] = 0
IO.loc['ValueA'] = VA
return IO,VA
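# The construction above is the standard industry-by-industry model (notation as
# in the code): B = U * diag(x)^-1, D = V' * diag(g)^-1, industry output driven
# by final demand F is (I - D*B)^-1 * (D*F), intermediate flows are A_ii scaled
# by that output, and value added is taken as the residual so that each column
# sums to total output.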
# =============================================================================
# # Load mapper functions to aggregate tables
# =============================================================================
ind_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='ind_mapper',header=None)
ind_mapper = dict(zip(ind_mapper[0],ind_mapper[1]))
com_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='com_mapper',header=None)
com_mapper = dict(zip(com_mapper[0],['P_'+x for x in com_mapper[1]]))
reg_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='reg_mapper',header=None)
reg_mapper = dict(zip(reg_mapper[0], reg_mapper[1]))
sectors = [chr(i) for i in range(ord('A'),ord('P')+1)]
# =============================================================================
# Load supply table and aggregate
# =============================================================================
sup_table_in = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='Mat Oferta pb',skiprows=2,header=[0,1],index_col=[0,1],nrows=271)
sup_table_in = sup_table_in.drop('Total',level=0,axis=1)
sup_table = sup_table_in.copy()
sup_table.columns = sup_table.columns.get_level_values(0)
sup_table.columns = sup_table.columns.map(ind_mapper)
sup_table = sup_table.T.groupby(level=0,axis=0).sum()
sup_table.columns = sup_table.columns.get_level_values(0)
sup_table.columns = sup_table.columns.map(com_mapper)
sup_table = sup_table.T.groupby(level=0,axis=0).sum()
# =============================================================================
# Load use table and aggregate
# =============================================================================
use_table = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='Mat Utilizacion pc',skiprows=2,header=[0,1],index_col=[0,1],nrows=271)
basic_prod_prices = use_table[['IMPORTACIONES (CIF a nivel de producto y FOB a nivel total)',
'AJUSTE CIF/FOB DE LAS IMPORTACIONES','DERECHOS DE IMPORTACION',
'IMPUESTOS A LOS PRODUCTOS NETOS DE SUBSIDIOS','MARGENES DE COMERCIO',
'MARGENES DE TRANSPORTE','IMPUESTO AL VALOR AGREGADO NO DEDUCIBLE',
]]*-1
use_table = use_table.drop(['PRODUCCION NACIONAL A PRECIOS BASICOS',
'IMPORTACIONES (CIF a nivel de producto y FOB a nivel total)',
'AJUSTE CIF/FOB DE LAS IMPORTACIONES','DERECHOS DE IMPORTACION',
'IMPUESTOS A LOS PRODUCTOS NETOS DE SUBSIDIOS','MARGENES DE COMERCIO',
'MARGENES DE TRANSPORTE','IMPUESTO AL VALOR AGREGADO NO DEDUCIBLE',
'OFERTA TOTAL A PRECIOS DE COMPRADOR','UTILIZACION INTERMEDIA',
'UTILIZACION FINAL','DEMANDA TOTAL'],level=0,axis=1)
basic_prod_prices.columns = basic_prod_prices.columns.get_level_values(0)
basic_prod_prices = basic_prod_prices.T.groupby(level=0,axis=0).sum()
basic_prod_prices.columns = basic_prod_prices.columns.get_level_values(0)
basic_prod_prices.columns = basic_prod_prices.columns.map(com_mapper)
basic_prod_prices = basic_prod_prices.T.groupby(level=0,axis=0).sum()
basic_prod_prices = basic_prod_prices.astype(int)
use_table.columns = use_table.columns.get_level_values(0)
use_table.columns = use_table.columns.map(ind_mapper)
use_table = use_table.T.groupby(level=0,axis=0).sum()
use_table.columns = use_table.columns.get_level_values(0)
use_table.columns = use_table.columns.map(com_mapper)
use_table = use_table.T.groupby(level=0,axis=0).sum()
use_table= pd.concat([use_table,basic_prod_prices],axis=1)
# =============================================================================
# Create IO table and translate to 2016 values
# =============================================================================
IO_ARG,VA = indind_iotable(sup_table,use_table,sectors)
va_new = [498.319,21.986,264.674,1113.747,123.094,315.363,1076.121,168.899,441.293,321.376,750.356,647.929,448.372,426.642,235.624,58.837]
u = ((((np.array(IO_ARG.sum(axis=0)))/VA)[:16])*va_new)
new_fd = (np.array(IO_ARG.iloc[:,16]/(np.array(IO_ARG.sum(axis=0))))*np.array(list(u)+[0]))
new_IO = ras_method(np.array(IO_ARG)[:16,:17],np.array((u)),np.array(list(u-np.array(va_new))+[sum(va_new)]), eps=1e-5)
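# ras_method is assumed to implement standard RAS (biproportional) balancing: it
# iteratively rescales rows and columns of the base matrix until they match the new
# row and column totals passed above, within the tolerance eps.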
NEW_IO = pd.DataFrame(new_IO,columns=sectors+['FD'],index=sectors)
NEW_IO.loc['ValueA'] = np.array(list(va_new)+[0])
# =============================================================================
# Save 2016 table and the indices to prepare disaggregation
# =============================================================================
NEW_IO.to_csv(os.path.join(data_path,'mrio_analysis','basetable.csv'),index=False,header=False)
pd.DataFrame([len(sectors+['other1'])*['ARG'],sectors+['other']]).T.to_csv(os.path.join(data_path,'mrio_analysis','indices.csv'),index=False,header=False)
''' First iteration, no trade to determine total regional input and output '''
# =============================================================================
# Load provincial data
# =============================================================================
prov_data = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
# =============================================================================
# Create proxy data for first iteration
# =============================================================================
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join(data_path,'mrio_analysis','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
    if (sector != 'other1') and (sector != 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join(data_path,'mrio_analysis','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join(data_path,'mrio_analysis','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
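# mi_index enumerates every (sector, region, sector, region) combination, i.e. all
# candidate inter-regional trade flows; the proxies below initialise them to zero and
# drop the same-region pairs.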
for iter_,sector in enumerate(sectors+['other1','other2']):
    if (sector != 'other1') and (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join(data_path,'mrio_analysis','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade =
|
pd.DataFrame(columns=['year','gdp'],index= mi_index)
|
pandas.DataFrame
|
import json
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow.keras as keras
import pandas as pd
from datetime import datetime
from termcolor import colored
# Timer.
startTime = datetime.now()
# Path to created json file from mel preprocess and feature extraction script.
DATA_PATH = ".../mel_pitch_shift_9.0.json"
# Path to save model.
MODEL_SAVE = '.../model_1.h5'
# Path to save training history and model accuracy performance at end of training.
HISTORY_SAVE = ".../history_1.csv"
ACC_SAVE = ".../models_acc_1.json"
def load_data(data_path):
"""Loads training dataset from json file.
:param data_path (str): Path to json file containing data
:return X (ndarray): Inputs
:return y (ndarray): Targets
"""
with open(data_path, "r") as fp:
data = json.load(fp)
# Convert lists to numpy arrays.
X = np.array(data["mel"]) # The name in brackets is changed to "mfccs" if MFCC features are used to train.
y = np.array(data["labels"])
return X, y
def prepare_datasets(test_size, validation_size):
# Load extracted features and labels data.
X, y = load_data(DATA_PATH)
# Create train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
# Create train/validation split.
X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)
# 3D array.
X_train = X_train[..., np.newaxis] # 4-dim array: (# samples, # time steps, # coefficients, 1)
X_validation = X_validation[..., np.newaxis]
X_test = X_test[..., np.newaxis]
return X_train, X_validation, X_test, y_train, y_validation, y_test
def build_model(input_shape):
# Create model.
model = keras.Sequential()
# 1st convolutional layer.
model.add(keras.layers.Conv2D(16, (5, 5), activation='relu', input_shape=input_shape))
# 16 kernels, and 5x5 grid size of kernel.
model.add(keras.layers.MaxPool2D((5, 5), strides=(2, 2), padding='same'))
# Pooling size 5x5.
model.add(keras.layers.BatchNormalization())
    # Batch normalization stabilizes training and typically speeds up convergence.
# Resize for RNN part.
resize_shape = model.output_shape[2] * model.output_shape[3]
model.add(keras.layers.Reshape((model.output_shape[1], resize_shape)))
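    # The reshape collapses the pooled frequency and channel axes into a single feature
    # vector per step along the first spatial axis, so the CNN output can feed the LSTM
    # as a sequence.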
# RNN layer.
model.add(keras.layers.LSTM(32, input_shape=input_shape, return_sequences=True))
# Flatten the output and feed into dense layer.
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(32, activation='relu'))
# 32 neurons.
model.add(keras.layers.Dropout(0.3))
    # Dropout reduces the chance of overfitting.
# Output layer that uses softmax activation.
model.add(keras.layers.Dense(2, activation='softmax'))
# 2 neurons --> depends on how many categories we want to predict.
return model
def predict(model, X, y):
# Random prediction post-training.
X = X[np.newaxis, ...]
prediction = model.predict(X)
# Extract index with max value.
predicted_index = np.argmax(prediction, axis=1)
print("Expected index: {}, Predicted index: {}".format(y, predicted_index))
if __name__ == "__main__":
# Create train, validation and test sets.
X_train, X_validation, X_test, y_train, y_validation, y_test = prepare_datasets(0.25, 0.2) # (test size, val size)
# Early stopping.
callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# Checkpoint.
checkpoint = keras.callbacks.ModelCheckpoint(MODEL_SAVE, monitor='val_loss',
mode='min', save_best_only=True, verbose=1)
# Build the CRNN network.
input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])
model = build_model(input_shape)
# Compile the network.
optimizer = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
model.summary()
# Train the CRNN.
history = model.fit(X_train, y_train, validation_data=(X_validation, y_validation), batch_size=16, epochs=1000,
callbacks=[callback, checkpoint])
# Save history.
hist =
|
pd.DataFrame(history.history)
|
pandas.DataFrame
|
from .Data import Data
from .Zemberek import Zemberek
from .Esanlam import Esanlam
import pandas as pd
import re
import json
from collections import OrderedDict
from operator import itemgetter
import os
from .Config import dirs as dirs
from .Stops import Stops
from .ITUNLPTools import ITUNLPTools
class Main():
stats = []
num = 1
def __init__(self):
print(dirs._path)
if not os.path.exists(dirs._path + dirs._datapath):
os.makedirs(dirs._path + dirs._datapath)
if not os.path.exists(dirs._path + dirs._datapath + "\\" + dirs._processdir):
os.makedirs(dirs._path + dirs._datapath + "\\" + dirs._processdir)
self.Zemberek = Zemberek()
self.Data = Data()
self.Esanlam = Esanlam()
self.Stops = Stops()
self.ITUNLPTools = ITUNLPTools()
def is_str(self, v):
return type(v) is str
def while_replace(self, string, neddle, haystack):
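        # Repeatedly replaces every occurrence of `neddle` in `string` with `haystack`
        # (here the third argument is the replacement text) until none remain.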
while neddle in string: string = string.replace(neddle, haystack)
return string
def Tokenize(self, area, newarea=False):
if (newarea == False): newarea = area
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishTextToken(x))
def jsonunicode(self, data):
if isinstance(data, str):
return json.dumps(json.loads(data), ensure_ascii=False)
else:
return ""
def fixUnicode(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.jsonunicode(x))
def fixChars(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__fixcharsworker(x))
def __fixcharsworker(self, x):
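        # Collapses any run of more than two identical consecutive characters down to
        # two (e.g. "coook" -> "cook"), then re-tokenizes the cleaned text.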
if isinstance(x, list):
x = " ".join(x)
newtext = ""
length = 0
charbefore = ""
i = 0
for char in x:
if char == charbefore:
length += 1
if length < 2:
newtext += char
else:
newtext += char
length = 0
i += 1
charbefore = char
if (x != newtext): print(x, newtext)
return self.Zemberek.TurkishTextToken(newtext)
def Clean(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__cleanerworker(x))
def __cleanerworker(self, x):
if isinstance(x, list):
x = " ".join(x)
x = x.replace('-', '')
x = x.replace("'", '')
x = x.replace("â", 'a')
x = x.replace("İ", "i")
x = x.replace("î", 'i')
x = x.replace("î", 'i')
x = re.sub(re.compile(r"[-'\"]"), '', x)
x = re.sub(re.compile(r"[\\][ntrv]"), ' ', x)
x = re.sub(re.compile(r'[^a-zA-ZçığöüşÇİĞÖÜŞ ]'), ' ', x)
x = self.while_replace(x, " ", " ")
x = x.lower()
x = self.Zemberek.TurkishTextToken(x)
return x
def __cleanword(self, x):
if isinstance(x, list):
x = " ".join(x)
x = x.replace('-', '')
x = x.replace("'", '')
x = x.replace("â", 'a')
x = x.replace("İ", "i")
x = x.replace("î", 'i')
x = x.replace("î", 'i')
x = re.sub(re.compile(r"[-'\"]"), '', x)
x = re.sub(re.compile(r"[\\][ntrv]"), ' ', x)
x = re.sub(re.compile(r'[^a-zA-ZçığöüşÇİĞÖÜŞ ]'), ' ', x)
x = self.while_replace(x, " ", "")
x = x.lower()
return x
def lower(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self._lowerworker(x))
def _lowerworker(self, text):
if isinstance(text, str):
tokens = self.Zemberek.TurkishTextToken(text)
else:
tokens = text
newtext = []
for token in tokens:
token = token.replace('İ', 'i')
token = token.replace("ardunio", 'arduino')
token = token.replace("nardunio", 'arduino')
token = token.lower()
newtext.append(token)
return newtext
def Normalize(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishNormalizer(x))
def NormalizeWords(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishWordNormalizer(x))
def sorguBirlestir(self, x, sifirla=False):
if sifirla == True:
self.sorgumetni = ""
self.sorgumetni += "\n\n" + " ".join(x)
def ITUNormalize(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.sorguBirlestir("", True)
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.sorguBirlestir(x))
print(self.sorgumetni)
print(self.ITUNLPTools.ask("normalize", self.sorgumetni))
def NormWithCorr(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] =
|
pd.Series()
|
pandas.Series
|
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([
|
pd.DataFrame(worker, index=[i])
|
pandas.DataFrame
|
from os import error
import os
import re
from flask import Flask, render_template, session, request, redirect, send_from_directory
from flask.helpers import url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.operators import distinct_op
from PIL import Image
import random
import pandas as pd
import numpy as np
from sklearn.decomposition import TruncatedSVD
import pickle
from collections import Counter
from datetime import datetime
# Flask app configs
app=Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///user-data.sqlite3'
app.secret_key = 'soverysecret'
#Database
db = SQLAlchemy(app)
class users(db.Model):
id = db.Column('user_id', db.Integer, primary_key = True)
name = db.Column(db.String(100))
email = db.Column(db.String(100))
password = db.Column(db.String(100))
interests = db.Column(db.String(200))
class images(db.Model):
id = db.Column('image_id', db.Integer, primary_key=True)
title = db.Column(db.String(100))
fields = db.Column(db.String(300))
description = db.Column(db.String(1000))
links = db.Column(db.String(200))
user_id = db.Column(db.Integer)
class user_slugs(db.Model):
id = db.Column('slug_id', db.Integer, primary_key = True)
user_id = db.Column(db.Integer)
bio = db.Column(db.String(1000))
website = db.Column(db.String(100))
class pin_category(db.Model):
id = db.Column('category_id', db.Integer, primary_key=True)
name = db.Column(db.String(200))
class track_visits(db.Model):
id = db.Column('visit_id', db.Integer, primary_key=True)
user_id = db.Column(db.Integer)
img_id = db.Column(db.Integer)
class admin_post(db.Model):
id = db.Column('admin_id', db.Integer, primary_key=True)
advertisement_title = db.Column(db.String(100))
thought_title = db.Column(db.String(100))
advertisement_link = db.Column(db.String(100))
thought_link = db.Column(db.String(100))
date = db.Column(db.String(100))
class follow_user(db.Model):
id = db.Column('follow_id', db.Integer, primary_key=True)
user_email = db.Column(db.String(100))
follower_email = db.Column(db.String(100))
class saved_pins(db.Model):
id = db.Column('save_id', db.Integer, primary_key=True)
user_id = db.Column(db.Integer)
img_id = db.Column(db.Integer)
#Run only to create initial .sqlite database
#db.create_all()
#Data extractors
def extract_users():
records=users.query.all()
id,email,interests=[],[],[]
for rec in records:
var_id,var_name=rec.id,rec.email
for interest in rec.interests.split(","):
id.append(var_id)
email.append(var_name)
interests.append(interest)
df=pd.DataFrame({"user_id":pd.Series(id),"name":pd.Series(email),"category":pd.Series(interests),"flag":pd.Series(np.ones(len(email)))})
return df
def extract_images():
records=images.query.all()
id,title,fields,user_ids=[],[],[],[]
for rec in records:
id.append(rec.id)
title.append(rec.title)
fields.append(rec.fields)
user_ids.append(rec.user_id)
df=pd.DataFrame({"img_id":pd.Series(id),"title":pd.Series(title),"category":pd.Series(fields),"user_id":
|
pd.Series(user_ids)
|
pandas.Series
|
from sklearn.base import BaseEstimator
import pandas as pd
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from itertools import product
import numpy as np
def generate_x_y(data, real_col, cat_col, y):
data_real = data[real_col]
data_cat = data[cat_col]
data_cat =
|
pd.get_dummies(data_cat)
|
pandas.get_dummies
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model' ##-->> MUST BE 'GIS_Model' TO USE THE GIS MODEL
resolucion = 'diaria' ##-->> OPTIONS ARE 'diaria' (daily) OR 'horaria' (hourly)
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every 10 min. Start and end dates are strings: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
def serie_Kumar_Model_hora(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, prepared by <NAME> ' \
    'for the AMVA and his thesis. The data in the original dataframe are reordered into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point, using the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
    # Parse the timestamp column once and use it as the index.
    data_Model.index = pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
    'Returns the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of ' \
    'days at a point with a given latitude and longitude ("lat-lon" as floats), starting from an initial date ' \
    '"start", e.g. datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
# Calculate radiation every hour for 365 days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
print('Teorica con el modelo de KUMAR')
###############################################################################
##-------------THEORETICAL EFFICIENCIES AS A PROXY FOR TRANSPARENCY-----------##
###############################################################################
'Computation of the theoretical efficiency as a proxy for the transparency of the atmosphere.'
'This uses the pyranometer data and the theoretical radiation'
'from <NAME>; the aim is to obtain the characteristics derived from the'
'stochastic analysis, similar to that of <NAME> in his doctoral thesis.'
##------------------READING THE EXPERIMENT DATA-------------------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since radiation is what matters in this case, the data are filtered by keeping'
'power values greater than or equal to 0, which only seem to appear about one'
'hour after radiation starts to arrive.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P350['strength'] >= 0) & (df_P350['NI'] >= 0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P348['strength'] >= 0) & (df_P348['NI'] >= 0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
##----ADJUSTING THE THEORETICAL RADIATION DATA TO THE DESIRED DATE RANGE------##
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every hour. Start and end'
    'dates are strings: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
Io_hora_975 = serie_Kumar_Model_hora('6001')
Io_hora_350 = serie_Kumar_Model_hora('6002')
Io_hora_348 = serie_Kumar_Model_hora('6003')
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
Io_hora_975 = Io_hora_975.loc[(Io_hora_975.index >= '2018-03-20') & (Io_hora_975.index <= '2018-'+str(df_P975.index[-1].month)+'-'+str(df_P975.index[-1].day+1))]
Io_hora_350 = Io_hora_350.loc[(Io_hora_350.index >= '2018-03-22') & (Io_hora_350.index <= '2018-'+str(df_P350.index[-1].month)+'-'+str(df_P350.index[-1].day+1))]
Io_hora_348 = Io_hora_348.loc[(Io_hora_348.index >= '2018-03-23') & (Io_hora_348.index <= '2018-'+str(df_P348.index[-1].month)+'-'+str(df_P348.index[-1].day+1))]
Io_hora_975 = Io_hora_975.between_time('06:00', '17:00')
Io_hora_975.index = [Io_hora_975.index[i].replace(year=2019) for i in range(len(Io_hora_975.index))]
Io_hora_350 = Io_hora_350.between_time('06:00', '17:00')
Io_hora_350.index = [Io_hora_350.index[i].replace(year=2019) for i in range(len(Io_hora_350.index))]
Io_hora_348 = Io_hora_348.between_time('06:00', '17:00')
Io_hora_348.index = [Io_hora_348.index[i].replace(year=2019) for i in range(len(Io_hora_348.index))]
df_Rad_P975 = pd.concat([Io_hora_975, df_P975_h], axis = 1)
df_Rad_P350 = pd.concat([Io_hora_350, df_P350_h], axis = 1)
df_Rad_P348 = pd.concat([Io_hora_348, df_P348_h], axis = 1)
df_Rad_P975 = df_Rad_P975.drop(['NI','strength'], axis=1)
df_Rad_P350 = df_Rad_P350.drop(['NI','strength'], axis=1)
df_Rad_P348 = df_Rad_P348.drop(['NI','strength'], axis=1)
##--------------------ACTUAL EFFICIENCY AS A PROXY FOR TRANSPARENCY-----------##
df_Rad_P975['Efi_Transp'] = df_Rad_P975['radiacion'] / df_Rad_P975['Radiacion_Teo']
df_Rad_P350['Efi_Transp'] = df_Rad_P350['radiacion'] / df_Rad_P350['Radiacion_Teo']
df_Rad_P348['Efi_Transp'] = df_Rad_P348['radiacion'] / df_Rad_P348['Radiacion_Teo']
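# 'Efi_Transp' is the ratio of measured to theoretical radiation (a clearness-index-like
# quantity); values above 1 flag hours where the clear-sky model underestimates radiation.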
##---------HOURS AT WHICH THE HIGHEST EFFICIENCY OCCURS, AND THEIR HISTOGRAM--------------##
'The frequency of the hours that exceeded the maximum efficiency (1) is shown in the'
'histogram below. The result shows that the highest frequencies occur at 6 and 7 in'
'the morning, which is attributable to shortcomings of the radiation model under'
'clear-sky conditions at those points.'
Hour_Max_Efi_975 = df_Rad_P975[df_Rad_P975['Efi_Transp']>1].index.hour
Hour_Max_Efi_350 = df_Rad_P350[df_Rad_P350['Efi_Transp']>1].index.hour
Hour_Max_Efi_348 = df_Rad_P348[df_Rad_P348['Efi_Transp']>1].index.hour
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hour_Max_Efi_348, bins='auto', alpha = 0.5)
ax1.set_title(u'Distribución horas de excedencia \n de la eficiencia en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hour_Max_Efi_350, bins='auto', alpha = 0.5)
ax2.set_title(u'Distribución horas de excedencia \n de la eficiencia en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hour_Max_Efi_975, bins='auto', alpha = 0.5)
ax3.set_title(u'Distribución horas de excedencia \n de la eficiencia en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHoraExceEfi.png')
plt.show()
##-------DISCRIMINATING BETWEEN RAINY AND DRY DAYS BY RADIATION PERCENTILES--------##
'To deal with days on which the pyranometers only measured during a fraction of the'
'day, due to possible damage or alterations, only days with at least 6 hours of'
'measurements are considered.'
df_Rad_P975_count_h_pira = df_Rad_P975.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P350_count_h_pira = df_Rad_P350.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P348_count_h_pira = df_Rad_P348.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
days_P975_count_h_pira = df_Rad_P975_count_h_pira.index[df_Rad_P975_count_h_pira == True]
days_P350_count_h_pira = df_Rad_P350_count_h_pira.index[df_Rad_P350_count_h_pira == True]
days_P348_count_h_pira = df_Rad_P348_count_h_pira.index[df_Rad_P348_count_h_pira == True]
'Thresholds were established empirically to select the markedly cloudy and markedly'
'clear days within the recording period, following the procedures in the program'
'Umbrales_Radiacion_Piranometro.py'
Sum_df_Rad_P975 = df_Rad_P975.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P350 = df_Rad_P350.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P348 = df_Rad_P348.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['radiacion']>0]
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['radiacion']>0]
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['radiacion']>0]
lista_days_975 = []
for i in range(len(Sum_df_Rad_P975)):
if Sum_df_Rad_P975.index[i] in days_P975_count_h_pira:
lista_days_975.append(1)
else:
lista_days_975.append(0)
Sum_df_Rad_P975['days'] = lista_days_975
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['days'] == 1]
Sum_df_Rad_P975 = Sum_df_Rad_P975.drop(['days'], axis = 1)
lista_days_350 = []
for i in range(len(Sum_df_Rad_P350)):
if Sum_df_Rad_P350.index[i] in days_P350_count_h_pira:
lista_days_350.append(1)
else:
lista_days_350.append(0)
Sum_df_Rad_P350['days'] = lista_days_350
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['days'] == 1]
Sum_df_Rad_P350 = Sum_df_Rad_P350.drop(['days'], axis = 1)
lista_days_348 = []
for i in range(len(Sum_df_Rad_P348)):
if Sum_df_Rad_P348.index[i] in days_P348_count_h_pira:
lista_days_348.append(1)
else:
lista_days_348.append(0)
Sum_df_Rad_P348['days'] = lista_days_348
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['days'] == 1]
Sum_df_Rad_P348 = Sum_df_Rad_P348.drop(['days'], axis = 1)
Desp_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion>=(Sum_df_Rad_P975.Radiacion_Teo)*0.85]
Desp_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion>=(Sum_df_Rad_P350.Radiacion_Teo)*0.78]
Desp_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion>=(Sum_df_Rad_P348.Radiacion_Teo)*0.80]
Nuba_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion<=(Sum_df_Rad_P975.Radiacion_Teo)*0.25]
Nuba_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion<=(Sum_df_Rad_P350.Radiacion_Teo)*0.25]
Nuba_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion<=(Sum_df_Rad_P348.Radiacion_Teo)*0.22]
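# Clear ("Desp") days: daily measured radiation of at least 78-85% of the theoretical
# total; cloudy ("Nuba") days: at most 22-25%, with the per-station thresholds set above.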
Appended_data_desp_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Appended_data_desp_975.append(df_P975_h[df_P975_h.index.date == Desp_Pira_975.index.date[i]])
Appended_data_desp_975 = pd.concat(Appended_data_desp_975)
Appended_data_desp_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Appended_data_desp_350.append(df_P350_h[df_P350_h.index.date == Desp_Pira_350.index.date[i]])
Appended_data_desp_350 = pd.concat(Appended_data_desp_350)
Appended_data_desp_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Appended_data_desp_348.append(df_P348_h[df_P348_h.index.date == Desp_Pira_348.index.date[i]])
Appended_data_desp_348 = pd.concat(Appended_data_desp_348)
Appended_data_nuba_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Appended_data_nuba_975.append(df_P975_h[df_P975_h.index.date == Nuba_Pira_975.index.date[i]])
Appended_data_nuba_975 = pd.concat(Appended_data_nuba_975)
Appended_data_nuba_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Appended_data_nuba_350.append(df_P350_h[df_P350_h.index.date == Nuba_Pira_350.index.date[i]])
Appended_data_nuba_350 = pd.concat(Appended_data_nuba_350)
Appended_data_nuba_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Appended_data_nuba_348.append(df_P348_h[df_P348_h.index.date == Nuba_Pira_348.index.date[i]])
Appended_data_nuba_348 = pd.concat(Appended_data_nuba_348)
#------------------RADIATION HISTOGRAMS FOR EACH POINT IN THE TWO CASES----------------##
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Appended_data_desp_348['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Appended_data_nuba_348['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Appended_data_desp_350['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Appended_data_nuba_350['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Appended_data_desp_975['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Appended_data_nuba_975['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoRadiacionNubaDespTotal.png')
plt.show()
#------------------KOLMOGOROV-SMIRNOV GOODNESS-OF-FIT TEST ----------------##
'The KOLMOGOROV-SMIRNOV goodness-of-fit test is applied to the data of the cloudy and'
'clear days against the overall data series, to evaluate whether they belong to the same'
'probability distribution function. A 5% significance level is used. This test is more'
'sensitive to values close to the mean than to the extremes, so in general it can be'
'used to avoid outliers. The null hypothesis is that the data of both series follow the'
'same distribution; the alternative hypothesis suggests they do not.'
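# ks_2samp returns (statistic, p-value); below, the null hypothesis of a common
# distribution is rejected whenever the p-value is at most `Significancia`.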
Significancia = 0.05
SK_desp_348 = ks_2samp(Appended_data_desp_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_desp = SK_desp_348[0]
pvalue_348_desp = SK_desp_348[1]
SK_nuba_348 = ks_2samp(Appended_data_nuba_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_nuba = SK_nuba_348[0]
pvalue_348_nuba = SK_nuba_348[1]
if pvalue_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if pvalue_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_350 = ks_2samp(Appended_data_desp_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_desp = SK_desp_350[0]
pvalue_350_desp = SK_desp_350[1]
SK_nuba_350 = ks_2samp(Appended_data_nuba_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_nuba = SK_nuba_350[0]
pvalue_350_nuba = SK_nuba_350[1]
if pvalue_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if pvalue_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_975 = ks_2samp(Appended_data_desp_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_desp = SK_desp_975[0]
pvalue_975_desp = SK_desp_975[1]
SK_nuba_975 = ks_2samp(Appended_data_nuba_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_nuba = SK_nuba_975[0]
pvalue_975_nuba = SK_nuba_975[1]
if pvalue_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if pvalue_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------EFFICIENCY HISTOGRAMS FOR EACH POINT IN THE TWO CASES----------------##
Desp_Efi_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Desp_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Desp_Pira_348.index.date[i]])
Desp_Efi_348 = pd.concat(Desp_Efi_348)
Desp_Efi_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Desp_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Desp_Pira_350.index.date[i]])
Desp_Efi_350 = pd.concat(Desp_Efi_350)
Desp_Efi_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Desp_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Desp_Pira_975.index.date[i]])
Desp_Efi_975 = pd.concat(Desp_Efi_975)
Nuba_Efi_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Nuba_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Nuba_Pira_348.index.date[i]])
Nuba_Efi_348 = pd.concat(Nuba_Efi_348)
Nuba_Efi_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Nuba_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Nuba_Pira_350.index.date[i]])
Nuba_Efi_350 = pd.concat(Nuba_Efi_350)
Nuba_Efi_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Nuba_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Nuba_Pira_975.index.date[i]])
Nuba_Efi_975 = pd.concat(Nuba_Efi_975)
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Desp_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Nuba_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Desp_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Nuba_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Desp_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Nuba_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoEficiencianNubaDespTotal.png')
plt.show()
SK_desp_Efi_348 = ks_2samp(Desp_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_desp = SK_desp_Efi_348[0]
Efi_348_desp = SK_desp_Efi_348[1]   # p-value used in the test below
SK_nuba_Efi_348 = ks_2samp(Nuba_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_nuba = SK_nuba_Efi_348[0]
Efi_348_nuba = SK_nuba_Efi_348[1]   # p-value used in the test below
if Efi_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if Efi_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_Efi_350 = ks_2samp(Desp_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_desp = SK_desp_Efi_350[0]
Efi_350_desp = SK_desp_Efi_350[1]   # p-value used in the test below
SK_nuba_Efi_350 = ks_2samp(Nuba_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_nuba = SK_nuba_Efi_350[0]
Efi_350_nuba = SK_nuba_Efi_350[1]   # p-value used in the test below
if Efi_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if Efi_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_Efi_975 = ks_2samp(Desp_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
Efi_975_desp = SK_desp_Efi_975[0]
Efi_975_desp = SK_desp_Efi_975[1]
SK_nuba_Efi_975 = ks_2samp(Nuba_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
Efi_975_nuba = SK_nuba_Efi_975[0]
Efi_975_nuba = SK_nuba_Efi_975[1]
if Efi_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if Efi_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------ ESTIMATION OF THE AUTOCORRELATION AT EACH POINT ----------------##
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
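# Quick reading of the estimator above (values illustrative): result[0] is always 1.0, since any
# series is perfectly correlated with itself at lag 0, and result[k] is the lag-k sample
# autocorrelation (the n-k overlapping products normalised by the variance). For example,
# estimated_autocorrelation(np.array([1., 2., 1., 2.])) gives values alternating close to
# [1, -1, 1, -1].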
Auto_corr_975 = estimated_autocorrelation(df_P975_h['radiacion'].values)
X = df_P975_h[df_P975_h['radiacion'].values>0]['radiacion'].values
lag = [1, 6, 12, 24]
AutoCorr_lag = []
for j in lag:
    print(j)
    # Lag-j autocorrelation: Pearson correlation of the series against itself shifted by j
    # samples; pearsonr returns (r, p-value) and the coefficient of interest is r = [0].
    AutoCorr_lag.append(pearsonr(X[j:], X[:-j])[0])
###############################################################################
##-------------------RADIACION TEORICA PARA UN AÑO DE DATOS------------------##
###############################################################################
# A full year of theoretical radiation data is expected here, to be used for setting up the
# prediction scenarios and the theoretical yields. Intended for the 2018 data.
## --- READING PYRANOMETER DATA --- ##
df1 = df1.set_index(["fecha_hora"])
df1.index = df1.index.tz_localize('UTC').tz_convert('America/Bogota')
df1.index = df1.index.tz_localize(None)
## --- GROUPING THE HOURLY DATA OVER ONE YEAR --- ##
df1_hora = df1.groupby(pd.Grouper(freq="H")).mean()
df1_hora = df1_hora[(df1_hora.index >= '2018-01-01 00:00:00') & (df1_hora.index <= '2018-12-31 23:59:00')]
df1_hora = df1_hora.between_time('06:00', '17:00')              ##--> Keep only the daytime hours
## --- BUILDING THE DAILY SURFACE RADIATION AND GROUPING THE DAILY DATA OVER ONE YEAR --- ##
Io_dia = Io.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1_dia[(df1_dia.index >= '2018-01-01') & (df1_dia.index <= '2018-12-31')]
## --- CHOOSING THE TEMPORAL RESOLUTION TO WORK WITH --- ##
if resolucion == 'diaria':
Io = Io_dia
df1_rad = df1_dia
elif resolucion == 'horaria':
Io = Io_hora
df1_rad = df1_hora
## --- BUILDING THE THEORETICAL-EFFICIENCY ANALYSIS SCENARIOS --- ##
if len(Io)==len(df1_rad):
df1_rad['TAR'] = Io
df1_rad = df1_rad.drop([u'Unnamed: 0', u'idestacion'], axis=1)
df1_rad['Efi_Teorica'] = df1_rad[u'radiacion']/df1_rad[u'TAR']
else:
    print (u'There is not a full year of pyranometer data')
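## Note on the scenario above (numbers illustrative, not taken from the data): Efi_Teorica is the
## ratio of the measured radiation to the theoretical radiation stored in the TAR column, so it is
## dimensionless; e.g. radiacion = 600 and TAR = 1000 at the same timestamp give Efi_Teorica = 0.6.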
## -- Absolute maximum
df1_radr_max = df1_rad.loc[lambda df_hora: df_hora['Efi_Teorica'] == np.nanmax(df1_rad.Efi_Teorica)]
## -- Absolute 90th percentile
df1_rad90 = df1_rad.quantile(0.90)
## -- Absolute 50th percentile
df1_rad50 = df1_rad.quantile(0.50)
## -- Absolute 10th percentile
df1_rad10 = df1_rad.quantile(0.10)
## ----- MONTHLY ----- ##
df1_hm_mean = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).mean()
df1_hm_mean_90 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.90), 2)]
df1_hm_mean_50 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.50), 2)]
df1_hm_mean_10 = df1_hm_mean.loc[lambda df1_hm_mean: df1_hm_mean.round(3) >= round(df1_hm_mean.quantile(0.10), 2)]
## -- 90th percentile of each month
df1_hm_quantile90 = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).quantile(0.90)
## -- 50th percentile of each month
df1_hm_quantile50 = df1_rad.Efi_Teorica.groupby(
|
pd.Grouper(freq="M")
|
pandas.Grouper
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#RIL Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
tf = 201
#Parameters for residue decomposition (Source: De Rosa et al., 2017)
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
#df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S1')
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
t = range(0,tf,1)
#c_loss_S1 = df1['C_loss'].values
c_firewood_energy_S2 = df2['Firewood_other_energy_use'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S2
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_S2(t,remainAGB_S2):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2
#set zero matrix
output_decomp_S2 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2[i:,i] = decomp_S2(t[:len(t)-i],remain_part_S2)
print(output_decomp_S2[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2[:,i] = np.diff(output_decomp_S2[:,i])
i = i + 1
print(subs_matrix_S2[:,:4])
print(len(subs_matrix_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2 = subs_matrix_S2.clip(max=0)
print(subs_matrix_S2[:,:4])
#make the results as absolute values
subs_matrix_S2 = abs(subs_matrix_S2)
print(subs_matrix_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2)
subs_matrix_S2 = np.vstack((zero_matrix_S2, subs_matrix_S2))
print(subs_matrix_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2 = (tf,1)
decomp_tot_S2 = np.zeros(matrix_tot_S2)
i = 0
while i < tf:
decomp_tot_S2[:,0] = decomp_tot_S2[:,0] + subs_matrix_S2[:,i]
i = i + 1
print(decomp_tot_S2[:,0])
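# The same difference-and-clip idea on a toy series, as a quick sanity check (the numbers below
# are made up for illustration, they are not RIL data): a monotonically decaying carbon stock
# [10, 7, 5, 4] releases [3, 2, 1] per step, i.e. the absolute value of np.diff after any
# positive differences (apparent gains) are clipped to zero.
_toy_stock = np.array([10.0, 7.0, 5.0, 4.0])
_toy_emission = abs(np.diff(_toy_stock).clip(max=0))
print(_toy_emission)  # expected: [3. 2. 1.]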
#S2_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
tf = 201
t = np.arange(tf)
def decomp_S2_C(t,remainAGB_S2_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_C
#set zero matrix
output_decomp_S2_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_C[i:,i] = decomp_S2_C(t[:len(t)-i],remain_part_S2_C)
print(output_decomp_S2_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_C[:,i] = np.diff(output_decomp_S2_C[:,i])
i = i + 1
print(subs_matrix_S2_C[:,:4])
print(len(subs_matrix_S2_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_C = subs_matrix_S2_C.clip(max=0)
print(subs_matrix_S2_C[:,:4])
#make the results as absolute values
subs_matrix_S2_C = abs(subs_matrix_S2_C)
print(subs_matrix_S2_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_C)
subs_matrix_S2_C = np.vstack((zero_matrix_S2_C, subs_matrix_S2_C))
print(subs_matrix_S2_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_C = (tf,1)
decomp_tot_S2_C = np.zeros(matrix_tot_S2_C)
i = 0
while i < tf:
decomp_tot_S2_C[:,0] = decomp_tot_S2_C[:,0] + subs_matrix_S2_C[:,i]
i = i + 1
print(decomp_tot_S2_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E_C
df =
|
pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
|
pandas.read_excel
|
"""
Porfolio models and calculations
"""
from collections import OrderedDict
from scipy import stats
import numpy as np
import pandas as pd
import empyrical as emp
import portfolioopt as pfopt
# calculating portfolio performance
class PortfolioModels():
def __init__(self, datafolder):
self.datafolder = datafolder
self._daily = None
self._calculate_daily()
return None
def _merge_market_with_orders(self, df_ord, mkt):
"""
Helper for merging orders with panel frame with market data
"""
# initialize columns
mkt['cum_size'] = 0
mkt['cum_cost_basis'] = 0
mkt['cum_realized_gain'] = 0
# loop over tickers, except the last one, which is market
for _symbol in mkt.index.get_level_values(0).unique()[:-1]:
df1 = mkt.loc[_symbol]
df2 = df_ord[df_ord['symbol'] == _symbol].copy()
df2.set_index('date', inplace=True)
df = pd.merge(
df1, df2[['cum_size', 'cum_cost_basis', 'cum_realized_gain']],
left_index=True, right_index=True, how='left')
df.rename(columns={
'cum_size_y': 'cum_size',
'cum_cost_basis_y': 'cum_cost_basis',
'cum_realized_gain_y': 'cum_realized_gain'}, inplace=True)
df.drop('cum_size_x', axis=1, inplace=True)
df.drop('cum_cost_basis_x', axis=1, inplace=True)
df.drop('cum_realized_gain_x', axis=1, inplace=True)
# propagate values from last observed
df.fillna(method='ffill', inplace=True)
df.fillna(0, inplace=True)
mkt.loc[_symbol] = df.values
return mkt
def _merge_market_with_dividends(self, df_div, mkt):
"""
Helper to merge the market frame with dividends
"""
# initialize columns
mkt['cum_dividends'] = 0
mkt['dividend_rate'] = 0
# loop over tickers, except the last one, which is market
for _symbol in mkt.index.get_level_values(0).unique()[:-1]:
df1 = mkt.loc[_symbol]
df2 = df_div[df_div['symbol'] == _symbol].copy()
df2.set_index('date', inplace=True)
df = pd.merge(
df1, df2[['cum_dividends', 'rate']],
left_index=True, right_index=True, how='left')
df.drop('cum_dividends_x', axis=1, inplace=True)
df.drop('dividend_rate', axis=1, inplace=True)
df.rename(columns={
'cum_dividends_y': 'cum_dividends',
'rate': 'dividend_rate'}, inplace=True)
# propagate values from last observed
df['cum_dividends'].fillna(method='ffill', inplace=True)
df['cum_dividends'].fillna(0, inplace=True)
mkt.loc[_symbol] = df.values
return mkt
def _calculate_daily(self):
"""
Calculate daily prices, cost-basis, ratios, returns, etc.
Used for plotting and also showing the final snapshot of
the portfolio
-------------
Parameters:
- None
Return:
- Multiindex dataframe with daily values
"""
# read frames for internal use
market = pd.read_pickle(self.datafolder + "/market.pkl")
# recreate orders from open and closed pos
df = pd.concat([
pd.read_pickle(self.datafolder + "/open.pkl"),
pd.read_pickle(self.datafolder + "/closed.pkl")]).sort_index()
# calculate cumulative size and cost basis
df['cum_size'] =\
df.groupby('symbol').signed_size.cumsum()
# cost basis for closed orders is equal to the one for original open
# position, so cumulative does not include any gains or losses from
# closing orders
df['cum_cost_basis'] =\
df.groupby('symbol').current_cost_basis.cumsum()
# aggregate orders on the same day
func = {
'average_price': np.mean,
'current_cost_basis': np.sum,
'current_size': np.sum,
'fees': np.sum,
'final_cost_basis': np.sum,
'final_size': np.sum,
'signed_size': np.sum,
'cum_size': np.sum,
'cum_cost_basis': np.sum,
'realized_gains': np.sum}
df = df.groupby(['date', 'symbol'], as_index=False).agg(func)
# df = pd.pivot_table(df,
# values=func.keys(),
# index=['symbol', 'date'],
# aggfunc=func).reset_index()
# calculate cumulative size and cost basis
df['cum_size'] =\
df.groupby('symbol').signed_size.cumsum()
df['cum_cost_basis'] =\
df.groupby('symbol').current_cost_basis.cumsum()
df['cum_realized_gain'] =\
df.groupby('symbol').realized_gains.cumsum()
# fix the average price, so it is weighted mean
df['average_price'] =\
df['cum_cost_basis'] / df['cum_size']
# merge orders with market
pf = self._merge_market_with_orders(df, market)
df = pd.read_pickle(self.datafolder + "/dividends.pkl")
# calculate cumulative dividends
df['cum_dividends'] = df.groupby('symbol').amount.cumsum()
# merge orders with market
pf = self._merge_market_with_dividends(df, pf)
# replace null stock prices using backfill to avoid issues with
# daily_change and beta calculations
close_price = pf['close']
close_price.values[close_price.values == 0] = np.nan
close_price.fillna(method='bfill', inplace=True)
pf['close'] = close_price
# Main daily portfolio properties
# dividend yield
pf['dividend_yield'] = pf['dividend_rate'] / pf['close'] * 100
# cumulative current value of the position for the given security
# at the start and end of the day
pf['cum_value_close'] = pf['cum_size'] * pf['close']
pf['cum_value_open'] = pf['cum_size'] * pf['open']
# current weight of the given security in the portfolio - matrix
# based on the close price
pf['current_weight'] =\
(pf['cum_value_close'].T /
pf.groupby(level='date')['cum_value_close'].sum()).T
# unrealized gain on open positions at the end of day
pf['cum_unrealized_gain'] =\
pf['cum_value_close'] - pf['cum_cost_basis']
# investment return without dividends
pf['cum_investment_return'] = pf['cum_unrealized_gain'] + \
pf['cum_realized_gain']
# total return
pf['cum_total_return'] = pf['cum_unrealized_gain'] +\
pf['cum_dividends'] + pf['cum_realized_gain']
# return from price change only
pf['cum_price_return'] = pf['cum_unrealized_gain']
# calculate ROI
pf['current_return_rate'] =\
(pf['cum_total_return'] / pf['cum_cost_basis'] * 100).\
where(pf['cum_size'] != 0).fillna(method='ffill')
# assign to panelframe
self._daily = pf
return self
def _observed_period_portfolio_return(self, _):
"""
Calculate actual portfolio return over observed period
"""
pf = self._daily
res = pf.reset_index().pivot(
index='date',
columns='symbol',
values='cum_total_return').sum(axis=1) / \
pf.reset_index().pivot(
index='date',
columns='symbol',
values='cum_cost_basis').sum(axis=1)
return res[-1]
def _observed_period_market_return(self, _):
"""
Calculate actual market return over observed period
"""
pf = self._daily
return (pf.loc['SPY']['close'][-1] - pf.loc['SPY']['close'][0]) / \
pf.loc['SPY']['close'][0]
def _stock_daily_returns(self):
"""
Estimate daily noncumulative returns for empyrical
"""
pf = self._daily
daily = pf.groupby(level='symbol')['close'].\
transform(lambda x: (x-x.shift(1))/abs(x))
daily.fillna(0, inplace=True)
daily = daily.reset_index().pivot(
index='date',
columns='symbol',
values='close')
return daily
def _stock_monthly_returns(self):
"""
Monthly returns = capital gain + dividend yields for all symbols
-------------
Parameters:
- none
Returns:
- dataframe with monthly returns in % by symbol
"""
pf = self._daily
# monthly changes in stock_prices prices
# stock_prices = pf['close']
stock_prices = pf.reset_index().pivot(
index='date',
columns='symbol',
values='close')
stock_month_start = stock_prices.groupby([
lambda x: x.year,
lambda x: x.month]).first()
stock_month_end = stock_prices.groupby([
lambda x: x.year,
lambda x: x.month]).last()
stock_monthly_return = (stock_month_end - stock_month_start) /\
stock_month_start * 100
stock_monthly_div_yield = pf.reset_index().pivot(
index='date',
columns='symbol',
values='dividend_yield').groupby([
lambda x: x.year,
lambda x: x.month]).mean()
stock_monthly_div_yield.fillna(0, inplace=True)
return stock_monthly_return + stock_monthly_div_yield
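    # Illustrative reading of the value returned above (numbers made up): a stock opening the
    # month at 100 and closing at 105 contributes 5.0 from the price change, plus the month's
    # mean daily dividend yield in percent, so each cell is a total monthly return in % per symbol.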
def _ptf_monthly_returns(self):
"""
monthly changes in portfolio value
using indirect calculation with mean ratios
TODO - implement a more accurate method
-------------
Parameters:
- none
- Using stock prices, portfolio weights on every day and div yield
Returns:
- dataframe with monthly returns in % by symbol
"""
stock_monthly_change = self._stock_monthly_returns()
ptf_monthly_ratio = self._daily.reset_index().pivot(
index='date',
columns='symbol',
values='current_weight').groupby([
lambda x: x.year,
lambda x: x.month]).mean()
ptf_monthly_returns = (
stock_monthly_change * ptf_monthly_ratio).sum(1)
return ptf_monthly_returns
def _one_pfopt_case(self, stock_returns, market, weights, name):
case = {}
case['name'] = name
case['weights'] = weights
returns = np.dot(stock_returns, weights.values.reshape(-1, 1))
returns = pd.Series(returns.flatten(), index=market.index)
simple_stat_funcs = [
emp.annual_return,
emp.annual_volatility,
emp.sharpe_ratio,
emp.stability_of_timeseries,
emp.max_drawdown,
emp.omega_ratio,
emp.calmar_ratio,
emp.sortino_ratio,
emp.value_at_risk,
]
factor_stat_funcs = [
emp.alpha,
emp.beta,
]
stat_func_names = {
'annual_return': 'Annual return',
'annual_volatility': 'Annual volatility',
'alpha': 'Alpha',
'beta': 'Beta',
'sharpe_ratio': 'Sharpe ratio',
'calmar_ratio': 'Calmar ratio',
'stability_of_timeseries': 'Stability',
'max_drawdown': 'Max drawdown',
'omega_ratio': 'Omega ratio',
'sortino_ratio': 'Sortino ratio',
'value_at_risk': 'Daily value at risk',
}
ptf_stats = pd.Series()
for stat_func in simple_stat_funcs:
ptf_stats[stat_func_names[stat_func.__name__]] = stat_func(returns)
for stat_func in factor_stat_funcs:
res = stat_func(returns, market)
ptf_stats[stat_func_names[stat_func.__name__]] = res
case['stats'] = ptf_stats
return case
def stocks_risk(self):
"""
Calculate risk properties for every security in the portfolio
using `empyrical` library.
Results are consistent with self-written routine
References:
1. p. 137 of Modern Portfolio Theory and Investment Analysis
edition 9
2. faculty.washington.edu/ezivot/econ424/portfolioTheoryMatrix.pdf
-------------
Parameters:
- If include risk_free_return or not
- Using stock prices, weight ratios and div yield
Return:
- Dataframe of properties for each security in portfolio
"""
daily = self._stock_daily_returns()
# # construct resulting dataframe
df = pd.DataFrame({
'means': daily.mean(axis=0),
})
simple_stat_funcs = [
emp.annual_return,
emp.annual_volatility,
emp.sharpe_ratio,
emp.calmar_ratio,
emp.stability_of_timeseries,
emp.max_drawdown,
emp.omega_ratio,
emp.sortino_ratio,
stats.skew,
stats.kurtosis,
emp.tail_ratio,
emp.value_at_risk,
]
factor_stat_funcs = [
emp.alpha,
emp.beta,
]
stat_func_names = {
'annual_return': 'Annual return',
'cum_returns_final': 'Cumulative returns',
'annual_volatility': 'Annual volatility',
'alpha': 'Alpha',
'beta': 'Beta',
'sharpe_ratio': 'Sharpe ratio',
'calmar_ratio': 'Calmar ratio',
'stability_of_timeseries': 'Stability',
'max_drawdown': 'Max drawdown',
'omega_ratio': 'Omega ratio',
'sortino_ratio': 'Sortino ratio',
'tail_ratio': 'Tail ratio',
'value_at_risk': 'Daily value at risk',
'skew': 'Skew',
'kurtosis': 'Kurtosis'
}
for stat_func in simple_stat_funcs:
df[stat_func_names[stat_func.__name__]] =\
daily.apply(lambda x: stat_func(x)).apply(pd.Series)
for stat_func in factor_stat_funcs:
df[stat_func_names[stat_func.__name__]] =\
daily.apply(lambda x: stat_func(
x, daily['SPY'])).apply(pd.Series)
del df['means']
# assign for markowitz use
self.stocks_daily = daily
return df
def stocks_correlation(self):
"""
Calculate stock correlation matrix
References:
1. p. 137 of Modern Portfolio Theory and Investment Analysis
edition 9
2. faculty.washington.edu/ezivot/econ424/portfolioTheoryMatrix.pdf
-------------
Parameters:
- None
- Use stock prices, div yields and portfolio weights
Return:
- Correlation dataframe
"""
# get monthly changes for all stocks
stock_returns = self._stock_monthly_returns()
stock_returns['portfolio'] = self._ptf_monthly_returns()
# get mean values and std by security
returns_mean = stock_returns.mean(axis=0)
returns_std = stock_returns.std(axis=0)
# get covariance matrix
returns_covar = np.cov(
stock_returns.values, rowvar=False, ddof=1)
# get correlation matrix
std_products = np.dot(
returns_std.values.reshape(-1, 1),
returns_std.values.reshape(1, -1))
returns_corr = returns_covar / std_products
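        # Sanity note on the matrix algebra above (illustrative): element-wise,
        # corr_ij = cov_ij / (std_i * std_j), so the diagonal of returns_corr is ~1.0 and the
        # off-diagonal entries lie in [-1, 1]; np.outer(returns_std, returns_std) would build
        # the same std_products matrix as the two reshape calls.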
df_covar = pd.DataFrame(
returns_covar,
columns=returns_mean.keys(),
index=returns_mean.keys())
df_covar = df_covar.iloc[:-1, :-1]
df_corr = pd.DataFrame(
returns_corr,
columns=returns_mean.keys(),
index=returns_mean.keys())
# assign for markowitz use
self.stocks_covar = df_covar
return df_corr, df_covar
def portfolio_returns(self):
"""
Calculate portfolio evolution
total stocks value
investment returns
dividend returns
total returns
"""
pf = self._daily
cum_investment_returns = pf.reset_index().pivot(
index='date',
columns='symbol',
values='cum_investment_return').sum(axis=1)
cum_dividends = pf.reset_index().pivot(
index='date',
columns='symbol',
values='cum_dividends').sum(axis=1)
return cum_investment_returns, cum_dividends
def portfolio_summary(self):
"""
Calculate portfolio composition and summary by stock
"""
df = self._daily
df = df.groupby(level='symbol').last()
columns_to_names = OrderedDict([
('cum_size', ['Shares', '{:,.0f}']),
('current_weight', ['Portfolio weight', '{:.2f}%']),
('cum_cost_basis', ['Current cost basis', '{:,.2f}']),
('cum_value_close', ['Current value', '{:,.2f}']),
('cum_realized_gain', ['Realized P/L', '{:,.2f}']),
('cum_dividends', ['Dividends', '{:,.2f}']),
('cum_unrealized_gain', ['Unrealized P/L', '{:,.2f}']),
('cum_total_return', ['Total return', '{:,.2f}']),
('current_return_rate', ['Total return rate', '{:,.2f}%'])
])
# convert ratios to percent
df['current_weight'] = df['current_weight'] * 100
# add total row
df = df.copy() # avoid chained assignment warning
df.loc['Portfolio', :] = df.sum(axis=0)
df.loc['Portfolio', 'current_return_rate'] =\
df.loc['Portfolio', 'cum_total_return'] /\
df.loc['Portfolio', 'cum_cost_basis'] * 100
# re-order
df = df[list(columns_to_names.keys())]
# format
df = df.apply(
lambda x: x.map(columns_to_names[x.name][1].format), axis=0)
# rename columns
df.columns =\
df.columns.to_series().apply(lambda x: columns_to_names[x][0])
return df
def portfolio_stats(self):
"""
Calculate actual portfolio stats based on panelframe with daily changes
-------------
Parameters:
- None
- Uses daily panelframe
Return:
- Series with portfolio stats
TODO: daily returns or returns over cost_basis?
"""
pf = self._daily
# capital gains `cum_investment_return` or
# total return `cum_total_return`
return_to_use = 'cum_investment_return'
# cum_return = pf.reset_index().pivot(
# index='date',
# columns='symbol',
# values='cum_investment_return').sum(axis=1)
# cum_return_D1 = pf[return_to_use].sum(1).shift(1)
# cum_return_D2 = pf[return_to_use].sum(1)
# cost_basis = pf['cum_cost_basis'].sum(1)
# returns = (cum_return_D2 - cum_return_D1) / cost_basis
# returns.fillna(0, inplace=True)
# portfolio return over cost_basis
returns = pf.reset_index().pivot(
index='date',
columns='symbol',
values=return_to_use).sum(axis=1)\
.transform(lambda x: x-x.shift(1))/pf.reset_index().pivot(
index='date',
columns='symbol',
values='cum_cost_basis').sum(axis=1)
returns.fillna(0, inplace=True)
# return of just 100% SPY portfolio
# m_D1 = pf['close', :, 'market'].shift(1)
# m_D2 = pf['close', :, 'market']
# market = (m_D2 - m_D1) / pf['close', :, 'market'].iloc[0]
market = pf.reset_index().pivot(
index='date',
columns='symbol',
values='close')['SPY'].transform(lambda x: (x-x.shift(1))/x[0])
market.fillna(0, inplace=True)
"""
Using empyrical functions
and re-using code from pyfolio
"""
simple_stat_funcs = [
self._observed_period_portfolio_return,
self._observed_period_market_return,
emp.annual_return,
emp.annual_volatility,
emp.sharpe_ratio,
emp.calmar_ratio,
emp.stability_of_timeseries,
emp.max_drawdown,
emp.omega_ratio,
emp.sortino_ratio,
stats.skew,
stats.kurtosis,
emp.tail_ratio,
emp.value_at_risk,
]
factor_stat_funcs = [
emp.alpha,
emp.beta,
]
stat_func_names = {
'_observed_period_portfolio_return': 'Total return',
'_observed_period_market_return': 'Market return',
'annual_return': 'Annual return',
'cum_returns_final': 'Cumulative returns',
'annual_volatility': 'Annual volatility',
'alpha': 'Alpha',
'beta': 'Beta',
'sharpe_ratio': 'Sharpe ratio',
'calmar_ratio': 'Calmar ratio',
'stability_of_timeseries': 'Stability',
'max_drawdown': 'Max drawdown',
'omega_ratio': 'Omega ratio',
'sortino_ratio': 'Sortino ratio',
'tail_ratio': 'Tail ratio',
'value_at_risk': 'Daily value at risk',
'skew': 'Skew',
'kurtosis': 'Kurtosis'
}
ptf_stats =
|
pd.Series()
|
pandas.Series
|
# -*- coding: utf-8 -*-
# """@author: Elie"""
# run locally on python 3.8.5('dec1st_py38_xgboostetal':conda)
# %%
# Libraries
# =============================================================================
import pandas as pd
import numpy as np
import datetime
from functools import partial, reduce
from joblib import load, dump
import os
import sys
#plotting
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
import seaborn as sns
import matplotlib as mpl
#ML/Stats
from sklearn.metrics import roc_curve, auc,precision_recall_curve, f1_score
from sklearn.metrics import roc_curve, precision_recall_curve, auc
import xgboost
from xgboost import XGBClassifier
pd.options.mode.chained_assignment = None
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams["font.size"] = "4"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel =
|
pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
|
pandas.read_csv
|
import ast
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import math
import numpy as np
import json
import os
import os.path as path
directory = '.'
with open('config.json') as json_file:
config = json.load(json_file)
config['checkerboard_medium'] = {
'classes': 2,
'sota': 0.00
}
factor = None
#factor = 0.75
if factor:
for dataset in config.keys():
config[dataset]['sota'] = config[dataset]['sota']*factor
# Prepare df_results
df_results =
|
pd.read_csv("results.csv")
|
pandas.read_csv
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script that collects RAMD data and determines what extra Replicas/ensembles need to be run/rerun.
Created on Fri Jan 31 14:44:55 2020
@author: andrewpotterton
"""
###Libraries
import numpy as np
from os import path
import pandas as pd
###Functions
def get_end_point(filename : str) -> int:
'''
Args: filename = is the RAMDLOG file.
Returns: Endpoint of simulations or np.nan if did not finish.
'''
end_point = np.nan #sets default returned value as np.nan value (so if the end of the file is not EXIT line then np.nan is returned)
with open(filename, 'r') as f: #gets the last line of the RAMDLOG file
lines = f.read().splitlines()
last_line = lines[-1]
last_line = last_line.split(' ') #split last line by spaces
if last_line[0] == 'EXIT:': #if line starts with EXIT, must be the exit line. Therefore simulation finished.
end_point = int(last_line[1]) #assigns the end point as the frame number.
return end_point
def get_pandas_DF(ligand_name : str):
'''
    Args: ligand_name = the ligand name as a string (as it appears in RAMDLOG_[CGS].log)
Returns: pandas DataFrame of Ensemble and replica data for each ligand
'''
globals()['column_names'] = ['Ensemble1', 'Ensemble2', 'Ensemble3', 'Ensemble4', 'Ensemble5', 'Ensemble6', 'Ensemble7', 'Ensemble8']
globals()['row_names'] = ['Rep1','Rep2','Rep3','Rep4','Rep5','Rep6','Rep7','Rep8','Rep9','Rep10','Rep11','Rep12','Rep13','Rep14','Rep15','Rep16','Rep17','Rep18','Rep19','Rep20','Rep21','Rep22','Rep23','Rep24','Rep25']
Ensemble_values =[]
for Ensemble_no in range(1,9):
Replica_values =[]
for Rep in range(1,26):
if path.exists('Ensemble'+str(Ensemble_no)+'/Rep'+str(Rep)+'/RAMDLOG_'+ligand_name+'.log'):
rep_value = get_end_point('Ensemble'+str(Ensemble_no)+'/Rep'+str(Rep)+'/RAMDLOG_'+ligand_name+'.log')
Replica_values.append(rep_value)
else:
Replica_values.append(np.nan)
Ensemble_values.append(Replica_values)
#Transforms Ensemble_values list of lists to DataFrame. columns and index are inversed as in the next step, the dataframe is transposed
DataFrame =
|
pd.DataFrame(Ensemble_values,columns=row_names, index=column_names)
|
pandas.DataFrame
|
from __future__ import print_function
from datetime import datetime
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
import ulmo
import test_util
message_test_sets = [
{
'dcp_address': 'C5149430',
'parser': 'twdb_stevens',
'message_timestamp': "/Date(1559316497303)/",
},
{
'dcp_address': 'C514D73A',
'parser': 'twdb_sutron',
'message_timestamp': "/Date(1559158095000)/",
},
{
'dcp_address': 'C516C1B8',
'parser': 'stevens',
'message_timestamp': "/Date(1559569431753)/",
}
]
def test_parse_dcp_message_timestamp():
for test_set in message_test_sets:
dcp_data_file = 'noaa/goes/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file, force=True):
data = ulmo.noaa.goes.get_data(test_set['dcp_address'], hours=12)
            assert data['message_timestamp_utc'][-1] == datetime.fromtimestamp(
                int(test_set['message_timestamp'].strip('/Date()'))/1000
            )
twdb_stevens_test_sets = [
{
'message_timestamp_utc': datetime(2013, 10, 30, 15, 28, 18),
'dcp_message': '"BV:11.9 193.76$ 193.70$ 193.62$ 193.54$ 193.49$ 193.43$ 193.37$ 199.62$ 200.51$ 200.98$ 195.00$ 194.33$ ',
'dcp_address': '',
'return_value': [
['2013-10-30 15:00:00', 'bv', 11.90],
['2013-10-30 15:00:00', 'wl', 193.76],
['2013-10-30 14:00:00', 'wl', 193.70],
['2013-10-30 13:00:00', 'wl', 193.62],
['2013-10-30 12:00:00', 'wl', 193.54],
['2013-10-30 11:00:00', 'wl', 193.49],
['2013-10-30 10:00:00', 'wl', 193.43],
['2013-10-30 09:00:00', 'wl', 193.37],
['2013-10-30 08:00:00', 'wl', 199.62],
['2013-10-30 07:00:00', 'wl', 200.51],
['2013-10-30 06:00:00', 'wl', 200.98],
['2013-10-30 05:00:00', 'wl', 195.00],
['2013-10-30 04:00:00', 'wl', 194.33],
],
},
{
'message_timestamp_utc': datetime(2013, 10, 30, 15, 28, 18),
'dcp_message': '"BV:12.6 Channel:5 Time:28 +304.63 +304.63 +304.63 +304.56 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.71 Channel:6 Time:28 +310.51 +310.66 +310.59 +310.51 +310.51 +310.59 +310.59 +310.51 +310.66 +310.51 +310.66 +310.59 ',
'dcp_address': '',
'return_value': [
['2013-10-30 15:00:00', 'bv', 12.60],
['2013-10-30 15:00:00', 'time', 28.00],
['2013-10-30 15:00:00', 'time', 28.00],
['2013-10-30 15:00:00', '5', 304.63],
['2013-10-30 14:00:00', '5', 304.63],
['2013-10-30 13:00:00', '5', 304.63],
['2013-10-30 12:00:00', '5', 304.56],
['2013-10-30 11:00:00', '5', 304.63],
['2013-10-30 10:00:00', '5', 304.63],
['2013-10-30 09:00:00', '5', 304.63],
['2013-10-30 08:00:00', '5', 304.63],
['2013-10-30 07:00:00', '5', 304.63],
['2013-10-30 06:00:00', '5', 304.63],
['2013-10-30 05:00:00', '5', 304.63],
['2013-10-30 04:00:00', '5', 304.71],
['2013-10-30 15:00:00', '6', 310.51],
['2013-10-30 14:00:00', '6', 310.66],
['2013-10-30 13:00:00', '6', 310.59],
['2013-10-30 12:00:00', '6', 310.51],
['2013-10-30 11:00:00', '6', 310.51],
['2013-10-30 10:00:00', '6', 310.59],
['2013-10-30 09:00:00', '6', 310.59],
['2013-10-30 08:00:00', '6', 310.51],
['2013-10-30 07:00:00', '6', 310.66],
['2013-10-30 06:00:00', '6', 310.51],
['2013-10-30 05:00:00', '6', 310.66],
['2013-10-30 04:00:00', '6', 310.59],
]
},
{
'message_timestamp_utc': datetime(2013, 10, 30, 15, 28, 18),
'dcp_message': '"BV:12.6 ',
'dcp_address': '',
'return_value': [
['2013-10-30 15:00:00', 'bv', 12.60],
]
},
{
'message_timestamp_utc': datetime(2013, 10, 30, 15, 28, 18),
'dcp_message': """79."$}X^pZBF8iB~i>>Xmj[bvr^Zv%JXl,DU=l{uu[ time(|@2q^sjS!""",
'dcp_address': '',
'return_value': pd.DataFrame()
},
]
def test_parser_twdb_stevens():
for test_set in twdb_stevens_test_sets:
print('testing twdb_stevens parser')
if isinstance(test_set['return_value'], pd.DataFrame):
parser = getattr(ulmo.noaa.goes.parsers, 'twdb_stevens')
assert_frame_equal(
|
pd.DataFrame()
|
pandas.DataFrame
|
import requests as req
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import networkx as nx
import re
import os, time, math
from gensim.models import Word2Vec
sb.set()
# data folder path
data_filepath = "static/data/tags.csv"
def file_update_time():
""" returns true if tags file is not updated for more than 2 days"""
filepath = "static/data/update_time.txt" # file of interests
two_days_in_seconds = 172800 # file reupdates in two days
with open(filepath, "r+") as file:
modification_time = file.read()
modified_time = int(modification_time.split()[-1])
time_difference = time.time() - modified_time
return time_difference >= two_days_in_seconds
def convert_to_json(json_data):
"""convert json to python list format"""
list_data = []
dict_data = json.loads(json_data)
for item in dict_data["items"]:
list_data.append([item["tags"]])
list_data_flatten = [item for item in list_data]
return list_data_flatten
def get_stackexg_data():
"""get json-format data from stackexg pages"""
number_of_pages = 120 # how many web pages wants to scrape
number_of_records = 10000 # number of rows of tags wanted
tags = []
for page_number in range(1, number_of_pages + 1):
data_science_url = "https://api.stackexchange.com/2.2/questions?page={}&pagesize=100&order=desc&sort=activity&site=datascience".format(
page_number
)
if len(tags) == number_of_records:
break
req_json_data = req.get(data_science_url)
        # the file_update_time() guard doubles as a brake: if the tags file is still fresh
        # (updated within the last two days), stop requesting more pages
if not file_update_time():
break # break the loop if it is before update time
if (
req_json_data.status_code == 200
): # check request is successful before parsing webpage data
rows_list_data = convert_to_json(req_json_data.text)
for row in rows_list_data:
tags.append(row)
else:
print("error due to many requests from this ip address.")
if len(tags) >= number_of_records:
# initialize an empty dataframe, write the tags into dataframe.
df =
|
pd.DataFrame(tags, columns=["tags"])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import os
import time
import mshoot
# Random seed
np.random.seed(12345)
# Paths
ms_file = os.path.join('examples', 'bs2019', 'measurements.csv')
fmu_dir = os.path.join('examples', 'bs2019', 'case1', 'models')
# FMU list
fmus = os.listdir(fmu_dir)
# Simulation period
t0 = '2018-04-05 00:00:00'
t1 = '2018-04-08 00:00:00'
# Read measurements
ms = pd.read_csv(ms_file)
ms['datetime'] =
|
pd.to_datetime(ms['datetime'])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 30 10:34:37 2016
@author: slauniai
"""
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
eps = np.finfo(float).eps # machine epsilon
def clear_console():
"""
clears Spyder console window - does not affect namespace
"""
import os
clear = lambda: os.system('cls')
clear()
return None
""" ******* Get forcing data for FIHy and FICage sites ******** """
def read_HydeDaily(filename):
cols=['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','PrecSmear','Prec','U','Pamb',
'SWE0','SWCh','SWCa','SWCb','SWCc', 'Tsh','Tsa','Tsb','Tsc','RnetFlag','Trfall','Snowdepth','Snowdepthstd','SWE','SWEstd','Roff1','Roff2']
dat=pd.read_csv(filename,sep='\s+',header=None, names=None, parse_dates=[[0,1,2]], keep_date_col=False)
dat.columns=cols
dat.index=dat['time']; dat=dat.drop(['time','SWE0'],axis=1)
forc=dat[['doy','Ta','VPD','Prec','Par','U']]; forc['Par']= 1/4.6*forc['Par']; forc['Rg']=2.0*forc['Par']
forc['VPD'][forc['VPD']<=0]=eps
#relatively extractable water, Hyde A-horizon
#poros = 0.45
fc = 0.30
wp = 0.10
Wliq = dat['SWCa']
Rew = np.maximum( 0.0, np.minimum( (Wliq-wp)/(fc - wp + eps), 1.0) )
forc['Rew'] = Rew
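    # Worked example for Rew (values illustrative): with field capacity fc = 0.30 and wilting
    # point wp = 0.10, a volumetric water content of 0.20 gives
    # Rew = (0.20 - 0.10) / (0.30 - 0.10) = 0.5, i.e. half of the extractable water remains.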
forc['CO2'] = 380.0
# beta, soil evaporation parameter
#forc['beta'] = Wliq / fc
return dat, forc
def read_CageDaily(filepath):
cols=['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','SWCa','PrecSmear','Prec','U','Pamb']
dat1=
|
pd.read_csv(filepath + 'HydeCage4yr-2000.txt',sep='\s+',header=None, names=None, parse_dates=[[0,1,2]], keep_date_col=False)
|
pandas.read_csv
|
from kfp.v2.dsl import (Dataset, Input, Output)
def calc_market_watch(
date_ref: str,
# comp_result : str,
):
import pandas as pd
import numpy as np
import pandas_gbq # type: ignore
import time
from trading_calendars import get_calendar
cal_krx = get_calendar('XKRX')
from pandas.tseries.offsets import CustomBusinessDay
cbday = CustomBusinessDay(holidays=cal_krx.adhoc_holidays)
def get_df_market(date_ref, n_before):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
date_ref_b = (pd.Timestamp(date_ref) - pd.Timedelta(n_before, 'd')).strftime('%Y-%m-%d')
sql = f'''
SELECT
*
FROM
`dots-stock.red_lion.df_markets_clust_parti`
WHERE
date between "{date_ref_b}" and "{date_ref_}"
'''
PROJECT_ID = 'dots-stock'
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID, use_bqstorage_api=True)
df = df.drop_duplicates()
return df
df_markets_1 =get_df_market(date_ref, 20)
def get_n_day_straight_up(NN):
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_N_d_up = (df_markets_
            [lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
)
[lambda df: df.oc > 0]
[lambda df: df.ChagesRatio > 0]
.groupby(['Name'])
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
).index.to_list()
return l_N_d_up
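    # How the filter above works (sketch): a name survives only if, on each of its last NN
    # trading days with a non-zero Open, both Close > Open (oc > 0) and ChagesRatio > 0;
    # counting the surviving rows per name and requiring count == NN therefore selects
    # NN-day straight gainers.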
def get_n_day_straight_dn(NN):
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_N_d_up = (df_markets_
            [lambda df: df.Open != 0] # an Open price of 0 means the stock did not trade that day
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
)
[lambda df: df.oc < 0]
[lambda df: df.ChagesRatio < 0]
.groupby(['Name'])
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
).index.to_list()
return l_N_d_up
def get_n_day_straight_up_last_dn(NN):
        '''Stocks that rose for several (NN) consecutive days and then fell on the last day
        Return : list of stock names
        '''
df_markets_ = (df_markets_1
[lambda df: df.date >= pd.Timestamp(date_ref) - (NN-1) * cbday ]
.sort_values('date', ascending=True)
)
l_Nd_dn_last_up = (df_markets_
[lambda df: df.Open != 0] # Open 가격이 0 인 경우 그날 거래 없었던 것
.assign(
oc=lambda df: (df.Close - df.Open)/df.Open,
last_day=lambda df: df['date'] == pd.Timestamp(date_ref),
last_day_down =
lambda df: (df.last_day == True) & (df.oc < 0),
rest_day_up =
lambda df: (df.last_day == False) & (df.oc > 0),
both_met =
lambda df: (df.last_day_down | df.rest_day_up),
)
            # keep only the rows that satisfy the filter conditions
            .loc[lambda df: df.both_met == True]
            # group by name and check that the number of qualifying days per stock equals NN
.groupby('Name')
[['Code']].agg('count')
.rename(columns={'Code':'count_Nd_up'})
[lambda df: df.count_Nd_up == NN]
# [lambda df: df['Code'] == NN]
).index.to_list()
return l_Nd_dn_last_up
def get_n_day_straight_dn_last_up(NN):
        '''Stocks that fell for several (NN) consecutive days and then rose on the last day
        Return : list of stock names
        '''
df_markets_ = (df_markets_1
[lambda df: df.date >=
|
pd.Timestamp(date_ref)
|
pandas.Timestamp
|
#!/usr/bin/python
"""script to generate stimuli
"""
import numpy as np
from matplotlib import pyplot as plt
import itertools
import pandas as pd
import scipy.io as sio
import seaborn as sns
import os
import argparse
import json
def bytescale_func(data, cmin=None, cmax=None, high=254, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-254, so that mid is 127).
If the input image already has dtype uint8, no scaling is done.
This is copied from scipy.misc, where it is deprecated
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 254.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> import numpy as np
>>> from sfp.utils import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == np.uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
def mkR(size, exponent=1, origin=None):
'''make distance-from-origin (r) matrix
Compute a matrix of dimension SIZE (a [Y X] list/tuple, or a scalar)
containing samples of a radial ramp function, raised to power EXPONENT
(default = 1), with given ORIGIN (default = (size+1)//2, (0, 0) = upper left).
NOTE: the origin is not rounded to the nearest int
'''
if not hasattr(size, '__iter__'):
size = (size, size)
if origin is None:
origin = ((size[0]+1)/2., (size[1]+1)/2.)
elif not hasattr(origin, '__iter__'):
origin = (origin, origin)
xramp, yramp = np.meshgrid(np.arange(1, size[1]+1)-origin[1],
np.arange(1, size[0]+1)-origin[0])
if exponent <= 0:
# zero to a negative exponent raises:
# ZeroDivisionError: 0.0 cannot be raised to a negative power
r = xramp ** 2 + yramp ** 2
res = np.power(r, exponent / 2.0, where=(r != 0))
else:
res = (xramp ** 2 + yramp ** 2) ** (exponent / 2.0)
return res
def mkAngle(size, phase=0, origin=None):
'''make polar angle matrix (in radians)
Compute a matrix of dimension SIZE (a [Y X] list/tuple, or a scalar)
containing samples of the polar angle (in radians, CW from the X-axis,
ranging from -pi to pi), relative to angle PHASE (default = 0), about ORIGIN
pixel (default = (size+1)/2).
NOTE: the origin is not rounded to the nearest int
'''
if not hasattr(size, '__iter__'):
size = (size, size)
if origin is None:
origin = ((size[0]+1)/2., (size[1]+1)/2.)
elif not hasattr(origin, '__iter__'):
origin = (origin, origin)
xramp, yramp = np.meshgrid(np.arange(1, size[1]+1)-origin[1],
np.arange(1, size[0]+1)-origin[0])
xramp = np.array(xramp)
yramp = np.array(yramp)
res = np.arctan2(yramp, xramp)
res = ((res+(np.pi-phase)) % (2*np.pi)) - np.pi
return res
def log_polar_grating(size, w_r=0, w_a=0, phi=0, ampl=1, origin=None, scale_factor=1):
"""Make a sinusoidal grating in logPolar space.
this allows for the easy creation of stimuli whose spatial frequency decreases with
eccentricity, as the peak spatial frequency of neurons in the early visual cortex does.
Examples
============
radial: `log_polar_grating(512, 4, 10)`
angular: `log_polar_grating(512, 4, w_a=10)`
spiral: `log_polar_grating(512, 4, 10, 10)`
plaid: `log_polar_grating(512, 4, 10) + log_polar_grating(512, 4, w_a=10)`
Parameters
=============
size: scalar. size of the image (only square images permitted).
w_r: int, logRadial frequency. Units are matched to those of the angular frequency (`w_a`).
w_a: int, angular frequency. Units are cycles per revolution around the origin.
phi: int, phase (in radians).
ampl: int, amplitude
origin: 2-tuple of floats, the origin of the image, from which all distances will be measured
and angles will be relative to. By default, the center of the image
scale_factor: int or float. how to scale the distance from the origin before computing the
grating. this is most often done for checking aliasing; e.g., set size_2 = 100*size_1 and
scale_factor_2 = 100*scale_factor_1. then the two gratings will have the same pattern, just
sampled differently
"""
assert not hasattr(size, '__iter__'), "Only square images permitted, size must be a scalar!"
rad = mkR(size, origin=origin)/scale_factor
# if the origin is set such that it lies directly on a pixel, then one of the pixels will have
# distance 0, that means we'll have a -inf out of np.log2 and thus a nan from the cosine. this
# little hack avoids that issue.
if 0 in rad:
rad += 1e-12
lrad = np.log2(rad**2)
theta = mkAngle(size, origin=origin)
return ampl * np.cos(((w_r * np.log(2))/2) * lrad + w_a * theta + phi)
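# Example usage (illustrative only; the size and frequencies below are arbitrary choices, not
# values used elsewhere in this script): the three stimulus classes from the docstring differ
# only in which frequency is non-zero.
#   radial:  log_polar_grating(256, w_r=4)
#   angular: log_polar_grating(256, w_a=10)
#   spiral:  log_polar_grating(256, w_r=4, w_a=10)
# Each call returns a (256, 256) float array with values in [-ampl, ampl].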
def _create_better_sampled_grating(orig_size, w_r=0, w_a=0, phi=0, ampl=1, orig_origin=None,
orig_scale_factor=1, check_scale_factor=99):
if check_scale_factor % 2 == 0:
raise Exception("For this aliasing check to work, the check_scale_factor must be odd!")
if orig_origin is None:
origin = None
else:
# this preserves origin's shape, regardless of whether it's an iterable or a scalar
origin = np.array(orig_origin) * check_scale_factor - (check_scale_factor - 1)/2
return log_polar_grating(orig_size*check_scale_factor, w_r, w_a, phi, ampl, origin,
orig_scale_factor*check_scale_factor)
def aliasing_plot(better_sampled_stim, stim, slices_to_check=None, axes=None, **kwargs):
"""Plot to to check aliasing.
    This does not create the stimuli, only plots them (see `check_aliasing` or
    `check_aliasing_with_mask` for functions that create the stimuli and then call this to plot them)
to add to an existing figure, pass axes (else a new one will be created)
"""
size = stim.shape[0]
check_scale_factor = better_sampled_stim.shape[0] // size
if slices_to_check is None:
slices_to_check = [(size+1)//2]
elif not hasattr(slices_to_check, '__iter__'):
slices_to_check = [slices_to_check]
if axes is None:
fig, axes = plt.subplots(ncols=len(slices_to_check), squeeze=False,
figsize=(5*len(slices_to_check), 5), **kwargs)
# with squeeze=False, this will always be a 2d array, but because we only set ncols, it
# will only have axes in one dimension
axes = axes[0]
x0 = np.array(list(range(size))) / float(size) + 1./(size*2)
x1 = (np.array(list(range(better_sampled_stim.shape[0]))) / float(better_sampled_stim.shape[0])
+ 1./(better_sampled_stim.shape[0]*2))
for i, ax in enumerate(axes):
ax.plot(x1, better_sampled_stim[:, check_scale_factor*slices_to_check[i] +
(check_scale_factor - 1)//2])
ax.plot(x0, stim[:, slices_to_check[i]], 'o:')
def check_aliasing(size, w_r=0, w_a=0, phi=0, ampl=1, origin=None, scale_factor=1,
slices_to_check=None, check_scale_factor=99):
"""Create a simple plot to visualize aliasing
arguments are mostly the same as for log_polar_grating. this creates both the specified
stimulus, `orig_stim`, and a `better_sampled_stim`, which has `check_scale_factor` more points
in each direction. both gratings are returned and a quick plot is generated.
    NOTE that because this requires creating a much larger grating, it can take a while. Reduce
`check_scale_factor` to speed it up (at the risk of your "ground truth" becoming aliased)
slices_to_check: list, None, or int. slices of the stimulus to plot. if None, will plot
center
"""
orig_stim = log_polar_grating(size, w_r, w_a, phi, ampl, origin, scale_factor)
better_sampled_stim = _create_better_sampled_grating(size, w_r, w_a, phi, ampl, origin,
scale_factor, check_scale_factor)
aliasing_plot(better_sampled_stim, orig_stim, slices_to_check)
return orig_stim, better_sampled_stim
def _fade_mask(mask, inner_number_of_fade_pixels, outer_number_of_fade_pixels, origin=None):
"""note that mask must contain 0s where you want to mask out, 1s elsewhere
"""
# if there's no False in mask, then we don't need to mask anything out. and if there's only
# False, we don't need to fade anything. and if there's no fade pixels, then we don't fade
# anything
if False not in mask or True not in mask or (inner_number_of_fade_pixels == 0 and
outer_number_of_fade_pixels == 0):
return mask
size = mask.shape[0]
rad = mkR(size, origin=origin)
inner_rad = (mask*rad)[(mask*rad).nonzero()].min()
# in this case, there really isn't an inner radius, just an outer one, so we ignore this
if inner_rad == rad.min():
inner_rad = 0
inner_number_of_fade_pixels = 0
outer_rad = (mask*rad).max()
# in order to get the right number of pixels to act as transition, we set the frequency based
# on the specified number_of_fade_pixels
def inner_fade(x):
if inner_number_of_fade_pixels == 0:
return (-np.cos(2*np.pi*(x-inner_rad) / (size/2.))+1)/2
inner_fade_freq = (size/2.) / (2*inner_number_of_fade_pixels)
return (-np.cos(inner_fade_freq*2*np.pi*(x-inner_rad) / (size/2.))+1)/2
def outer_fade(x):
if outer_number_of_fade_pixels == 0:
return (-np.cos(2*np.pi*(x-outer_rad) / (size/2.))+1)/2
outer_fade_freq = (size/2.) / (2*outer_number_of_fade_pixels)
return (-np.cos(outer_fade_freq*2*np.pi*(x-outer_rad) / (size/2.))+1)/2
faded_mask = np.piecewise(rad,
[rad < inner_rad,
(rad >= inner_rad) & (rad <= (inner_rad + inner_number_of_fade_pixels)),
(rad > (inner_rad + inner_number_of_fade_pixels)) & (rad < outer_rad - outer_number_of_fade_pixels),
(rad >= outer_rad - outer_number_of_fade_pixels) & (rad <= (outer_rad)),
(rad > (outer_rad))],
[0, inner_fade, 1, outer_fade, 0])
return faded_mask
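# Sketch of the fade above: inner_fade and outer_fade are raised-cosine half-ramps. At the mask
# edge the cosine argument is 0 (fade value 0) and after number_of_fade_pixels it reaches pi
# (fade value 1), so the binary mask rises smoothly from 0 to 1 over the inner fade pixels and
# falls from 1 back to 0 over the outer fade pixels instead of having a hard edge.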
def _calc_sf_analytically(x, y, stim_type='logpolar', w_r=None, w_a=None, w_x=None, w_y=None):
"""helper function that calculates spatial frequency (in cpp)
this should NOT be called directly. it is the function that gets called by `sf_cpp` and
`create_sf_maps_cpp`.
"""
if stim_type == 'logpolar':
if w_r is None or w_a is None or w_x is not None or w_y is not None:
raise Exception("When stim_type is %s, w_r / w_a must be set and w_x / w_y must be"
" None!" % stim_type)
elif stim_type == 'constant':
if w_r is not None or w_a is not None or w_x is None or w_y is None:
raise Exception("When stim_type is constant, w_x / w_y must be set and w_a / w_r must"
" be None!")
else:
raise Exception("Don't know how to handle stim_type %s!" % stim_type)
# we want to approximate the spatial frequency of our log polar gratings. We can do that using
# the first two terms of the Taylor series. Since our gratings are of the form cos(g(X)) (where
# X contains both x and y values), then to approximate them at location X_0, we'll use
# cos(g(X_0) + g'(X_0)(X-X_0)), where g'(X_0) is the derivative of g at X_0 (with separate x
# and y components). g(X_0) is the phase of the approximation and so not important here, but
# that g'(X_0) is the local spatial frequency that we're interested in. Thus we take the
# derivative of our log polar grating function with respect to x and y in order to get dx and
# dy, respectively (after some re-arranging and cleaning up). the constant stimuli, by
# definition, have a constant spatial frequency every where in the image.
if stim_type == 'logpolar':
dy = (y * w_r + w_a * x) / (x**2 + y**2)
dx = (x * w_r - w_a * y) / (x**2 + y**2)
elif stim_type == 'constant':
try:
size = x.shape
            dy = w_y * np.ones(size)
            dx = w_x * np.ones(size)
        # if x is an int or a float, it has no shape attribute, so the lines above raise an
        # AttributeError (a 0-d array like np.array(1) has an empty shape tuple and works
        # fine); in that case we fall back to the scalar frequencies below
        except (TypeError, AttributeError):
dy = w_y
dx = w_x
if stim_type == 'logpolar':
# Since x, y are in pixels (and so run from ~0 to ~size/2), dx and dy need to be divided by
# 2*pi in order to get the frequency in cycles / pixel. This is analogous to the 1d case:
# if x runs from 0 to 1 and f(x) = cos(w * x), then the number of cycles in f(x) is w /
# 2*pi. (the values for the constant stimuli are given in cycles per pixel already)
dy /= 2*np.pi
dx /= 2*np.pi
# I want this to lie between 0 and 2*pi, because otherwise it's confusing
direction = np.mod(np.arctan2(dy, dx), 2*np.pi)
return dx, dy, np.sqrt(dx**2 + dy**2), direction
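# NOTE: the following helper is an illustrative sketch added for clarity, not part of the
# original module. It checks the analytic formula above at a single point: for a purely
# radial grating (w_a=0), the magnitude should reduce to w_r / (2*pi*r).
def _example_check_sf_formula(w_r=40, x=30., y=40.):
    """sanity-check _calc_sf_analytically against the closed-form radial-only magnitude"""
    dx, dy, mag, direc = _calc_sf_analytically(x, y, 'logpolar', w_r=w_r, w_a=0)
    expected_mag = w_r / (2 * np.pi * np.sqrt(x**2 + y**2))
    assert np.isclose(mag, expected_mag)
    return mag, expected_mag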
def sf_cpp(eccen, angle, stim_type='logpolar', w_r=None, w_a=None, w_x=None, w_y=None):
"""calculate the spatial frequency in cycles per pixel.
this function returns spatial frequency values; it returns values that give the spatial
frequency at the point specified by x, y (if you instead want a map showing the spatial
frequency everywhere in the specified stimulus, use `create_sf_maps_cpp`). returns four values:
the spatial frequency in the x direction (dx), the spatial frequency in the y direction (dy),
the magnitude (sqrt(dx**2 + dy**2)) and the direction (arctan2(dy, dx))
In most cases, you want the magnitude, as this is the local spatial frequency of the specified
grating at that point.
NOTE: for this to work, the zero for the angle you're passing in must correspond to the right
horizontal meridian, angle should lie between 0 and 2*pi, and you should move clockwise as
angle increases. This is all so it corresponds to the values for the direction of the spatial
frequency.
eccen, angle: floats. The location you want to find the spatial frequency for, in polar
coordinates. eccen should be in pixels, NOT degrees. angle should be in radians.
stim_type: {'logpolar', 'constant'}. which type of stimuli to generate the spatial frequency
map for. This matters because we determine the spatial frequency maps analytically and so
*cannot* do so in a stimulus-driven manner. if 'logpolar', the log-polar gratings created by
log_polar_grating. if 'constant', the constant gratings created by create_sin_cpp (and
gen_constant_stim_set). If 'constant', then w_x and w_y must be set, w_r and w_a must be None;
if 'logpolar', then the opposite.
"""
x = eccen * np.cos(angle)
y = eccen * np.sin(angle)
if x == 0:
x += 1e-12
if y == 0:
y += 1e-12
return _calc_sf_analytically(x, y, stim_type, w_r, w_a, w_x, w_y)
def sf_cpd(eccen, angle, pixel_diameter=714, degree_diameter=8.4, stim_type='logpolar', w_r=None,
w_a=None, w_x=None, w_y=None):
"""calculate the spatial frequency in cycles per degree.
this function returns spatial frequency values; it returns values that give the spatial
frequency at the point specified by x, y (if you instead want a map showing the spatial
frequency everywhere in the specified stimulus, use `create_sf_maps_cpp`). returns four values:
the spatial frequency in the x direction (dx), the spatial frequency in the y direction (dy),
the magnitude (sqrt(dx**2 + dy**2)) and the direction (arctan2(dy, dx))
In most cases, you want the magnitude, as this is the local spatial frequency of the specified
grating at that point.
NOTE: for this to work, the zero for the angle you're passing in must correspond to the right
horizontal meridian, angle should lie between 0 and 2*pi, and you should move clockwise as
angle increases. This is all so it corresponds to the values for the direction of the spatial
frequency.
degree_diameter: int, the visual angle (in degrees) corresponding to the diameter of the full
image
eccen, angle: floats. The location you want to find the spatial frequency for, in polar
coordinates. eccen should be in degrees (NOT pixels). angle should be in radians.
stim_type: {'logpolar', 'constant'}. which type of stimuli to generate the spatial frequency
map for. This matters because we determine the spatial frequency maps analytically and so
*cannot* do so in a stimulus-driven manner. if 'logpolar', the log-polar gratings created by
log_polar_grating. if 'constant', the constant gratings created by create_sin_cpp (and
gen_constant_stim_set). If 'constant', then w_x and w_y must be set, w_r and w_a must be None;
if 'logpolar', the opposite.
"""
conversion_factor = degree_diameter / pixel_diameter
# this is in degrees, so we divide it by deg/pix to get the eccen in pix
eccen /= conversion_factor
dx, dy, magnitude, direction = sf_cpp(eccen, angle, stim_type, w_r, w_a, w_x, w_y)
# these are all in cyc/pix, so we divide them by deg/pix to get them in cyc/deg
dx /= conversion_factor
dy /= conversion_factor
magnitude /= conversion_factor
return dx, dy, magnitude, direction
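# Illustrative sketch (not part of the original module): converting cycles per pixel to
# cycles per degree just divides by the degrees-per-pixel conversion factor, e.g. 0.1 cpp
# on a display with 714 pixels spanning 8.4 degrees is 0.1 / (8.4/714) = 8.5 cpd.
def _example_cpp_to_cpd(cpp=.1, pixel_diameter=714, degree_diameter=8.4):
    return cpp / (degree_diameter / pixel_diameter)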
def sf_origin_polar_cpd(eccen, angle, pixel_diameter=714, degree_diameter=8.4,
stim_type='logpolar', w_r=None, w_a=None, w_x=None, w_y=None):
"""calculate the local origin-referenced polar spatial frequency (radial/angular) in cpd
returns the local spatial frequency with respect to the radial and angular directions.
NOTE: for this to work, the zero for the angle you're passing in must correspond to the right
horizontal meridian, angle should lie between 0 and 2*pi, and you should move clockwise as
angle increases. This is all so it corresponds to the values for the direction of the spatial
frequency.
    pixel_diameter: int, the diameter of the full image, in pixels
    degree_diameter: float, the visual angle (in degrees) corresponding to the diameter of the
    full image
eccen, angle: floats. The location you want to find the spatial frequency for, in polar
coordinates. eccen should be in degrees (NOT pixels). angle should be in radians.
stim_type: {'logpolar', 'constant'}. which type of stimuli to generate the spatial frequency
map for. This matters because we determine the spatial frequency maps analytically and so
*cannot* do so in a stimulus-driven manner. if 'logpolar', the log-polar gratings created by
log_polar_grating. if 'constant', the constant gratings created by create_sin_cpp (and
gen_constant_stim_set). If 'constant', then w_x and w_y must be set, w_r and w_a must be None;
if 'logpolar', then the opposite.
"""
_, _, mag, direc = sf_cpd(eccen, angle, pixel_diameter, degree_diameter, stim_type, w_r, w_a,
w_x, w_y)
new_angle = np.mod(direc - angle, 2*np.pi)
dr = mag * np.cos(new_angle)
da = mag * np.sin(new_angle)
return dr, da, new_angle
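# Illustrative sketch (not part of the original module): for a purely radial grating
# (w_a=0), the local spatial frequency points entirely along the radial direction, so the
# angular component `da` returned by sf_origin_polar_cpd should be ~0.
def _example_radial_grating_is_radial(eccen=2., angle=np.pi/3, w_r=40):
    dr, da, new_angle = sf_origin_polar_cpd(eccen, angle, stim_type='logpolar',
                                            w_r=w_r, w_a=0)
    assert np.isclose(da, 0, atol=1e-10)
    return dr, da, new_angle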
def create_sf_maps_cpp(pixel_diameter=714, origin=None, scale_factor=1, stim_type='logpolar',
w_r=None, w_a=None, w_x=None, w_y=None):
"""Create maps of spatial frequency in cycles per pixel.
this function creates spatial frequency maps; that is, it returns images that show the spatial
frequency everywhere in the specified stimulus (if you instead want the spatial frequency at a
specific point, use `sf_cpp`). returns four maps: the spatial frequency in the x direction
(dx), the spatial frequency in the y direction (dy), the magnitude (sqrt(dx**2 + dy**2)) and
the direction (arctan2(dy, dx))
In most cases, you want the magnitude, as this is the local spatial frequency of the
corresponding log polar grating at that point.
stim_type: {'logpolar', 'constant'}. which type of stimuli to generate the spatial frequency
map for. This matters because we determine the spatial frequency maps analytically and so
*cannot* do so in a stimulus-driven manner. if 'logpolar', the log-polar gratings created by
log_polar_grating. if 'constant', the constant gratings created by create_sin_cpp (and
gen_constant_stim_set). If 'constant', then w_x and w_y must be set, w_r and w_a must be None;
if 'logpolar', then the opposite.
"""
assert not hasattr(pixel_diameter, '__iter__'), "Only square images permitted, pixel_diameter must be a scalar!"
pixel_diameter = int(pixel_diameter)
if origin is None:
origin = ((pixel_diameter+1) / 2., (pixel_diameter+1) / 2.)
# we do this in terms of x and y
x, y = np.divide(np.meshgrid(np.array(list(range(1, pixel_diameter+1))) - origin[0],
np.array(list(range(1, pixel_diameter+1))) - origin[1]),
scale_factor)
# if the origin is set such that it lies directly on a pixel, then one of the pixels will have
# distance 0 and that means we'll have a divide by zero coming up. this little hack avoids that
# issue.
if 0 in x:
x += 1e-12
if 0 in y:
y += 1e-12
return _calc_sf_analytically(x, y, stim_type, w_r, w_a, w_x, w_y)
def create_sf_maps_cpd(pixel_diameter=714, degree_diameter=8.4, origin=None, scale_factor=1,
stim_type='logpolar', w_r=None, w_a=None, w_x=None, w_y=None):
"""Create map of the spatial frequency in cycles per degree of visual angle
this function creates spatial frequency maps; that is, it returns images that show the spatial
frequency everywhere in the specified stimulus (if you instead want the spatial frequency at a
specific point, use `sf_cpp`). returns four maps: the spatial frequency in the x direction
(dx), the spatial frequency in the y direction (dy), the magnitude (sqrt(dx**2 + dy**2)) and
the direction (arctan2(dy, dx))
In most cases, you want the magnitude, as this is the local spatial frequency of the
corresponding log polar grating at that point
"""
conversion_factor = degree_diameter / pixel_diameter
dx, dy, mag, direc = create_sf_maps_cpp(pixel_diameter, origin, scale_factor, stim_type, w_r,
w_a, w_x, w_y)
dx /= conversion_factor
dy /= conversion_factor
mag /= conversion_factor
return dx, dy, mag, direc
def create_sf_origin_polar_maps_cpd(pixel_diameter=714, degree_diameter=8.4, origin=None,
scale_factor=1, stim_type='logpolar', w_r=None, w_a=None,
w_x=None, w_y=None):
"""create map of the origin-referenced polar spatial frequency (radial/angular) in cpd
returns maps of the spatial frequency with respect to the radial and angular directions.
degree_diameter: int, the visual angle (in degrees) corresponding to the diameter of the full
image
stim_type: {'logpolar', 'constant'}. which type of stimuli to generate the spatial frequency
map for. This matters because we determine the spatial frequency maps analytically and so
*cannot* do so in a stimulus-driven manner. if 'logpolar', the log-polar gratings created by
log_polar_grating. if 'constant', the constant gratings created by create_sin_cpp (and
gen_constant_stim_set).If 'constant', then w_x and w_y must be set, w_r and w_a must be None;
if 'logpolar', then the opposite.
"""
_, _, mag, direc = create_sf_maps_cpd(pixel_diameter, degree_diameter, origin, scale_factor,
stim_type, w_r, w_a, w_x, w_y)
angle = mkAngle(pixel_diameter, origin=origin)
new_angle = np.mod(direc - angle, 2*np.pi)
dr = mag * np.cos(new_angle)
da = mag * np.sin(new_angle)
return dr, da, new_angle
def create_antialiasing_mask(size, w_r=0, w_a=0, origin=None, number_of_fade_pixels=3,
scale_factor=1):
"""Create mask to hide aliasing
Because of how our stimuli are created, they have higher spatial frequency at the origin
(probably center of the image) than at the edge of the image. This makes it a little harder to
determine where aliasing will happen. for the specified arguments, this will create the mask
that will hide the aliasing of the grating(s) with these arguments.
    the mask will not be strictly binary: there will be a `number_of_fade_pixels`-wide region where
    it transitions from 0 to 1. this transition is half of a cosine.
returns both the faded_mask and the binary mask.
"""
_, _, mag, _ = create_sf_maps_cpp(size, origin, scale_factor, w_r=w_r, w_a=w_a)
    # the nyquist frequency is .5 cycles per pixel, but we make it a little lower to give
    # ourselves a fudge factor
nyq_freq = .475
mask = mag < nyq_freq
faded_mask = _fade_mask(mask, number_of_fade_pixels, 0, origin)
return faded_mask, mask
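# Illustrative sketch (not part of the original module): for a purely radial grating the
# frequency magnitude is w_r / (2*pi*r) cycles per pixel, so the anti-aliasing mask should
# zero out (approximately) everything within w_r / (2*pi*.475) pixels of the origin.
def _example_antialiasing_radius(size=256, w_r=100):
    _, mask = create_antialiasing_mask(size, w_r=w_r, w_a=0)
    rad = mkR(size)
    masked_out_radius = (~mask * rad).max()
    predicted_radius = w_r / (2 * np.pi * .475)
    # the two should agree to within about a pixel, since the mask lives on a pixel grid
    return masked_out_radius, predicted_radius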
def create_outer_mask(size, origin, radius=None, number_of_fade_pixels=3):
"""Create mask around the outside of the image
this gets us a window that creates a circular (or some subset of circular) edge. this returns
both the faded and the unfaded versions.
radius: float or None. the radius, in pixels, of the mask. Everything farther away from the
origin than this will be masked out. If None, we pick radius such that it's the distance to the
edge of the square image. If horizontal and vertical have different distances, we will take the
shorter of the two. If the distance from the origin to the horizontal edge is not identical in
both directions, we'll take the longer of the two (similar for vertical).
To combine this with the antialiasing mask, call np.logical_and on the two unfaded masks (and
then fade that if you want to fade it)
"""
rad = mkR(size, origin=origin)
assert not hasattr(size, "__iter__"), "size must be a scalar!"
if radius is None:
radius = min(rad[:, size//2].max(), rad[size//2, :].max())
mask = rad < radius
return _fade_mask(mask, 0, number_of_fade_pixels, origin), mask
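# Illustrative sketch (not part of the original module): combining the anti-aliasing mask
# and the outer mask as described in the docstring above -- logical_and the two binary
# masks, then fade the result once.
def _example_combined_mask(size=256, w_r=100, number_of_fade_pixels=3):
    _, antialiasing_mask = create_antialiasing_mask(size, w_r=w_r, w_a=0)
    _, outer_mask = create_outer_mask(size, None)
    combined = np.logical_and(antialiasing_mask, outer_mask)
    return _fade_mask(combined, number_of_fade_pixels, number_of_fade_pixels)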
def check_aliasing_with_mask(size, w_r=0, w_a=0, phi=0, ampl=1, origin=None, scale_factor=1,
number_of_fade_pixels=3, slices_to_check=None, check_scale_factor=99):
"""check the aliasing when mask is applied
"""
stim = log_polar_grating(size, w_r, w_a, phi, ampl, origin, scale_factor)
fmask, mask = create_antialiasing_mask(size, w_r, w_a, origin)
better_sampled_stim = _create_better_sampled_grating(size, w_r, w_a, phi, ampl, origin,
scale_factor, check_scale_factor)
big_fmask = fmask.repeat(check_scale_factor, 0).repeat(check_scale_factor, 1)
big_mask = mask.repeat(check_scale_factor, 0).repeat(check_scale_factor, 1)
if slices_to_check is None:
slices_to_check = [(size+1)//2]
fig, axes = plt.subplots(ncols=3, nrows=len(slices_to_check), squeeze=False,
figsize=(15, 5*len(slices_to_check)))
aliasing_plot(better_sampled_stim, stim, slices_to_check, axes[:, 0])
aliasing_plot(big_fmask*better_sampled_stim, fmask*stim, slices_to_check, axes[:, 1])
aliasing_plot(big_mask*better_sampled_stim, mask*stim, slices_to_check, axes[:, 2])
axes[0, 0].set_title("Slices of un-masked stimulus")
axes[0, 1].set_title("Slices of fade-masked stimulus")
axes[0, 2].set_title("Slices of binary-masked stimulus")
return stim, fmask, mask, better_sampled_stim, big_fmask, big_mask
def find_ecc_range_in_pixels(stim, mid_val=127):
"""find the min and max eccentricity of the stimulus, in pixels
all of our stimuli have a central aperture where nothing is presented and an outside limit,
beyond which nothing is presented.
this assumes the fixation is in the center of the stimulus, will have to re-think things if
it's not.
returns min, max
"""
if stim.ndim == 3:
stim = stim[0, :, :]
R = mkR(stim.shape)
x, y = np.where(stim != mid_val)
return R[x, y].min(), R[x, y].max()
def find_ecc_range_in_degrees(stim, degree_radius, mid_val=127):
"""find the min and max eccentricity of the stimulus, in degrees
all of our stimuli have a central aperture where nothing is presented and an outside limit,
beyond which nothing is presented. In order to make sure we're not looking at voxels whose pRFs
lie outside the stimulus, we want to know the extent of the stimulus annulus, in degrees
this assumes the fixation is in the center of the stimulus, will have to re-think things if
it's not.
    degree_radius: int or float, the radius of the stimulus, in degrees.
returns min, max
"""
if stim.ndim == 3:
stim = stim[0, :, :]
Rmin, Rmax = find_ecc_range_in_pixels(stim, mid_val)
R = mkR(stim.shape)
    # if degree_radius corresponds to the max vertical/horizontal extent, the actual max will be
    # np.sqrt(2*degree_radius**2) (this corresponds to the far corner). this should be the radius
    # of the screen, because R starts from the center and goes to the edge
factor = R.max() / np.sqrt(2*degree_radius**2)
return Rmin / factor, Rmax / factor
def calculate_stim_local_sf(stim, w_1, w_2, stim_type, eccens, angles, degree_radius=4.2,
plot_flag=False, mid_val=127):
"""calculate the local spatial frequency for a specified stimulus and screen size
stim: 2d array of floats. an example stimulus. used to determine where the stimuli are masked
(and thus where the spatial frequency is zero).
w_1, w_2: ints or floats. the first and second components of the stimulus's spatial
    frequency. if stim_type is 'logpolar', this should be the radial and angular components (in
that order!); if stim_type is 'constant', this should be the x and y components (in that
order!)
stim_type: {'logpolar', 'constant'}. which type of stimuli were used in the session we're
analyzing. This matters because it changes the local spatial frequency and, since that is
determined analytically and not directly from the stimuli, we have no way of telling otherwise.
eccens, angles: lists of floats. these are the eccentricities and angles we want to find
local spatial frequency for.
degree_radius: float, the radius of the stimulus, in degrees of visual angle
plot_flag: boolean, optional, default False. Whether to create a plot showing the local spatial
frequency vs eccentricity for the specified stimulus
mid_val: int. the value of mid-grey in the stimuli, should be 127 or 128
"""
eccen_min, eccen_max = find_ecc_range_in_degrees(stim, degree_radius, mid_val)
eccen_local_freqs = []
for i, (e, a) in enumerate(zip(eccens, angles)):
if stim_type == 'logpolar':
dx, dy, mag, direc = sf_cpd(e, a, stim.shape[0], degree_radius*2,
stim_type=stim_type, w_r=w_1, w_a=w_2)
dr, da, new_angle = sf_origin_polar_cpd(e, a, stim.shape[0], degree_radius*2,
stim_type=stim_type, w_r=w_1, w_a=w_2)
elif stim_type == 'constant':
dx, dy, mag, direc = sf_cpd(e, a, stim.shape[0], degree_radius*2, stim_type=stim_type,
w_x=w_1, w_y=w_2)
dr, da, new_angle = sf_origin_polar_cpd(e, a, stim.shape[0], degree_radius*2,
stim_type=stim_type, w_x=w_1, w_y=w_2)
eccen_local_freqs.append(pd.DataFrame(
{'local_w_x': dx, 'local_w_y': dy, 'local_w_r': dr, 'local_w_a': da, 'eccen': e,
'angle': a, 'local_sf_magnitude': mag, 'local_sf_xy_direction': direc,
'local_sf_ra_direction': new_angle}, [i]))
eccen_local_freqs = pd.concat(eccen_local_freqs)
if plot_flag:
plt.plot(eccen_local_freqs['eccen'], eccen_local_freqs['local_sf_magnitude'])
ax = plt.gca()
ax.set_title('Spatial frequency vs eccentricity')
ax.set_xlabel('Eccentricity (degrees)')
ax.set_ylabel('Local spatial frequency (cpd)')
return eccen_local_freqs
def check_stim_properties(pixel_diameter=714, origin=None, degree_diameter=8.4, w_r=0,
w_a=range(10), eccen_range=(1, 4.2)):
"""Creates a dataframe with data on several stimulus properties, based on the specified arguments
the properties examined are:
- mask radius in pixels
- mask radius in degrees
- max frequency in cycles per pixel
- min frequency in cycles per pixel
- max frequency in cycles per degree
- min frequency in cycles per degree
- max masked frequency in cycles per pixel
- max masked frequency in cycles per degree
we also return a second dataframe, sf_df, which contains the local spatial frequency of each
(unmasked) stimulus at each eccentricity, in cycles per pixel and cycles per degree. we only
examine the eccentricities within eccen_range, and we bin by degree, averaging within each
bin. that is, with eccen_range=(1, 5), we calculate the average local spatial frequency of a
given stimulus from 1 to 2 degrees, 2 to 3 degrees, ..., 4 to 5 degrees.
Note that we don't calculate the min masked frequency because that will always be zero (because
we zero out the center of the image, where the frequency is at its highest).
note that pixel_diameter, origin, and degree_diameter must have only one value, w_r and w_a can
be lists or single values (and all combinations of them will be checked)
"""
if hasattr(pixel_diameter, '__iter__'):
raise Exception("pixel_diameter must *not* be iterable! All generated stimuli must be the same pixel_diameter")
if hasattr(origin, '__iter__'):
raise Exception("only one value of origin at a time!")
if hasattr(degree_diameter, '__iter__'):
raise Exception("only one value of degree_diameter at a time!")
if not hasattr(w_r, '__iter__'):
w_r = [w_r]
if not hasattr(w_a, '__iter__'):
w_a = [w_a]
rad = mkR(pixel_diameter, origin=origin)
mask_df = []
sf_df = []
eccens = [(i+i+1)/2 for i in np.linspace(*eccen_range, 10)]
angles = [0 for i in eccens]
for i, (f_r, f_a) in enumerate(itertools.product(w_r, w_a)):
fmask, mask = create_antialiasing_mask(pixel_diameter, f_r, f_a, origin, 0)
_, _, mag_cpp, _ = create_sf_maps_cpp(pixel_diameter, origin, w_r=f_r, w_a=f_a)
_, _, mag_cpd, _ = create_sf_maps_cpd(pixel_diameter, degree_diameter, origin, w_r=f_r, w_a=f_a)
data = {'mask_radius_pix': (~mask*rad).max(), 'w_r': f_r, 'w_a': f_a,
'freq_distance': np.sqrt(f_r**2 + f_a**2)}
data['mask_radius_deg'] = data['mask_radius_pix'] / (rad.max() / np.sqrt(2*(degree_diameter/2.)**2))
for name, mag in zip(['cpp', 'cpd'], [mag_cpp, mag_cpd]):
data[name + "_max"] = mag.max()
data[name + "_min"] = mag.min()
data[name + "_masked_max"] = (fmask * mag).max()
mask_df.append(pd.DataFrame(data, index=[i]))
sf = calculate_stim_local_sf(np.ones((pixel_diameter, pixel_diameter)), f_r, f_a,
'logpolar', eccens, angles,
degree_diameter/2)
sf = sf.rename(columns={'local_sf_magnitude': 'local_freq_cpd'})
sf['w_r'] = f_r
sf['w_a'] = f_a
sf['local_freq_cpp'] = sf['local_freq_cpd'] / (rad.max() / np.sqrt(2*(degree_diameter/2.)**2))
# period is easier to think about
sf['local_period_ppc'] = 1. / sf['local_freq_cpp']
sf['local_period_dpc'] = 1. / sf['local_freq_cpd']
sf_df.append(sf.reset_index())
return pd.concat(mask_df), pd.concat(sf_df).reset_index(drop=True)
def _set_ticklabels(datashape):
xticklabels = datashape[1]//10
if xticklabels == 0 or xticklabels == 1:
xticklabels = True
yticklabels = datashape[0]//10
if yticklabels == 0 or yticklabels == 1:
yticklabels = True
return xticklabels, yticklabels
def plot_stim_properties(mask_df, x='w_a', y='w_r', data_label='mask_radius_pix',
title_text="Mask radius in pixels",
fancy_labels={"w_a": r"$\omega_a$", "w_r": r"$\omega_r$"},
**kwargs):
"""plot the mask_df created by check_mask_radius, to visualize how mask radius depends on args.
fancy_labels is a dict of mask_df columns to nice (latex) ways of labeling them on the plot.
"""
def facet_heatmap(x, y, data_label, **kwargs):
data = kwargs.pop('data').pivot(y, x, data_label)
xticks, yticks = _set_ticklabels(data.shape)
sns.heatmap(data, xticklabels=xticks, yticklabels=yticks, **kwargs).invert_yaxis()
cmap = kwargs.pop('cmap', 'Blues')
font_scale = kwargs.pop('font_scale', 1.5)
plotting_context = kwargs.pop('plotting_context', 'notebook')
size = kwargs.pop('size', 3)
with sns.plotting_context(plotting_context, font_scale=font_scale):
g = sns.FacetGrid(mask_df, size=size)
cbar_ax = g.fig.add_axes([.92, .3, .02, .4])
g.map_dataframe(facet_heatmap, x, y, data_label, vmin=0,
vmax=mask_df[data_label].max(), cmap=cmap, cbar_ax=cbar_ax, **kwargs)
g.fig.suptitle(title_text)
g.fig.tight_layout(rect=[0, 0, .9, .95])
g.set_axis_labels(fancy_labels[x], fancy_labels[y])
def gen_log_polar_stim_set(size, freqs_ra=[(0, 0)], phi=[0], ampl=[1], origin=None,
number_of_fade_pixels=3, combo_stimuli_type=['spiral'], bytescale=True):
"""Generate the specified set of log-polar stimuli and apply the anti-aliasing mask
this function creates the specified log-polar stimuli, calculates what their anti-aliasing
masks should be, and applies the largest of those masks to all stimuli. It also applies an
outer mask so each of them is surrounded by faded, circular mask.
Note that this function should be run *last*, after you've determined your parameters and
checked to make sure the aliasing is taken care of.
Parameters
=============
freqs_ra: list of tuples of floats. the frequencies (radial and angular, in that order) of the
    stimuli to create. Each entry in the list corresponds to one stimulus, which will use the
specified (w_r, w_a).
combo_stimuli_type: list with possible elements {'spiral', 'plaid'}. type of stimuli to create
when both w_r and w_a are nonzero, as described in the docstring for log_polar_grating (to
create radial and angular stimuli, just include 0 in w_a or w_r, respectively).
bytescale: boolean, default True. if True, calls bytescale(cmin=-1, cmax=1) on image to rescale
it to between 0 and 254 (mid-value is 127), with dtype uint8. this is done because this is
probably sufficient for displays and takes up much less space.
Returns
=============
masked stimuli, unmasked stimuli, and the mask used to mask the stimuli
"""
# we need to make sure that size, origin, and number_of_fade_pixels are not iterable and the
# other arguments are
if hasattr(size, '__iter__'):
raise Exception("size must *not* be iterable! All generated stimuli must be the same size")
if hasattr(origin, '__iter__'):
raise Exception("origin must *not* be iterable! All generated stimuli must have the same "
" origin")
if hasattr(number_of_fade_pixels, '__iter__'):
raise Exception("number_of_fade_pixels must *not* be iterable! It's a property of the mask"
" and we want to apply the same mask to all stimuli.")
# this isn't a typo: we want to make sure that freqs_ra is a list of tuples; an easy way to
# check is to make sure the *entries* of freqs_ra are iterable
if not hasattr(freqs_ra[0], '__iter__'):
freqs_ra = [freqs_ra]
if not hasattr(phi, '__iter__'):
phi = [phi]
if not hasattr(ampl, '__iter__'):
ampl = [ampl]
if not hasattr(combo_stimuli_type, '__iter__'):
combo_stimuli_type = [combo_stimuli_type]
stimuli = []
masked_stimuli = []
mask = []
for w_r, w_a in freqs_ra:
_, tmp_mask = create_antialiasing_mask(size, w_r, w_a, origin, number_of_fade_pixels)
mask.append(tmp_mask)
mask.append(create_outer_mask(size, origin, None, number_of_fade_pixels)[1])
if len(mask) > 1:
mask = np.logical_and.reduce(mask)
else:
mask = mask[0]
mask = _fade_mask(mask, number_of_fade_pixels, number_of_fade_pixels, origin)
for (w_r, w_a), A in itertools.product(freqs_ra, ampl):
stimuli.append([])
masked_stimuli.append([])
for p in phi:
if w_r == 0 and w_a == 0:
# this is the empty stimulus
continue
if 0 in [w_r, w_a] or 'spiral' in combo_stimuli_type:
tmp_stimuli = log_polar_grating(size, w_r, w_a, p, A, origin)
if bytescale:
masked_stimuli[-1].append(bytescale_func(tmp_stimuli*mask, cmin=-1, cmax=1))
stimuli[-1].append(bytescale_func(tmp_stimuli, cmin=-1, cmax=1))
else:
masked_stimuli[-1].append(tmp_stimuli*mask)
stimuli[-1].append(tmp_stimuli)
if 'plaid' in combo_stimuli_type and 0 not in [w_r, w_a]:
tmp_stimuli = (log_polar_grating(size, w_r, 0, p, A, origin) +
log_polar_grating(size, 0, w_a, p, A, origin))
if bytescale:
masked_stimuli[-1].append(bytescale_func(tmp_stimuli*mask, cmin=-1, cmax=1))
stimuli[-1].append(bytescale_func(tmp_stimuli, cmin=-1, cmax=1))
else:
masked_stimuli[-1].append(tmp_stimuli*mask)
stimuli[-1].append(tmp_stimuli)
return masked_stimuli, stimuli, mask
def create_sin_cpp(size, w_x, w_y, phase=0, origin=None):
"""create a full 2d sine wave, with frequency in cycles / pixel
"""
if origin is None:
origin = [(size+1) / 2., (size+1) / 2.]
x = np.array(range(1, size+1))
x, y = np.meshgrid(x - origin[0], x - origin[1])
return np.cos(2*np.pi*x*w_x + 2*np.pi*y*w_y + phase)
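# Illustrative sketch (not part of the original module): because w_x / w_y are in cycles
# per pixel, a grating with w_x = n / size completes exactly n cycles across the image
# horizontally (and analogously for w_y vertically).
def _example_constant_grating_cycles(size=256, n_cycles=4):
    return create_sin_cpp(size, n_cycles / size, 0)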
def gen_constant_stim_set(size, mask, freqs_xy=[(0, 0)], phi=[0], ampl=[1], origin=None,
bytescale=True):
"""Generate the specified set of constant grating stimuli and apply the supplied mask
this function creates the specified constant grating stimuli and applies the supplied mask to
all stimuli. It also applies an outer mask so each of them is surrounded by faded, circular
mask.
Note that this function should be run *last*, after you've determined your parameters and
checked to make sure the aliasing is taken care of.
Parameters
=============
freqs_xy: list of tuples of floats. the frequencies (x and y, in that order) of the stimuli to
    create. Each entry in the list corresponds to one stimulus, which will use the specified (w_x,
    w_y). They should be in cycles per pixel.
bytescale: boolean, default True. if True, calls bytescale(cmin=-1, cmax=1) on image to rescale
it to between 0 and 254 (mid-value is 127), with dtype uint8. this is done because this is
probably sufficient for displays and takes up much less space.
Returns
=============
masked stimuli and unmasked stimuli
"""
# we need to make sure that size, origin, and number_of_fade_pixels are not iterable and the
# other arguments are
if hasattr(size, '__iter__'):
raise Exception("size must *not* be iterable! All generated stimuli must be the same size")
if hasattr(origin, '__iter__'):
raise Exception("origin must *not* be iterable! All generated stimuli must have the same "
" origin")
# this isn't a typo: we want to make sure that freqs_xy is a list of tuples; an easy way to
# check is to make sure the *entries* of freqs_xy are iterable
if not hasattr(freqs_xy[0], '__iter__'):
freqs_xy = [freqs_xy]
if not hasattr(phi, '__iter__'):
phi = [phi]
if not hasattr(ampl, '__iter__'):
ampl = [ampl]
stimuli = []
masked_stimuli = []
for (w_x, w_y), A in itertools.product(freqs_xy, ampl):
stimuli.append([])
masked_stimuli.append([])
for p in phi:
if w_x == 0 and w_y == 0:
# this is the empty stimulus
continue
else:
tmp_stimuli = A * create_sin_cpp(size, w_x, w_y, p, origin=origin)
if bytescale:
masked_stimuli[-1].append(bytescale_func(tmp_stimuli*mask, cmin=-1, cmax=1))
stimuli[-1].append(bytescale_func(tmp_stimuli, cmin=-1, cmax=1))
else:
masked_stimuli[-1].append(tmp_stimuli*mask)
stimuli[-1].append(tmp_stimuli)
return masked_stimuli, stimuli
def _gen_freqs(base_freqs, n_orientations=4, n_intermed_samples=2, round_flag=True):
"""turn the base frequencies into the full set.
base frequencies are the distance from the center of frequency space.
n_orientations: int, the number of "canonical orientations". That is, the number of
orientations that should use the base_freqs. We will equally sample orientation space, so that
if n_orientations==4, then we'll use angles 0, pi/4, pi/2, 3*pi/4
n_intermed_samples: int, the number of samples at the intermediate frequency. In order to
sample some more orientations, we pick the middle frequency out of base_freqs and then sample
n_intermed_samples times between the canonical orientations at that frequency. For example, if
this is 2 and n_orientations is 4, we will sample the intermediate frequency at pi/12, 2*pi/12,
4*pi/12, 5*pi/12, 7*pi/12, 8*pi/12, 10*pi/12, 11*pi/12.
"""
intermed_freq = base_freqs[len(base_freqs)//2]
ori_angles = [np.pi*1/n_orientations*i for i in range(n_orientations)]
    # the following determines how to step through the angles so as to get n_intermed_samples
    # different intermediate angles between each pair of canonical orientations
n_intermed_steps = int(n_orientations * (n_intermed_samples+1))
intermed_locs = [i for i in range(n_intermed_steps)
if i % (n_intermed_steps/n_orientations) != 0]
intermed_angles = [np.pi*1/n_intermed_steps*i for i in intermed_locs]
# these are the canonical orientations
freqs = [(f*np.sin(a), f*np.cos(a)) for a, f in itertools.product(ori_angles, base_freqs)]
# arc, where distance from the origin is half the max (in log space)
# skip those values which we've already gotten: 0, pi/4, pi/2, 3*pi/4, and pi
freqs.extend([(intermed_freq*np.sin(i),
intermed_freq*np.cos(i)) for i in intermed_angles])
if round_flag:
freqs = np.round(freqs)
return freqs
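# Illustrative sketch (not part of the original module; the base_freqs values here are
# arbitrary): _gen_freqs returns n_orientations * len(base_freqs) canonical samples plus
# n_orientations * n_intermed_samples extra samples at the intermediate frequency.
def _example_gen_freqs_count(base_freqs=(6, 11, 23, 46, 91), n_orientations=4,
                             n_intermed_samples=2):
    freqs = _gen_freqs(base_freqs, n_orientations, n_intermed_samples)
    expected = n_orientations * len(base_freqs) + n_orientations * n_intermed_samples
    assert len(freqs) == expected
    return freqs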
def _create_stim(pixel_diameter, freqs, phi, n_exemplars, output_dir, stimuli_name,
stimuli_description_csv_name, col_names, stim_type, mask=None):
    helper function to create the stimuli and the stimuli description csv
stim_type: {'logpolar', 'constant'}. which type of stimuli to make. determines which function
to call, gen_log_polar_stim_set or gen_constant_stim_set. if constant, mask must be set
"""
if stim_type == 'logpolar':
masked_stim, stim, mask = gen_log_polar_stim_set(pixel_diameter, freqs, phi)
elif stim_type == 'constant':
masked_stim, stim = gen_constant_stim_set(pixel_diameter, mask, freqs, phi)
# in order to get this the right shape
stim = np.array(stim).transpose(2, 3, 0, 1)
np.save(os.path.join(output_dir, stimuli_name), stim)
df = []
for i, (w_1, w_2) in enumerate(freqs):
for j, p in enumerate(phi):
df.append((w_1, w_2, p, pixel_diameter, i, j))
    df = pd.DataFrame(df, columns=col_names)
'''
Library for Google Sheets functions.
'''
from constants import *
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pickle
import os
import pandas as pd
import re
def read_values(sheetid, range_, config):
# returns values read from a google sheet, as is.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
TOKEN = config['token']
CREDENTIALS = config['credentials']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOKEN):
with open(TOKEN, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOKEN, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
values = sheet.values().get(spreadsheetId=sheetid, range=range_).execute().get('values', [])
if not values:
raise ValueError('Sheet data not found')
else:
return values
def insert_values(sheetid, body, config, **kwargs):
'''
Insert values into spreadsheet.
range should be included in body.
example body:
body = {
'range': 'SheetName!A1:A3',
'majorDimension': 'ROWS',
'values': [[1], [2], [3]]
}
'''
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
TOKEN = config['token']
CREDENTIALS = config['credentials']
INPUTOPTION = kwargs['inputoption'] if 'inputoption' in kwargs.keys() else 'USER_ENTERED'
# values = list
# placement = A1 notation range.
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOKEN):
with open(TOKEN, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOKEN, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
request = sheet.values().update(spreadsheetId=sheetid,
range=body['range'],
body=body,
valueInputOption=INPUTOPTION)
response = request.execute()
return response
def values2dataframe(values):
'''
Convert raw values as retrieved from read_values to a pandas dataframe
Adds a "row" number going off the assumption that we are reading from the top.
'''
columns = values[0]
ncols = len(columns)
data = values[1:]
for d in data:
if len(d) < ncols:
extension = ['']*(ncols-len(d))
d.extend(extension)
data = pd.DataFrame(data=data, columns=columns)
data['row'] = list(range(2, len(data)+2)) # keeping row number (+1 for 1 indexing +1 for column headers in sheet)
data['row'] = data['row'].astype(str)
return data
def index2A1(num):
if 0 <= num <= 25:
return alpha[num]
elif 26 <= num <= 51:
return 'A{}'.format(alpha[num%26])
elif 52 <= num <= 77:
return 'B{}'.format(alpha[num%26])
else:
raise ValueError('Could not convert index "{}" to A1 notation'.format(num))
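# Illustrative sketch, not part of the original library: assuming `alpha` (imported from
# constants) is the uppercase alphabet, index2A1 maps a zero-based column index onto its
# spreadsheet column letters, e.g. 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 51 -> 'AZ'. A
# column_dict like the one expected by fix_cells below can then be built as:
def example_column_dict(columns):
    '''map column names to their A1 column letters, in sheet order'''
    return {name: index2A1(i) for i, name in enumerate(columns)}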
def get_trailing_spaces(data):
'''
Generate error table for trailing whitespaces (front and back).
return: error_table[row, ID, column_name, value].
'''
# fix trailing spaces. This applies to all columns except "row"
df = data.copy()
error_table = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
for c in df.columns:
if c == 'row':
continue
else:
stripped = df[c].str.strip()
invalid_bool = stripped != df[c]
invalid = df[invalid_bool][['row', 'ID']].copy()
invalid['column'] = c
invalid['value'] = df[c][invalid_bool].copy()
invalid['fix'] = stripped[invalid_bool]
error_table = error_table.append(invalid, ignore_index=True, sort=True)
return error_table
def get_NA_errors(data):
'''
Generate error table for mispelled NA values.
We chose to write them as "NA", and so far we only
replace "N/A" with "NA"
    return error_table[row, ID, column, value, fix]
'''
df = data.copy()
table = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
for c in df.columns:
if c == 'row':
continue
else:
test = df[c].str.match('N/A')
errors = df[test][['row', 'ID']]
errors['column'] = c
errors['value'] = df[test][c]
errors['fix'] = df[test][c].replace('N/A', 'NA')
table = table.append(errors, ignore_index=True, sort=True)
return table
def ErrorTest(data, columns, rgx, table):
'''
Test a regex pattern on passed columns, generate error table
for things that did not pass the test.
Note this does not generate the fix. We do this after.
'''
df = data.copy()
for c in columns:
test = df[c].str.match(rgx)
invalid = df[~test][['row', "ID"]].copy()
invalid['column'] = c
invalid['value'] = df[~test][c]
table = table.append(invalid, ignore_index=True, sort=False)
return table
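# Illustrative sketch, not part of the original library: a single regex check looks like
# the calls in generate_error_tables below, e.g. validating the 'age' column against the
# rgx_age pattern imported from constants.
def example_run_single_test(data):
    table = pd.DataFrame(columns=['row', 'ID', 'column', 'value'])
    return ErrorTest(data, ['age'], rgx_age, table)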
def fix_cells(sheetid, sheetname, error_table, column_dict, config):
'''
Fix specific cells on the private sheet, based on error table.
Error table also needs to provide the "fix" column which is what
we are replacing the current value with.
:column_dict: map from 'column_name' to A1 notation.
'''
assert 'fix' in error_table.columns
assert 'value' in error_table.columns
fixed = 0
for i,error in error_table.iterrows():
row = error['row']
a1 = column_dict[error['column']] + row
range_ = '{}!{}'.format(sheetname, a1)
try:
fetch = read_values(sheetid, f'{sheetname}!A{row}', config) # fetch ID to ensure that it is the same.
assert error['ID'] == fetch[0][0]
body = {
'range': range_,
'majorDimension': 'ROWS',
'values': [[error['fix']]]
}
insert_values(sheetid, body, config)
fixed += 1
except Exception as E:
print(error)
print(fetch)
raise E
return fixed
def generate_error_tables(data):
'''
Generate table for fields that don't pass the rgex tests.
    For easy fixes (e.g. spacing) we can do it automatically; for trickier ones we save the table (fixed ones are omitted in error_report)
'''
table = pd.DataFrame(columns=['row', 'ID', 'value'])
table = ErrorTest(data, ['age'], rgx_age, table)
table = ErrorTest(data, ['sex'], rgx_sex, table)
table = ErrorTest(data, ['city', 'province', 'country'], rgx_country, table)
table = ErrorTest(data, ['latitude', 'longitude'], rgx_latlong, table)
table = ErrorTest(data, ['geo_resolution'], rgx_geo_res, table)
table = ErrorTest(data, date_columns, rgx_date, table)
table = ErrorTest(data, ['lives_in_Wuhan'], rgx_lives_in_wuhan, table)
    fixable_errors = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
#!/usr/bin/env python3.8
# Copyright [2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, hashlib, os, subprocess, sys, time
import shlex
import requests
import re
import json
from datetime import datetime
import cx_Oracle
from getpass import getpass
import collections
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(prog='data_analysis_script.py', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
+ ============================================================ +
| European Nucleotide Archive (ENA) data flow monitoring Tool |
| |
| Tool to analyse and compare data from NCBI or ENA |
+ =========================================================== +
This script is used to analyse the data from NCBI and ENA that has been produced by data_fetch_script.py.
""")
parser.add_argument('-f', '--file', help='path for the input files', type=str, required=True)
args = parser.parse_args()
"""
Request username and password for databases
"""
def get_oracle_usr_pwd():
if database == 'reads':
return ['era_reader', 'reader']
elif database == 'sequences':
return ['ena_reader', 'reader']
"""
Setup the connection to ENAPRO and ERAPRO.
"""
def setup_connection():
oracle_usr, oracle_pwd = get_oracle_usr_pwd()
client_lib_dir = os.getenv('ORACLE_CLIENT_LIB')
if database == 'sequences':
if not client_lib_dir or not os.path.isdir(client_lib_dir):
sys.stderr.write("ERROR: Environment variable $ORACLE_CLIENT_LIB must point at a valid directory\n")
exit(1)
cx_Oracle.init_oracle_client(lib_dir=client_lib_dir)
connection = None
try:
dsn = cx_Oracle.makedsn("ora-vm5-008.ebi.ac.uk", 1531, service_name="ENAPRO")
connection = cx_Oracle.connect(oracle_usr, oracle_pwd, dsn, encoding="UTF-8")
return connection
except cx_Oracle.Error as error:
print(error)
else:
if not client_lib_dir or not os.path.isdir(client_lib_dir):
sys.stderr.write("ERROR: Environment variable $ORACLE_CLIENT_LIB must point at a valid directory\n")
exit(1)
cx_Oracle.init_oracle_client(lib_dir=client_lib_dir)
connection = None
try:
dsn = cx_Oracle.makedsn("ora-vm-009.ebi.ac.uk", 1541, service_name="ERAPRO")
connection = cx_Oracle.connect(oracle_usr, oracle_pwd, dsn, encoding="UTF-8")
return connection
except cx_Oracle.Error as error:
print(error)
"""
Query the ENAPRO dataset, process the data and fetch the release date from the NCBI nucleotide database. Print to a file.
"""
def fetch_and_filter_seq(connection, output):
# This Part is for querying ENAPRO
c = connection.cursor()
f = open(f"{args.file}/analysis.inENAPRO.sequences.log.txt", "w")
header = "\t".join(['Accession', 'First_public', 'Last_public', 'status_id' ])
f.write(str(header) + "\n")
accession_list_seq = []
for accession in output:
c.execute(f"select TRUNC(first_public), TRUNC(last_public), statusid from dbentry where primaryacc# in ('{accession}')")
for row in c:
f.write(str(accession) + "\t" + str(row[0]) + "\t" + str(row[1]) + "\t" + str(row[2]) + "\n" )
accession_list_seq.append(accession)
f.close()
    # Data analysis, to retrieve the data that is missing from ENAPRO
print('Data Processing............')
noENAPRO_f = open(f"{args.file}/analysis.noENAPRO.sequences.log.txt", "w")
accession_set_seq=set(accession_list_seq)
accession_set_seq_diff = output.difference(accession_set_seq)
no_enapro_list= [acc for acc in accession_set_seq_diff ]
    # To fetch the release date from NCBI (nucleotide database) for the data that is missing from ENAPRO (this command uses 'esearch', 'xtract' and 'efetch' functions, entrez-direct is needed)
release_date_list = []
for i in range(0, len(no_enapro_list), 100):
stripped_list = ', '.join(no_enapro_list[i:i + 100])
command = 'esearch -db nucleotide -query "{}" | efetch -format docsum |xtract -pattern DocumentSummary -element Caption, UpdateDate'.format(stripped_list)
sp = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
if "command not found" in err.decode():
sys.stderr.write(err.decode() + "\n This command uses 'esearch', 'xtract' and 'efetch' functions.\n "
"You might need to download and install 'entrez-direct' to fetch any data from NCBI. "
"\n Please follow the instructions in the link provided below. "
"\n https://www.ncbi.nlm.nih.gov/books/NBK179288/ \n "
"Please note that 'entrez-direct' only runs on Unix and Macintosh environments or under the Cygwin Unix-emulation environment on Windows \n ")
exit(1)
else:
sys.stderr.write(err.decode())
stdoutOrigin = sys.stdout
release_date = out.decode().strip("\n").split("\n")
for obj in release_date:
release_date_list.append(obj)
sys.stdout = stdoutOrigin
noENAPRO_f.write("\n".join(release_date_list) + "\n")
noENAPRO_f.close()
# conn.close()
"""
Query the ERAPRO dataset and process the data. Print to a file.
"""
def fetch_and_filter_reads(connection, output):
# This Part is for querying ERAPRO
c = connection.cursor()
f = open(f"{args.file}/analysis.inERAPRO.reads.log.txt", "w")
    header = "\t".join(['Accession', 'Status_id', 'First_public', 'Last_updated'])
f.write(str(header) + "\n")
accession_list_reads = []
for accession in output:
c.execute(
f"select status_id, first_public, last_updated from experiment where experiment_id in ('{accession}')")
for row in c:
f.write(str(accession) + "\t" + str(row[0]) + "\t" + str(row[1]) + "\t" + str(row[2]) + "\n")
accession_list_reads.append(accession)
f.close()
    # Data analysis, to retrieve the data that is missing from ERAPRO
print('Data Processing...........')
accession_set_reads=set(accession_list_reads)
accession_set_reads_diff = output.difference(accession_set_reads)
noERAPRO_df= pd.DataFrame({'accession': list(accession_set_reads_diff)}, columns=['accession'])
inner_join = pd.merge(noERAPRO_df, sra_df, on='accession', how='inner')
inner_join.to_csv(f"{args.file}/analysis.noERAPRO.reads.log.txt")
"""
getting the difference between reads in NCBI and ENA advanced search. Print to a file.
"""
def reads_dataset_difference ():
output = set(sra_df.accession).difference(set(ena_read_df.accession))
length_read_set = len(output)
f = open(f"{args.file}/NCBI_vs_ENA_{database}.log.txt", "w")
values = "\n".join(map(str, list(output)))
f.write(values)
f.close()
print('Number of data found in NCBI ( SRA database) but missing in ENA advanced search is: ', length_read_set)
return output
"""
getting the difference between sequences in NCBI and ENA advanced search. Print to a file.
"""
def sequence_dataset_difference ():
output = ncbivirus_set.difference(set(ena_seq_df.accession))
length_seq_set = len(output)
f = open(f"{args.file}/NCBI_vs_ENA_{database}.log.txt", "w")
f.write("\n".join(output) + "\n")
f.close()
print('Number of data found in NCBI ( NCBIVirus database) but missing in ENA advanced search is: ', length_seq_set)
return output
"""
getting the difference between sequences in COVID-19 portal and ENA advanced search. Print to a file.
"""
def covid_advanced_search_difference (ena_dataset, covid_reads_portal_df):
# Obtain the reads difference between Advanced search and COVID-19 Portal
covid_portal_output = set(ena_dataset.accession).difference(set(covid_reads_portal_df.accession))
covid_diff_leng_set = len(covid_portal_output)
covid_portal_output_df = pd.DataFrame({'accession': list(covid_portal_output)}, columns=['accession'])
covid_inner_join = pd.merge(covid_portal_output_df, ena_dataset, on='accession', how='inner')
covid_inner_join.to_csv(f"{args.file}/Covid19Portal.vs.ENA.advanced.{database}.log.txt",sep="\t", index = None)
print(f"Number of {database} found in ENA advanced search but missing in COVID-19 data portal is: ", covid_diff_leng_set)
# to create a list of reads duplicates if present in COVID-19 data Portal
f_duplicates = open(f"{args.file}/Duplicates.Covid19Portal.{database}.log.txt", "w")
covid_duplicate_list= []
    for item, count in collections.Counter(covid_reads_portal_df['accession']).items():
if count > 1:
f_duplicates.write(str(item) + "\n")
covid_duplicate_list.append(item)
f_duplicates.close()
length_covid_duplicate = len(covid_duplicate_list)
print(f"Number of {database} found duplicated in COVID-19 data portal is: ", length_covid_duplicate)
def advanced_search_ebi_search_difference(ebi_df, ena_df):
# For data found in EBI search but missing in ENA advanced search
output_1 = set(ebi_df.accession).difference(set(ena_df.accession))
setLength_1 = len(output_1)
ebi_output1_df = pd.DataFrame({'accession': list(output_1)}, columns=['accession'])
ebi1_inner_join = pd.merge(ebi_output1_df, ebi_df, on='accession', how='inner')
print(f'Number of {database} found in EBI search but missing in ENA advanced search is: ', setLength_1)
ebi1_inner_join.to_csv(f"{args.file}/EBIsearch_vs_ENAadvanced_{database}.log.txt", sep="\t", index = None)
# For data found in ENA advanced search but missing in EBI search
output_2 = set(ena_df.accession).difference(set(ebi_df.accession))
setLength_2 = len(output_2)
ebi_output2_df = pd.DataFrame({'accession': list(output_2)}, columns=['accession'])
ebi2_inner_join = pd.merge(ebi_output2_df, ena_df, on='accession', how='inner')
print(f'Number of {database} found in ENA advanced search but missing in EBI search is: ', setLength_2)
ebi2_inner_join.to_csv(f"{args.file}/ENAadvanced_vs_EBIsearch_{database}.log.txt", sep="\t", index = None)
#############
## MAIN ##
#############
database = input("please indicate the dataset type, ex: sequences or reads: ").lower()
if database == 'reads':
#Connecting to ERAPRO
sys.stderr.write("Connecting to ERAPRO...\n")
db_conn = setup_connection()
#Uploading Files from NCBI, ENA, ebisearch
sra_df =pd.read_csv(f"{args.file}/NCBI.sra.log.txt", sep="\t", header=None, names =['run_id', 'accession', 'release_date'])
print ('Number of Reads in NCBI (SRA database) is: ', len(sra_df))
ena_read_df = pd.read_csv(f"{args.file}/ENA.read_experiment.log.txt", sep="\t", header=None, names =['accession', 'date'])
print('Number of Reads in ENA Advanced Search is: ', len(ena_read_df))
ebi_read_df= pd.read_csv(f"{args.file}/EBIsearch.sra-experiment-covid19.log.txt", sep="\t", header=None, names =['accession', 'date'])
print('Number of Reads in EBI Search is: ', len(ebi_read_df))
#Uploading files from COVID-19 data Portal
covid_reads_portal_df = pd.read_csv(f"{args.file}/Covid19DataPortal.raw-reads.log.txt", sep="\t", header=None, names=['accession'])
print('Number of Reads in COVID-19 data portal is: ', len(covid_reads_portal_df))
#Obtain the difference between the data in NCBI and ENA
output = reads_dataset_difference()
#Obtain the reads difference between Advanced search and COVID-19 Portal, and duplicates if present
covid_advanced_search_difference(ena_read_df, covid_reads_portal_df)
# Obtain the reads difference between Advanced search and EBI search
advanced_search_ebi_search_difference(ebi_read_df, ena_read_df)
#Querying ERAPRO
sys.stderr.write("Querying ERAPRO ..........\n")
fetch_and_filter_reads(db_conn, output)
elif database == 'sequences':
#Connecting to ENAPRO
sys.stderr.write("Connecting to ENAPRO...\n")
db_conn = setup_connection()
# Uploading Files from NCBI and ENA
ncbivirus_set = set(open(f"{args.file}/NCBI.ncbivirus.log.txt").read().split())
print('Number of Sequences in NCBI (NCBIVirus database) is: ', len(ncbivirus_set))
    ena_seq_df = pd.read_csv(f"{args.file}/ENA.sequence.log.txt", sep="\t", header=None, names=['accession', 'date'])
from collections import defaultdict
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import f1_score
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def split_labeled(data):
is_labeled = (data['label'] != -1)
return data[is_labeled], data[~is_labeled]
# def split_dataset(raw_dataset_path, new_dataset_path):
# # 主要是方便EDA
# #     # mainly to make EDA easier
# user_cols = [f'u{i}' for i in range(1, 80+1)]
# try:
# with open(raw_dataset_path, 'r', encoding='utf-8') as rf:
# with open(new_dataset_path, 'w+', encoding='utf-8') as wf:
# if "train" in raw_dataset_path:
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")},label"""
# else: # "predict"
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")}"""
# wf.write(header+'\n')
# for line in rf:
# if "features" in line:
# continue
# line = str(line[:].split(" ")).replace("'", "")[1:-3]
# wf.write(line+'\n')
# except FileNotFoundError:
# #         print(f'{raw_dataset_path} does not exist!')
# def read_split_data(path, nrows=1000000):
# df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True, nrows=nrows)
# data = pd.concat([chunk for chunk in df_chunk])
# data = reduce_mem_usage(data)
# return data
def read_data(path='/tcdata/train0.csv', nrows=1000000):
if "train" in path:
df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features", "label"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
elif "predict" in path:
df_chunk = pd.read_csv(path, chunksize=5e5, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
else: # "truth"
data = pd.read_csv(path, names=["uuid", "label"], nrows=nrows)
return data
def label_user_item_via_blacklist(data):
data_labeled, data_no_labeled = split_labeled(data)
data_spam = data_labeled[data_labeled.label == 1]
data_norm = data_labeled[data_labeled.label == 0]
try:
user_spam_dict = load_obj("user_black_dict")
item_spam_dict = load_obj("item_black_dict")
        print("Updating user and item blacklists")
except:
user_spam_dict = defaultdict(int)
item_spam_dict = defaultdict(int)
        print("Creating new user and item blacklists")
for _, row in data_spam[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
        user_spam_dict[u] += 1  # count occurrences
        item_spam_dict[i] += 1  # count occurrences
save_obj(user_spam_dict, "user_black_dict")
save_obj(item_spam_dict, "item_black_dict")
    # 1. Use the label=1 records to build definite user and item blacklists.
    # 2. For label=0 records: if the current user is on the blacklist, the item must be normal,
    #    so add the item to the item whitelist; likewise, if the current item is on the
    #    blacklist, the user must be normal, so add the user to the user whitelist.
    # 3. For label=0 records with a whitelisted user, the item is normal or potentially malicious;
    #    with a whitelisted item, the user is normal or potentially malicious.
    # 4. Use the updated black/white lists to label the records with label=-1.
    # Step 3 can be skipped.
try:
user_norm_dict = load_obj("user_white_dict")
item_norm_dict = load_obj("item_white_dict")
        print("Updating user and item whitelists")
except:
user_norm_dict = defaultdict(int)
item_norm_dict = defaultdict(int)
        print("Creating new user and item whitelists")
for _, row in data_norm[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
        if i in item_spam_dict.keys():  # if the current item is malicious
            user_norm_dict[u] = 0  # then the user is normal, add it to the whitelist
        # else: the current item may be normal or potentially malicious
        if u in user_spam_dict.keys():  # if the current user is malicious
            item_norm_dict[i] = 0  # then the item is normal, add it to the whitelist
        # else: the current user may be normal or potentially malicious
        # user_unknown_dict[u] = 0  # potentially malicious
save_obj(user_norm_dict, "user_white_dict")
save_obj(item_norm_dict, "item_white_dict")
    print("Label the unknown samples based on the black and white lists")
def black_white_dict(ui, black_dict, white_dict):
if ui in black_dict.keys():
return 1
elif ui in white_dict.keys():
return 0
else:
return -1
data_no_labeled['user_label'] = data_no_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_norm_dict))
data_no_labeled['item_label'] = data_no_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_norm_dict))
def ui_label2label(u, i):
if u == 1 and i == 1:
return 1
elif ((u == 1 and i == 0) or (u == 0 and i == 1) or (u == 0 and i == 0)):
return 0
else:
return -1
data_no_labeled['label'] = list(map(lambda u, i: ui_label2label(
u, i), data_no_labeled['user_label'], data_no_labeled['item_label']))
data_labeled['user_label'] = data_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_norm_dict))
data_labeled['item_label'] = data_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_norm_dict))
    data = pd.concat([data_no_labeled, data_labeled], axis=0)
from typing import Dict
import flask
from flask import request, jsonify
from flask import Flask, render_template
import json
from os import path
import yaml
import pandas as pd
server_config_file = "config.json"
with open(server_config_file, "r") as f:
server_config = json.load(f)
IP_ADDR = server_config["ip"]
PORT = server_config["port"]
app = flask.Flask(__name__)
# app.config["DEBUG"] = True
class Leaderboard:
"""A single leaderboard page"""
@staticmethod
def config_file(id): return f"{server_config['data_path']}/{id}-config.yml"
@staticmethod
def data_file(id): return f"{server_config['data_path']}/{id}.csv"
@classmethod
def check(cls, id):
"""Checks whether the leaderboard exists"""
return path.exists(cls.config_file(id)) and path.exists(cls.data_file(id))
def __init__(self, id):
self.id = id
self.title = None
        self.results: Dict[str, pd.DataFrame] = None
self.table_name_column = None
self.sort_cols = []
self.table_names = {}
self.default_sort_col = None
def load_config(self):
with open(self.config_file(self.id), "r") as f:
config = yaml.safe_load(f)
self.title = config['title']
self.sort_cols = []
self.cols_config = {}
try:
self.default_sort_col = config['default_sort_col']
except:
self.default_sort_col = None
try:
self.legend = config['legend']
except:
self.legend = None
for i, col in enumerate(config['sort_cols']):
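            # each sort_cols entry is either a bare column name or a one-key dict carrying extra per-column config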
if isinstance(col, str):
self.sort_cols.append(col)
self.cols_config[col] = {}
else:
key = list(col.keys())[0]
self.sort_cols.append(key)
self.cols_config[key] = config['sort_cols'][i][key]
print(self.cols_config)
print(f'Sort cols: {self.sort_cols}')
if "table_name_column" in config:
self.table_name_column = config['table_name_column']
self.table_names = config['table_names']
def load_data(self):
self.results = {}
try:
table = pd.read_csv(Leaderboard.data_file(self.id)).round(2)
except:
table =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import requests
import time
pd.set_option('display.max_columns', 500)
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
|
TS('2015-03-20')
|
pandas.Timestamp
|
from nose.tools import assert_equal
from mock import patch
from records_mover.records.schema.field import RecordsSchemaField
from records_mover.records.schema.field.constraints import (
RecordsSchemaFieldIntegerConstraints,
RecordsSchemaFieldDecimalConstraints,
)
import numpy as np
import pandas as pd
def with_nullable(nullable: bool, fn):
def wrapfn(*args, **kwargs):
with patch(
"records_mover.records.schema.field.pandas.supports_nullable_ints",
return_value=nullable,
):
fn(*args, **kwargs)
return wrapfn
def check_dtype(field_type, constraints, expectation):
field = RecordsSchemaField(
name="test",
field_type=field_type,
constraints=constraints,
statistics=None,
representations=None,
)
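    # cast a small int8 series to the field's type and check the resulting dtype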
out = field.cast_series_type(pd.Series(1, dtype=np.int8))
assert_equal(out.dtype, expectation)
def test_to_pandas_dtype_integer_no_nullable():
expectations = {
(-100, 100): np.int8,
(0, 240): np.uint8,
(-10000, 10000): np.int16,
(500, 40000): np.uint16,
(-200000000, 200000000): np.int32,
(25, 4000000000): np.uint32,
(-9000000000000000000, 2000000000): np.int64,
(25, 10000000000000000000): np.uint64,
(25, 1000000000000000000000000000): np.float128,
(None, None): np.int64,
}
for (min_, max_), expected_pandas_type in expectations.items():
constraints = RecordsSchemaFieldIntegerConstraints(
required=True, unique=None, min_=min_, max_=max_
)
yield with_nullable(
False, check_dtype
), "integer", constraints, expected_pandas_type
def test_to_pandas_dtype_integer_nullable():
expectations = {
(-100, 100): pd.Int8Dtype(),
(0, 240): pd.UInt8Dtype(),
(-10000, 10000): pd.Int16Dtype(),
(500, 40000): pd.UInt16Dtype(),
(-200000000, 200000000):
|
pd.Int32Dtype()
|
pandas.Int32Dtype
|
import spacy
import pandas as pd
from collections import Counter
from tqdm import tqdm
import json
nlp = spacy.load("en_core_sci_sm")
####################
# Process entities and relations for a given abstract.
def get_entities_in_sent(sent, entities):
start, end = sent.start_char, sent.end_char
start_ok = entities["char_start"] >= start
end_ok = entities["char_end"] <= end
keep = start_ok & end_ok
res = entities[keep]
return res
def align_one(sent, row):
# Don't distinguish b/w genes that can and can't be looked up in database.
lookup = {"GENE-Y": "GENE",
"GENE-N": "GENE",
"CHEMICAL": "CHEMICAL"}
start_tok = None
end_tok = None
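    # walk the tokens to find the ones whose character offsets exactly match the entity span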
for tok in sent:
if tok.idx == row["char_start"]:
start_tok = tok
if tok.idx + len(tok) == row["char_end"]:
end_tok = tok
if start_tok is None or end_tok is None:
return None
else:
expected = sent[start_tok.i - sent.start:end_tok.i - sent.start + 1]
if expected.text != row.text:
raise Exception("Entity mismatch")
return (start_tok.i, end_tok.i, lookup[row["label"]])
def align_entities(sent, entities_sent):
aligned_entities = {}
missed_entities = {}
for _, row in entities_sent.iterrows():
aligned = align_one(sent, row)
if aligned is not None:
aligned_entities[row["entity_id"]] = aligned
else:
missed_entities[row["entity_id"]] = None
return aligned_entities, missed_entities
def format_relations(relations):
# Convert to dict.
res = {}
for _, row in relations.iterrows():
ent1 = row["arg1"].replace("Arg1:", "")
ent2 = row["arg2"].replace("Arg2:", "")
key = (ent1, ent2)
res[key] = row["label"]
return res
def get_relations_in_sent(aligned, relations):
res = []
keys = set()
# Loop over the relations, and keep the ones relating entities in this sentences.
for ents, label in relations.items():
if ents[0] in aligned and ents[1] in aligned:
keys.add(ents)
ent1 = aligned[ents[0]]
ent2 = aligned[ents[1]]
to_append = ent1[:2] + ent2[:2] + (label,)
res.append(to_append)
return res, keys
####################
# Manage a single document and a single fold.
def one_abstract(row, df_entities, df_relations):
doc = row["title"] + " " + row["abstract"]
doc_key = row["doc_key"]
entities = df_entities.query(f"doc_key == '{doc_key}'")
relations = format_relations(df_relations.query(f"doc_key == '{doc_key}'"))
processed = nlp(doc)
entities_seen = set()
entities_alignment = set()
entities_no_alignment = set()
relations_found = set()
scierc_format = {"doc_key": doc_key, "dataset": "chemprot", "sentences": [], "ner": [],
"relations": []}
for sent in processed.sents:
# Get the tokens.
toks = [tok.text for tok in sent]
# Align entities.
entities_sent = get_entities_in_sent(sent, entities)
aligned, missed = align_entities(sent, entities_sent)
# Align relations.
relations_sent, keys_found = get_relations_in_sent(aligned, relations)
# Append to result list
scierc_format["sentences"].append(toks)
entities_to_scierc = [list(x) for x in aligned.values()]
scierc_format["ner"].append(entities_to_scierc)
scierc_format["relations"].append(relations_sent)
# Keep track of which entities and relations we've found and which we haven't.
entities_seen |= set(entities_sent["entity_id"])
entities_alignment |= set(aligned.keys())
entities_no_alignment |= set(missed.keys())
relations_found |= keys_found
# Update counts.
entities_missed = set(entities["entity_id"]) - entities_seen
relations_missed = set(relations.keys()) - relations_found
COUNTS["entities_correct"] += len(entities_alignment)
COUNTS["entities_misaligned"] += len(entities_no_alignment)
COUNTS["entities_missed"] += len(entities_missed)
COUNTS["entities_total"] += len(entities)
COUNTS["relations_found"] += len(relations_found)
COUNTS["relations_missed"] += len(relations_missed)
COUNTS['relations_total'] += len(relations)
return scierc_format
def one_fold(fold):
directory = "data/chemprot"
print(f"Processing fold {fold}.")
raw_subdirectory = "raw_data/ChemProt_Corpus"
df_abstracts = pd.read_table(f"{directory}/{raw_subdirectory}/chemprot_{fold}/chemprot_{fold}_abstracts.tsv",
header=None, keep_default_na=False,
names=["doc_key", "title", "abstract"])
df_entities = pd.read_table(f"{directory}/{raw_subdirectory}/chemprot_{fold}/chemprot_{fold}_entities.tsv",
header=None, keep_default_na=False,
names=["doc_key", "entity_id", "label", "char_start", "char_end", "text"])
df_relations = pd.read_table(f"{directory}/{raw_subdirectory}/chemprot_{fold}/chemprot_{fold}_relations.tsv",
header=None, keep_default_na=False,
names=["doc_key", "cpr_group", "eval_type", "label", "arg1", "arg2"])
res = []
for _, abstract in tqdm(df_abstracts.iterrows(), total=len(df_abstracts)):
to_append = one_abstract(abstract, df_entities, df_relations)
res.append(to_append)
# Write to file.
name_out = f"{directory}/processed_data/{fold}.jsonl"
with open(name_out, "w") as f_out:
for line in res:
print(json.dumps(line), file=f_out)
####################
# Driver
COUNTS = Counter()
for fold in ["training", "development", "test"]:
one_fold(fold)
counts =
|
pd.Series(COUNTS)
|
pandas.Series
|
import torch
import numpy as np
import pandas as pd
import os.path as osp
from torchfm.dataset.avazu import AvazuDataset
from torchfm.dataset.criteo import CriteoDataset
from torchfm.dataset.movielens import MovieLens1MDataset, MovieLens20MDataset
class MovieLens1MAugmentedDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, user_path, movie_path, sep='::'):
rating_columns = ['UserID', 'MovieID', 'Rating', 'Timestamp']
user_columns = ['UserID', 'Gender', 'Age', 'Occupation', 'Zipcode']
movie_columns = ['MovieID', 'Title', 'Genres']
# data = pd.read_csv(dataset_path, sep=sep, engine=engine, header=header).to_numpy()[:, :3]
rating = pd.read_csv(dataset_path, sep=sep, engine='python', header=0, names=rating_columns)
user =
|
pd.read_csv(user_path, sep=sep, engine='python', header=0, names=user_columns)
|
pandas.read_csv
|
"""
Helper functions specifically for the
profile asimilation coupled with Obs study
"""
import os, sys
import numpy as np
import pandas as pd
import xarray
from scipy.interpolate import interp1d
# manually add a2e-mmc repos to PYTHONPATH if needed
module_path = os.path.join(os.environ['HOME'],'tools','a2e-mmc')
if module_path not in sys.path:
sys.path.append(module_path)
from mmctools.helper_functions import calc_wind, covariance, power_spectral_density, theta
# manually add NWTC/datatools repo to PYTHONPATH
module_path = os.path.join(os.environ['HOME'],'tools')
if module_path not in sys.path:
sys.path.append(module_path)
from datatools.SOWFA6.postProcessing.averaging import PlanarAverages
from datatools.SOWFA6.postProcessing.probes import Probe
from datatools.SOWFA6.postProcessing.sourceHistory import SourceHistory
from datatools import openfoam_util
# ----------------------
# Loading reference data
# ----------------------
def load_wrf_reference_data(fpath):
"""
Load WRF reference data
"""
# Load data with xarray
xa = xarray.open_dataset(fpath)
# Convert to pandas dataframe
wrf = xa.to_dataframe()
# Convert to standard names
wrf.rename({'U':'u','V':'v','W':'w','UST':'u*'},
axis='columns',inplace=True)
# Compute wind speed and wind direction
wrf['wspd'], wrf['wdir'] = calc_wind(wrf)
return wrf
def load_radar_reference_data(fpath):
"""
Load TTU radar reference data
"""
radar = pd.read_csv(fpath,parse_dates=True,index_col=['datetime','height'])
# Extract scan types 0 and 1
radar_scan0 = radar.loc[radar['scan_type']==0].copy()
radar_scan1 = radar.loc[radar['scan_type']==1].copy()
return radar_scan0, radar_scan1
def load_tower_reference_data(fpath):
"""
Load TTU tower 10-min reference data
"""
return pd.read_csv(fpath,parse_dates=True,index_col=['datetime','height'])
def load_tower_reference_spectra(fpath,times,heights,interval,window_size):
"""
Load TTU tower 1-Hz data and compute spectra
"""
# Load data
tower = pd.read_csv(fpath,parse_dates=True,index_col=['datetime','height'])
# Calculate some QoI
tower['wspd'], tower['wdir'] = calc_wind(tower)
tower['theta'] = theta(tower['Ts'],tower['p'])
# Interpolate data to specified heights
tower_hgt = interpolate_to_heights(tower,heights)
# Reindex if needed
tower_hgt = reindex_if_needed(tower_hgt)
# Compute spectra
tower_spectra = calc_spectra(tower_hgt,times,heights,interval,window_size)
return tower_spectra
# -------------------------------------------------
# Calculating statistics and quantities of interest
# -------------------------------------------------
def calc_QOIs(df):
"""
Calculate derived quantities (IN PLACE)
"""
df['wspd'],df['wdir'] = calc_wind(df)
df['u*'] = (df['uw']**2 + df['vw']**2)**0.25
df['TKE'] = 0.5*(df['uu'] + df['vv'] + df['ww'])
ang = np.arctan2(df['v'],df['u'])
df['TI'] = df['uu']*np.cos(ang)**2 + 2*df['uv']*np.sin(ang)*np.cos(ang) + df['vv']*np.sin(ang)**2
df['TI'] = np.sqrt(df['TI']) / df['wspd']
# ------------------------------
# Calculating turbulence spectra
# ------------------------------
def interpolate_to_heights(df,heights):
"""
Interpolate data in dataframe to specified heights
and return a new dataframe
"""
# Unstack to single height index (= most time-consuming operation)
unstacked = df.unstack(level='datetime')
# Interpolate to specified heights
f = interp1d(unstacked.index,unstacked,axis=0,fill_value='extrapolate')
for hgt in heights:
unstacked.loc[hgt] = f(hgt)
# Restack and set index
df_out = unstacked.loc[heights].stack().reset_index().set_index(['datetime','height']).sort_index()
return df_out
def reindex_if_needed(df,dt=None):
"""
Check whether timestamps are equidistant with step dt (in seconds). If dt is not
specified, dt is equal to the minimal timestep in the dataframe. If timestamps
are not equidistant, interpolate to equidistant time grid with step dt.
"""
dts = np.diff(df.index.get_level_values(0).unique())/pd.to_timedelta(1,'s')
# If dt not specified, take dt as the minimal timestep
if dt is None:
dt = np.min(dts)
if not np.allclose(dts,dt):
# df is missing some timestamps, which will cause a problem when computing spectra.
# therefore, we first reindex the dataframe
start = df.index.levels[0][0]
end = df.index.levels[0][-1]
new_index = pd.date_range(start,end,freq=pd.to_timedelta(dt,'s'),name='datetime')
return df.unstack().reindex(new_index).interpolate(method='index').stack()
else:
return df
def calc_spectra(df,times,heights,interval,window_size):
"""
Calculate spectra for a given number of times and heights
and return a new dataframe
"""
dflist = []
for tstart in times:
for height in heights:
spectra = power_spectral_density(df.xs(height,level='height'),
tstart=
|
pd.to_datetime(tstart)
|
pandas.to_datetime
|
from itertools import groupby
from collections import defaultdict
import pysam
import pandas as pd
import celescope.tools.utils as utils
from celescope.tools.count import Count, get_opts_count
class Count_capture_rna(Count):
def bam2table(self):
"""
read probe file
"""
probe_gene_count_dict = utils.genDict(dim=4, valType=int)
samfile = pysam.AlignmentFile(self.bam, "rb")
with open(self.count_detail_file, 'wt') as fh1:
fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')
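            # reads are grouped by cell barcode, the first '_'-separated field of the query name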
def keyfunc(x): return x.query_name.split('_', 1)[0]
for _, g in groupby(samfile, keyfunc):
gene_umi_dict = defaultdict(lambda: defaultdict(int))
for seg in g:
(barcode, umi, probe) = seg.query_name.split('_')[:3]
if probe != 'None':
probe_gene_count_dict[probe]['total'][barcode][umi] += 1
if seg.has_tag('XT'):
geneID = seg.get_tag('XT')
geneName = self.id_name[geneID]
probe_gene_count_dict[probe][geneName][barcode][umi] += 1
else:
probe_gene_count_dict[probe]['None'][barcode][umi] += 1
if not seg.has_tag('XT'):
continue
geneID = seg.get_tag('XT')
gene_umi_dict[geneID][umi] += 1
for gene_id in gene_umi_dict:
Count.correct_umi(gene_umi_dict[gene_id])
# output
for gene_id in gene_umi_dict:
for umi in gene_umi_dict[gene_id]:
fh1.write('%s\t%s\t%s\t%s\n' % (barcode, gene_id, umi,
gene_umi_dict[gene_id][umi]))
# out probe
row_list = []
for probe in probe_gene_count_dict:
for geneName in probe_gene_count_dict[probe]:
barcode_count = len(probe_gene_count_dict[probe][geneName])
umi_count = 0
read_count = 0
                for barcode in probe_gene_count_dict[probe][geneName]:
                    # count distinct UMIs once per barcode, then sum reads over those UMIs
                    umi_count += len(probe_gene_count_dict[probe][geneName][barcode])
                    for umi in probe_gene_count_dict[probe][geneName][barcode]:
                        read_count += probe_gene_count_dict[probe][geneName][barcode][umi]
row_list.append({
'probe': probe,
'gene': geneName,
'barcode_count': barcode_count,
'read_count': read_count,
'UMI_count': umi_count
})
df_probe = pd.DataFrame(row_list,
columns=['probe', 'gene', 'barcode_count', 'read_count', 'UMI_count'])
df_probe = df_probe.groupby(['probe']).apply(
lambda x: x.sort_values('UMI_count', ascending=False)
)
return df_probe
def run(self):
df_probe = self.bam2table()
df_probe.to_csv(f'{self.outdir}/{self.sample}_probe_gene_count.tsv', sep='\t', index=False)
df =
|
pd.read_table(self.count_detail_file, header=0)
|
pandas.read_table
|
"""
By <NAME>
nickc1.github.io
Functions to query the NDBC (http://www.ndbc.noaa.gov/).
The realtime data for all of their buoys can be found at:
http://www.ndbc.noaa.gov/data/realtime2/
Info about all of noaa data can be found at:
http://www.ndbc.noaa.gov/docs/ndbc_web_data_guide.pdf
What all the values mean:
http://www.ndbc.noaa.gov/measdes.shtml
Each buoy has the data:
File Parameters
---- ----------
.data_spec Raw Spectral Wave Data
.ocean Oceanographic Data
.spec Spectral Wave Summary Data
.supl Supplemental Measurements Data
.swdir Spectral Wave Data (alpha1)
.swdir2 Spectral Wave Data (alpha2)
.swr1 Spectral Wave Data (r1)
.swr2 Spectral Wave Data (r2)
.txt Standard Meteorological Data
Example:
import buoypy as bp
# Get the last 45 days of data
rt = bp.realtime(41013) #frying pan shoals buoy
    wave_data = rt.spec() #get spectral wave summary data
wave_data.head()
Out[7]:
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
2016-02-04 17:42:00 1.6 1.3 7.1 0.9 4.5 S S STEEP 5.3 169
2016-02-04 16:42:00 1.7 1.5 7.7 0.9 5.0 S S STEEP 5.4 174
2016-02-04 15:41:00 2.0 0.0 NaN 2.0 7.1 NaN S STEEP 5.3 174
2016-02-04 14:41:00 2.0 1.2 7.7 1.5 5.9 SSE SSE STEEP 5.5 167
2016-02-04 13:41:00 2.0 1.7 7.1 0.9 4.8 S SSE STEEP 5.7 175
TODO:
Make functions with except statements always spit out the same
column headings.
"""
import pandas as pd
import numpy as np
import datetime
import urllib.request as urllib2  # used by historic_data.get_all_stand_meteo to probe available years
class realtime:
def __init__(self, buoy):
self.link = 'http://www.ndbc.noaa.gov/data/realtime2/{}'.format(buoy)
def data_spec(self):
"""
        Get the raw spectral wave data from the buoy. The separation
        frequency is dropped to keep the data clean.
Parameters
----------
buoy : string
Buoy number ex: '41013' is off wilmington, nc
Returns
-------
df : pandas dataframe (date, frequency)
data frame containing the raw spectral data. index is the date
and the columns are each of the frequencies
"""
link = "{}.{}".format(self.link, 'data_spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, skiprows=1, header=None,
parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
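        # spectral densities and their frequencies alternate across the remaining columns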
specs = df.iloc[:,1::2]
freqs = df.iloc[0,2::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def ocean(self):
"""
Retrieve oceanic data. For the buoys explored,
O2%, O2PPM, CLCON, TURB, PH, EH were always NaNs
Returns
-------
df : pandas dataframe
Index is the date and columns are:
DEPTH m
OTMP degc
COND mS/cm
SAL PSU
O2% %
        O2PPM   ppm
CLCON ug/l
TURB FTU
PH -
EH mv
"""
link = "{}.{}".format(self.link, 'ocean')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['DEPTH','OTMP','COND','SAL']
df[cols] = df[cols].astype(float)
return df
def spec(self):
"""
Get the spectral wave data from the ndbc. Something is wrong with
the data for this parameter. The columns seem to change randomly.
Refreshing the data page will yield different column names from
minute to minute.
parameters
----------
buoy : string
Buoy number ex: '41013' is off wilmington, nc
Returns
-------
df : pandas dataframe
data frame containing the spectral data. index is the date
and the columns are:
HO, SwH, SwP, WWH, WWP, SwD, WWD, STEEPNESS, AVP, MWD
OR
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
"""
link = "{}.{}".format(self.link, 'spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
try:
#units are in the second row drop them
#df.columns = df.columns + '('+ df.iloc[0] + ')'
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['WVHT','SwH','SwP','WWH','WWP','APD','MWD']
df[cols] = df[cols].astype(float)
except:
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['H0','SwH','SwP','WWH','WWP','AVP','MWD']
df[cols] = df[cols].astype(float)
return df
def supl(self):
"""
Get supplemental data
Returns
-------
data frame containing the spectral data. index is the date
and the columns are:
PRES hpa
PTIME hhmm
WSPD m/s
WDIR degT
WTIME hhmm
"""
link = "{}.{}".format(self.link, 'supl')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['PRES','PTIME','WSPD','WDIR','WTIME']
df[cols] = df[cols].astype(float)
return df
def swdir(self):
"""
Spectral wave data for alpha 1.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swdir')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,na_values=999,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swdir2(self):
"""
Spectral wave data for alpha 2.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swdir2')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swr1(self):
"""
Spectral wave data for r1.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swr1')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def swr2(self):
"""
Spectral wave data for r2.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swr2')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link,delim_whitespace=True,skiprows=1,
header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,0::2]
freqs = df.iloc[0,1::2]
specs.columns=freqs
#remove the parenthesis from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
def txt(self):
"""
Retrieve standard Meteorological data. NDBC seems to be updating
the data with different column names, so this metric can return
two possible data frames with different column names:
Returns
-------
df : pandas dataframe
Index is the date and the columns can be:
['WDIR','WSPD','GST','WVHT','DPD','APD','MWD',
'PRES','ATMP','WTMP','DEWP','VIS','PTDY','TIDE']
or
['WD','WSPD','GST','WVHT','DPD','APD','MWD','BARO',
'ATMP','WTMP','DEWP','VIS','PTDY','TIDE']
"""
link = "{}.{}".format(self.link, 'txt')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
try:
#first column is units, so drop it
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['WDIR','WSPD','GST','WVHT','DPD','APD','MWD',
'PRES','ATMP','WTMP','DEWP','VIS','PTDY','TIDE']
df[cols] = df[cols].astype(float)
except:
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['WD','WSPD','GST','WVHT','DPD','APD','MWD','BARO',
'ATMP','WTMP','DEWP','VIS','PTDY','TIDE']
df[cols] = df[cols].astype(float)
df.index.name='Date'
return df
################################################
################################################
class historic_data:
    def __init__(self, buoy, year, year_range=None):
        # store the arguments; get_all_stand_meteo relies on self.buoy and self.year_range
        self.buoy = buoy
        self.year = year
        self.year_range = year_range
        link = 'http://www.ndbc.noaa.gov/view_text_file.php?filename='
        link += '{}h{}.txt.gz&dir=data/historical/'.format(buoy, year)
        self.link = link
def get_stand_meteo(self,link = None):
'''
Standard Meteorological Data. Data header was changed in 2007. Thus
the need for the if statement below.
WDIR Wind direction (degrees clockwise from true N)
WSPD Wind speed (m/s) averaged over an eight-minute period
GST Peak 5 or 8 second gust speed (m/s)
WVHT Significant wave height (meters) is calculated as
the average of the highest one-third of all of the
wave heights during the 20-minute sampling period.
DPD Dominant wave period (seconds) is the period with the maximum wave energy.
APD Average wave period (seconds) of all waves during the 20-minute period.
MWD The direction from which the waves at the dominant period (DPD) are coming.
(degrees clockwise from true N)
PRES Sea level pressure (hPa).
ATMP Air temperature (Celsius).
WTMP Sea surface temperature (Celsius).
DEWP Dewpoint temperature
VIS Station visibility (nautical miles).
PTDY Pressure Tendency
TIDE The water level in feet above or below Mean Lower Low Water (MLLW).
'''
link = self.link + 'stdmet/'
#combine the first five date columns YY MM DD hh and make index
df = pd.read_csv(link, header=0, delim_whitespace=True, dtype=object,
na_values=[99,999,9999,99.,999.,9999.])
#2007 and on format
if df.iloc[0,0] =='#yr':
df = df.rename(columns={'#YY': 'YY'}) #get rid of hash
#make the indices
df.drop(0, inplace=True) #first row is units, so drop them
d = df.YY + ' ' + df.MM+ ' ' + df.DD + ' ' + df.hh + ' ' + df.mm
ind = pd.to_datetime(d, format="%Y %m %d %H %M")
df.index = ind
#drop useless columns and rename the ones we want
df.drop(['YY','MM','DD','hh','mm'], axis=1, inplace=True)
df.columns = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD', 'APD', 'MWD',
'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
#before 2006 to 2000
else:
date_str = df.YYYY + ' ' + df.MM + ' ' + df.DD + ' ' + df.hh
ind = pd.to_datetime(date_str,format="%Y %m %d %H")
df.index = ind
#some data has a minute column. Some doesn't.
if 'mm' in df.columns:
df.drop(['YYYY','MM','DD','hh','mm'], axis=1, inplace=True)
else:
df.drop(['YYYY','MM','DD','hh'], axis=1, inplace=True)
df.columns = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD', 'APD',
'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
# all data should be floats
df = df.astype('float')
return df
def get_all_stand_meteo(self):
"""
        Retrieves all the standard meteorological data. Calls get_stand_meteo.
It also checks to make sure that the years that were requested are
available. Data is not available for the same years at all the buoys.
Returns
-------
df : pandas dataframe
Contains all the data from all the years that were specified
in year_range.
"""
start,stop = self.year_range
#see what is on the NDBC so we only pull the years that are available
links = []
for ii in range(start,stop+1):
base = 'http://www.ndbc.noaa.gov/view_text_file.php?filename='
end = '.txt.gz&dir=data/historical/stdmet/'
link = base + str(self.buoy) + 'h' + str(ii) + end
try:
urllib2.urlopen(link)
links.append(link)
except:
print(str(ii) + ' not in records')
#need to also retrieve jan, feb, march, etc.
month = ['Jan','Feb','Mar','Apr','May','Jun',
'Jul','Aug','Sep','Oct','Nov','Dec']
k = [1,2,3,4,5,6,7,8,9,'a','b','c'] #for the links
for ii in range(len(month)):
mid = '.txt.gz&dir=data/stdmet/'
link = base + str(self.buoy) + str(k[ii]) + '2016' + mid + str(month[ii]) +'/'
try:
urllib2.urlopen(link)
links.append(link)
except:
print(str(month[ii]) + '2016' + ' not in records')
print(link)
# start grabbing some data
df=
|
pd.DataFrame()
|
pandas.DataFrame
|
import pytest
import numpy as np
import pandas as pd
EXP_IDX = pd.MultiIndex(levels=[['model_a'], ['scen_a', 'scen_b']],
labels=[[0, 0], [0, 1]], names=['model', 'scenario'])
def test_set_meta_no_name(meta_df):
idx = pd.MultiIndex(levels=[['a_scenario'], ['a_model'], ['some_region']],
labels=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
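    # an unnamed series with no explicit name argument cannot be used to set meta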
pytest.raises(ValueError, meta_df.set_meta, s)
def test_set_meta_as_named_series(meta_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
labels=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx, name='meta_values')
meta_df.set_meta(s)
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(meta_df['meta_values'], exp)
def test_set_meta_as_unnamed_series(meta_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
labels=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
meta_df.set_meta(s, name='meta_values')
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(meta_df['meta_values'], exp)
def test_set_meta_non_unique_index_fail(meta_df):
idx = pd.MultiIndex(levels=[['model_a'], ['scen_a'], ['reg_a', 'reg_b']],
labels=[[0, 0], [0, 0], [0, 1]],
names=['model', 'scenario', 'region'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, meta_df.set_meta, s)
def test_set_meta_non_existing_index_fail(meta_df):
idx = pd.MultiIndex(levels=[['model_a', 'fail_model'],
['scen_a', 'fail_scenario']],
labels=[[0, 1], [0, 1]], names=['model', 'scenario'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, meta_df.set_meta, s)
def test_set_meta_by_df(meta_df):
df = pd.DataFrame([
['model_a', 'scen_a', 'some_region', 1],
], columns=['model', 'scenario', 'region', 'col'])
meta_df.set_meta(meta=0.3, name='meta_values', index=df)
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(meta_df['meta_values'], exp)
def test_set_meta_as_series(meta_df):
s = pd.Series([0.3, 0.4])
meta_df.set_meta(s, 'meta_series')
exp = pd.Series(data=[0.3, 0.4], index=EXP_IDX, name='meta_series')
pd.testing.assert_series_equal(meta_df['meta_series'], exp)
def test_set_meta_as_int(meta_df):
meta_df.set_meta(3.2, 'meta_int')
exp =
|
pd.Series(data=[3.2, 3.2], index=EXP_IDX, name='meta_int')
|
pandas.Series
|
import os
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.sas.sasreader import read_sas
# CSV versions of test xpt files were obtained using the R foreign library
# Numbers in a SAS xport file are always float64, so need to convert
# before making comparisons.
def numeric_as_float(data):
for v in data.columns:
if data[v].dtype is np.dtype("int64"):
data[v] = data[v].astype(np.float64)
class TestXport:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "sas", "data")
self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt")
self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt")
self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt")
self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt")
with td.file_leak_context():
yield
@pytest.mark.slow
def test1_basic(self):
# Tests with DEMO_G.xpt (all numeric file)
# Compare to this
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
numeric_as_float(data_csv)
# Read full file
data = read_sas(self.file01, format="xport")
tm.assert_frame_equal(data, data_csv)
num_rows = data.shape[0]
# Test reading beyond end of file
with read_sas(self.file01, format="xport", iterator=True) as reader:
data = reader.read(num_rows + 100)
assert data.shape[0] == num_rows
# Test incremental read with `read` method.
with read_sas(self.file01, format="xport", iterator=True) as reader:
data = reader.read(10)
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
# Test incremental read with `get_chunk` method.
with read_sas(self.file01, format="xport", chunksize=10) as reader:
data = reader.get_chunk()
|
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
|
pandas._testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 12:59:05 2020
@author: <NAME>
Explanation: Converts a categorical variable into 0/1 indicator (dummy) columns.
"""
import pandas as pd
df_train =
|
pd.read_csv("train.csv")
|
pandas.read_csv
|
""" Construct dataset """
import sys
import math
import pandas as pd
import numpy as np
import csv
def calc_gaps(station):
"""Calculate gaps in time series"""
df = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
df = df.set_index(['Date'])
df.index = pd.to_datetime(df.index)
dates = df.index.values
first_date = dates[0]
last_date = dates[-1]
print('Data from {0} to {1}'.format(first_date, last_date))
total_range = last_date - first_date
total_range_seconds = total_range / np.timedelta64(1, 's')
last_read_date = first_date
gaps = []
    total_gap = 0
for d in dates:
diff = d - last_read_date
seconds = diff / np.timedelta64(1, 's')
hours = diff / np.timedelta64(1, 'h')
if hours > 72: # met stations
# if hours > 24: # flow stations
total_gap = total_gap + seconds
gaps.append(seconds)
last_read_date = d
print('Number of gaps {0}'.format(len(gaps)))
years = math.floor(total_gap / 3600 / 24 / 365.25)
days = math.floor((total_gap / 3600 / 24 % 365.25))
print('Total gap {0} years'.format(total_gap / 3600 / 24 / 365.25))
print('Total gap {0} years {1} days'.format(years, days))
total_left = total_range_seconds - total_gap
years_left = math.floor(total_left / 3600 / 24 / 365.25)
days_left = math.floor((total_left / 3600 / 24 % 365.25))
print('Total left {0} years'.format(total_left / 3600 / 24 / 365.25))
print('Total left {0} years {1} days'.format(years_left, days_left))
# gap_file = '{0}-gaps.txt'.format(station)
# np.savetxt(gap_file, gaps, delimiter=',', fmt="%s")
def calc_histogram(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 10)].count()
i2 = df[(df['Value'] > 10) & (df['Value'] <= 50)].count()
i3 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i4 = df[(df['Value'] > 100) & (df['Value'] <= 200)].count()
i5 = df[(df['Value'] > 200) & (df['Value'] <= 300)].count()
i6 = df[(df['Value'] > 300) & (df['Value'] <= 400)].count()
i7 = df[(df['Value'] > 400) & (df['Value'] <= 500)].count()
i8 = df[(df['Value'] > 500) & (df['Value'] <= 1000)].count()
i9 = df[(df['Value'] > 1000)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 10: {0}'.format(i1['Value']/total_count['Value']))
print(' 10 - 50: {0}'.format(i2['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i3['Value']/total_count['Value']))
print('100 - 200: {0}'.format(i4['Value']/total_count['Value']))
print('200 - 300: {0}'.format(i5['Value']/total_count['Value']))
print('300 - 400: {0}'.format(i6['Value']/total_count['Value']))
print('400 - 500: {0}'.format(i7['Value']/total_count['Value']))
print('500 - 1000: {0}'.format(i8['Value']/total_count['Value']))
print(' > 1000: {0}'.format(i9['Value']/total_count['Value']))
def calc_histogram4(station1, station2):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram3(station1, station2, station3):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index =
|
pd.to_datetime(raw2.index)
|
pandas.to_datetime
|
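For reference, a self-contained illustration of pandas.to_datetime as used above to re-parse a string index before measuring gaps; the timestamps are made up.
import pandas as pd

df = pd.DataFrame(
    {"Value": [1.0, 2.0, 3.0]},
    index=["2020-01-01 00:00", "2020-01-01 01:00", "2020-01-01 05:00"],
)
df.index = pd.to_datetime(df.index)

# With a datetime64 index, gaps between consecutive readings fall out of diff().
gaps = df.index.to_series().diff()
print(gaps.dt.total_seconds())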
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/schools8_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W_0ED20uQKha"
# In this notebook, we fit a hierarchical Bayesian model to the "8 schools" dataset.
# See also https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# + id="HXRokZL1QPvB"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="C5EHDB-rQSIa" colab={"base_uri": "https://localhost:8080/"} outputId="d6d8b024-96ba-4014-97d9-ddef6d88349e"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import theano.tensor as tt
import theano
# #!pip install arviz
import arviz as az
# + id="sKlvHNY6RUaP"
# !mkdir ../figures
# + [markdown] id="-jby_J17HqBT"
# # Data
# + id="8pNC3UANQjeO" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="8f91ec2e-e81b-452b-dcf7-8c9f6ddda82a"
# https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# Data of the Eight Schools Model
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
print(np.mean(y))
print(np.median(y))
names = []
for t in range(8):
    names.append('{}'.format(t))
# Plot raw data
fig, ax = plt.subplots()
y_pos = np.arange(8)
ax.errorbar(y,y_pos, xerr=sigma, fmt='o')
ax.set_yticks(y_pos)
ax.set_yticklabels(names)
ax.invert_yaxis() # labels read top-to-bottom
plt.title('8 schools')
plt.savefig('../figures/schools8_data.png')
plt.show()
# + [markdown] id="vcAdKbnXHsKE"
# # Centered model
# + id="-Lxa_JgfQmAI" colab={"base_uri": "https://localhost:8080/", "height": 723} outputId="573cdde1-a178-4949-de75-af036d02f6dd"
# Centered model
with pm.Model() as Centered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with Centered_eight:
trace_centered = pm.sample(1000, chains=4, return_inferencedata=False)
pm.summary(trace_centered).round(2)
# PyMC3 gives multiple warnings about divergences
# Also, see r_hat ~ 1.01, ESS << nchains*1000, especially for sigma_alpha
# We can solve these problems below by using a non-centered parameterization.
# In practice, for this model, the results are very similar.
# + id="pOrDPo_lQob_" colab={"base_uri": "https://localhost:8080/"} outputId="0cbd7421-2754-43c2-a468-7250ae30b8d1"
# Display the total number and percentage of divergent chains
diverging = trace_centered['diverging']
print('Number of Divergent Chains: {}'.format(diverging.nonzero()[0].size))
diverging_pct = diverging.nonzero()[0].size / len(trace_centered) * 100
print('Percentage of Divergent Chains: {:.1f}'.format(diverging_pct))
# + id="bYbhbC-kT8GV" outputId="77b27048-57ad-456c-f6ea-7bbeee7d1d94" colab={"base_uri": "https://localhost:8080/"}
dir(trace_centered)
# + id="9ODVo7cLUKs8" outputId="505c9b7c-6b7f-4b12-be22-c67809d19641" colab={"base_uri": "https://localhost:8080/"}
trace_centered.varnames
# + id="gClLFgqHVuW1" outputId="7447a76c-0e85-4d11-ca0a-fd24babe57dd" colab={"base_uri": "https://localhost:8080/", "height": 356}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_centered_acf_combined.png', dpi=300)
# + id="uWPD88BxTkMj" outputId="ed94b053-2ebc-41f1-91c3-12f0d7eec423" colab={"base_uri": "https://localhost:8080/", "height": 452}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha']);
plt.savefig('schools8_centered_acf.png', dpi=300)
# + id="Uv1QEiQOQtGc" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="7ce96252-9002-4f18-a64c-c55046f5415d"
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=True);
plt.savefig('schools8_centered_forest_combined.png', dpi=300)
# + id="cgzmwxVGZxub" outputId="8979ca4c-d9df-43bb-847e-bad33b2258bb" colab={"base_uri": "https://localhost:8080/", "height": 542}
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=False);
plt.savefig('schools8_centered_forest.png', dpi=300)
# + [markdown] id="BkphbYr_HxOj"
# # Non-centered
# + id="jLFiQS0ZQvR4" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="8c0caa4b-4aa4-4685-f8ef-ef23ba60b82c"
# Non-centered parameterization
with pm.Model() as NonCentered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha_offset = pm.Normal('alpha_offset', mu=0, sigma=1, shape=J)
alpha = pm.Deterministic('alpha', mu_alpha + sigma_alpha * alpha_offset)
#alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with NonCentered_eight:
trace_noncentered = pm.sample(1000, chains=4)
pm.summary(trace_noncentered).round(2)
# Samples look good: r_hat = 1, ESS ~= nchains*1000
# + id="RyB5Qu-MQxuM" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="4a21b628-5b80-4ae4-a148-a208f33d6d43"
with NonCentered_eight:
az.plot_autocorr(trace_noncentered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_noncentered_acf_combined.png', dpi=300)
# + id="JHmvYgsAQzuK" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="5ed95cc6-49b8-4bc6-acca-59f7c5f5c06b"
with NonCentered_eight:
az.plot_forest(trace_noncentered, var_names="alpha",
combined=True, hdi_prob=0.95);
plt.savefig('schools8_noncentered_forest_combined.png', dpi=300)
# + id="vb8tzwUhXlW0" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="efad1751-55c1-4d1d-97b8-198f67af8935"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha",
combined=True, hdi_prob=0.95);
plt.axvline(np.mean(y), color='k', linestyle='--')
# + id="JETMmNSuZUV7" colab={"base_uri": "https://localhost:8080/", "height": 647} outputId="835e3d2c-7874-41b5-d22e-d64e18fae9ab"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha", kind='ridgeplot',
combined=True, hdi_prob=0.95);
# + [markdown] id="Q_SYYgL0H13G"
# # Funnel of hell
# + id="E3CtP2kcT4s5" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="17af872c-3d56-48e6-be05-a5aab0b4aa39"
# Plot the "funnel of hell"
# Based on
# https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(trace_centered['mu_alpha'], name='mu_alpha')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log_sigma_alpha')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(sigma)');
#axs[0].axhline(0.01)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
y =
|
pd.Series(trace_noncentered['log_sigma_alpha'], name='log_sigma_alpha')
|
pandas.Series
|
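Wrapping the raw trace arrays in pandas.Series, as the plotting cell above does, mainly attaches a name that can be reused for axis labels. A stand-in version with random data instead of an MCMC trace:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x = pd.Series(rng.normal(size=500), name="mu_alpha")
y = pd.Series(rng.normal(size=500), name="log_sigma_alpha")

fig, ax = plt.subplots()
ax.plot(x, y, ".")
ax.set(xlabel=x.name, ylabel=y.name)
plt.show()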
__version__ = '0.1.0'
# TODO:
# - automatic credential fetching. Renew stale creds.
# - change all to_exclude parameters to to_keep, so is explicit / can use in docs.
# - general handling of a name query versus URI (e.g. via a decorator)
# - need to remove albums artists are only contributers to, or add field?
import pandas as pd
from functools import partial
from itertools import chain
CONFIG_PATH = '~/.tidyspotify.yml'
COLS_EXCLUDE_TRACKS = ('artists', 'available_markets', 'external_urls', 'is_local', 'disc_number')
# Features that may be useful for analyses ----
FEATURES_TRACK = (
'acousticness', 'danceability', 'energy', 'instrumentalness',
'liveness', 'loudness', 'speechiness', 'valence', 'tempo', 'key',
'time_signature'
)
FEATURES_ALBUM = (
'album_popularity', 'release_date'
)
# Utils ---------------------------------------------------------------------------------
def is_uri(s):
    # Placeholder per the TODO above: currently treats every input string as a URI.
    return True
def exclude_fields(to_exclude, d):
return {k: v for k, v in d.items() if k not in to_exclude}
def keep_fields(to_keep, d):
return {k: d[k] for k in to_keep if k in d}
def row_filter(fields, exclude = True):
f = exclude_fields if exclude else keep_fields
return partial(f, frozenset(fields))
def prefix_merge(left, right, prefix, *args, **kwargs):
"""Merge two dataframes, but prefix rather than suffix shared cols"""
shared = set(left.columns).intersection(set(right.columns))
new_left = left.rename(columns = {x: prefix[0] + x for x in shared})
new_right = right.rename(columns = {x: prefix[1] + x for x in shared})
return new_left.merge(new_right, *args, **kwargs)
# Client --------------------------------------------------------------------------------
# note: this currently takes the hacky approach of having all functions use the
# client (sp) defined here
import spotipy
from spotipy import SpotifyException
from spotipy.oauth2 import SpotifyClientCredentials
from functools import wraps
from pathlib import Path
import yaml
import os
default_client = None
class FileCredentialManager(SpotifyClientCredentials):
def __init__(self, client_id=None, client_secret=None, proxies=None):
"""Try Authenticating in this order:
1. client_id and secret args
        2. SPOTIPY_CLIENT_ID and SPOTIPY_CLIENT_SECRET environment variables
3. ~/.tidyspotify.yml config file
"""
p = Path(CONFIG_PATH).expanduser()
if client_id or client_secret or os.environ.get('SPOTIPY_CLIENT_ID'):
return super().__init__(client_id, client_secret, proxies)
elif p.exists():
            config = yaml.load(p.open(), Loader=yaml.SafeLoader)
return super().__init__(proxies = proxies, **config)
else:
return super().__init__()
def save_credentials(verify = True):
"""Login to spotify api. Saves credentials to file."""
config = {
'client_id': input('client id: '),
'client_secret': input('client secret: ')
}
if verify:
sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials(**config))
try:
sp.search("The Beatles")
except SpotifyException as e:
# TODO: informative message
raise
path = Path(CONFIG_PATH).expanduser()
print("Writing credentials to %s" % path.absolute())
yaml.dump(config, path.open('w'), default_flow_style = False)
def default_login():
global default_client
client_credentials_manager = FileCredentialManager()
default_client = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
return default_client
def login(f):
@wraps(f)
def wrapper(*args, client = None, **kwargs):
if client is None:
client = default_login() if default_client is None else default_client
return f(*args, client = default_client, **kwargs)
return wrapper
# API Main Entrypoints -------------------------------------------------------------------
@login
def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):
"""Return DataFrame with artist, album, and track data.
Parameters:
q: An artist to retrieve data for.
interactive: If true, prompts you to choose between similar artists.
genre_delimiter: Collapse genre column into strings like "genre1-!!-genre2".
If set to None, keeps genres as a list for each row.
"""
query = client.search(q = q, type = "artist")
items = query['artists']['items']
if not items:
raise Exception("No artists found")
if interactive:
print("Select the artist to use...")
print("\n".join("[{}]: {}".format(ii, entry['name']) for ii, entry in enumerate(items)))
artist_indx = int(input("artist number: ").strip())
if artist_indx > len(items):
raise IndexError("Selected number higher than options available")
artist = items[artist_indx]
else:
artist = items[0]
# get artist genres
artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None
# get artist albums
albums = get_artist_albums(artist['id'])
albums['artist_genres'] = artist_genres
# get album popularity
album_popularity = get_album_popularity(albums.id)
# get album tracks
tracks = get_album_tracks(albums.id)
# get track audio features
features = get_track_features(tracks.id)
# get track popularity
popularity = get_track_popularity(tracks.id)
album_data = albums.merge(album_popularity, 'left', 'id')
track_data = tracks \
.drop(columns = ['type']) \
.merge(popularity, 'left', 'id') \
.merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')
merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')
if to_file:
merged.to_csv(to_file)
return merged
@login
def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):
"""Return DataFrame of recommended tracks.
Arguments:
artists: an optional sequence of artists to seed recommendation
genres: an optional sequence of genres to seed recommendation
limit: number of tracks to return
features: whether to include track features in output
"""
recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)
tracks = recs['tracks']
# TODO: need a compose function...
to_keep = (
'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',
'explicit', 'id'
)
rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))
out = pd.DataFrame(rows)
track_ids = [row['id'] for row in rows]
if features:
extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']
return out.merge(
get_track_features(track_ids).drop(columns = extra_cols),
on = "id"
)
return out
def _hoist_track_info(track):
"""Mutates track with artist and album info at top level."""
track['album_name'] = track['album']['name']
artist = track['artists'][0]
track['artist_name'] = artist['name']
return track
@login
def get_recommendation_genre_seeds(client = None):
"""Return genres that can be used in get_recommendations"""
return client.recommendation_genre_seeds()['genres']
# API Functions -------------------------------------------------------------------------
@login
def get_artist_albums(
artist_id,
to_exclude = ('available_markets', 'artists', 'external_urls', 'href',
'images', 'type', 'uri', 'release_date_precision',
'album_group', 'total_tracks'),
to_df = True,
client = None):
"""Return albums belonging to an artist.
Arguments:
artist_id: artist uri or an artist name to search
to_exclude: fields to exclude from each row of data
to_df: return a DataFrame rather than a list
"""
# artist_name artist_uri album_uri album_name album_img album_type is_collaboration
if not is_uri(artist_id):
query = client.search(q = artist_id, type = "artist")
items = query['artists']['items']
if not items:
raise Exception("No artist matching search: %s" %artist_id)
artist_id = items[0]['id']
# TODO: pass args?
albums = client.artist_albums(artist_id)
row_filter(['id'])
items = albums['items']
for entry in items:
artist = entry['artists'][0]
entry['artist_name'] = artist['name']
entry['artist_uri'] = artist['uri']
data = list(map(row_filter(to_exclude), items))
return pd.DataFrame(data) if to_df else data
@login
def get_album_popularity(album_ids, to_df = True, client = None):
query = client.albums(album_ids)
data = list(map(row_filter(['id','popularity'], exclude = False), query['albums']))
return pd.DataFrame(data) if to_df else data
@login
def get_album_tracks(
album_ids,
to_exclude = COLS_EXCLUDE_TRACKS,
to_df = True,
client = None
):
items = chain.from_iterable(map(lambda x: _get_album_tracks(x, client), album_ids))
rows = list(map(row_filter(to_exclude), items))
return pd.DataFrame(rows) if to_df else rows
def _get_album_tracks(album_id, client):
items = client.album_tracks(album_id)['items']
for item in items:
item['album_id'] = album_id
yield item
@login
def get_track_features(track_ids, to_df = True, client = None):
tracks = []
for ii in range(0, len(track_ids), 99):
tracks.extend(client.audio_features(track_ids[ii:ii+99]))
return
|
pd.DataFrame(tracks)
|
pandas.DataFrame
|
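get_track_features above leans on pandas.DataFrame turning a list of dicts (one per track) into rows with keys as columns. A toy version with invented audio features:
import pandas as pd

tracks = [
    {"id": "t1", "danceability": 0.71, "energy": 0.55},
    {"id": "t2", "danceability": 0.42, "energy": 0.80},
]
features = pd.DataFrame(tracks)
print(features)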
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with
|
tm.assert_produces_warning(FutureWarning)
|
pandas._testing.assert_produces_warning
|
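The completion above uses pandas' internal assert_produces_warning context manager. A hedged sketch of how it behaves, with the warning raised by hand purely for illustration (pandas._testing is semi-private, so the import path is an assumption):
import warnings
import pandas._testing as tm

# Passes because the expected warning category is emitted inside the block.
with tm.assert_produces_warning(FutureWarning):
    warnings.warn("this call is deprecated", FutureWarning)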
# Copyright 2020 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import six
import json
import requests
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
from quantrocket.cli.utils.stream import to_bytes
from quantrocket.cli.utils.files import write_response_to_filepath_or_buffer
from quantrocket.exceptions import ParameterError, NoMasterData
def list_ibkr_exchanges(regions=None, sec_types=None):
"""
List exchanges by security type and country as found on the IBKR website.
Parameters
----------
regions : list of str, optional
limit to these regions. Possible choices: north_america, europe, asia, global
sec_types : list of str, optional
        limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND
Returns
-------
dict
"""
params = {}
if sec_types:
params["sec_types"] = sec_types
if regions:
params["regions"] = regions
response = houston.get("/master/exchanges/ibkr", params=params, timeout=180)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_ibkr_exchanges(*args, **kwargs):
return json_to_cli(list_ibkr_exchanges, *args, **kwargs)
def collect_alpaca_listings():
"""
Collect securities listings from Alpaca and store in securities master
database.
Returns
-------
dict
status message
"""
response = houston.post("/master/securities/alpaca")
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_alpaca_listings(*args, **kwargs):
return json_to_cli(collect_alpaca_listings, *args, **kwargs)
def collect_edi_listings(exchanges=None):
"""
Collect securities listings from EDI and store in securities master
database.
Parameters
----------
exchanges : list or str, required
collect listings for these exchanges (identified by MICs)
Returns
-------
dict
status message
Examples
--------
Collect sample listings:
>>> collect_edi_listings(exchanges="FREE")
Collect listings for all permitted exchanges:
>>> collect_edi_listings()
Collect all Chinese stock listings:
>>> collect_edi_listings(exchanges=["XSHG", "XSHE"])
"""
params = {}
if exchanges:
params["exchanges"] = exchanges
response = houston.post("/master/securities/edi", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_edi_listings(*args, **kwargs):
return json_to_cli(collect_edi_listings, *args, **kwargs)
def collect_figi_listings():
"""
Collect securities listings from Bloomberg OpenFIGI and store
in securities master database.
OpenFIGI provides several useful security attributes including
market sector, a detailed security type, and share class-level
FIGI identifier.
The collected data fields show up in the master file with the
prefix "figi_*".
This function does not directly query the OpenFIGI API but rather
downloads a dump of all FIGIs which QuantRocket has previously
mapped to securities from other vendors.
Returns
-------
dict
status message
Examples
--------
Collect all available FIGI listings:
>>> collect_figi_listings()
"""
response = houston.post("/master/securities/figi")
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_figi_listings(*args, **kwargs):
return json_to_cli(collect_figi_listings, *args, **kwargs)
def collect_ibkr_listings(exchanges=None, sec_types=None, currencies=None,
symbols=None, universes=None, sids=None):
"""
Collect securities listings from Interactive Brokers and store in
securities master database.
Specify an exchange (optionally filtering by security type, currency,
and/or symbol) to collect listings from the IBKR website and collect
associated contract details from the IBKR API. Or, specify universes or
sids to collect details from the IBKR API, bypassing the website.
Parameters
----------
exchanges : list or str
one or more IBKR exchange codes to collect listings for (required
unless providing universes or sids). For sample data use exchange
code 'FREE'
sec_types : list of str, optional
limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND
currencies : list of str, optional
limit to these currencies
symbols : list of str, optional
limit to these symbols
universes : list of str, optional
limit to these universes
sids : list of str, optional
limit to these sids
Returns
-------
dict
status message
Examples
--------
Collect free sample listings:
>>> collect_ibkr_listings(exchanges="FREE")
Collect all Toronto Stock Exchange stock listings:
>>> collect_ibkr_listings(exchanges="TSE", sec_types="STK")
Collect all NYSE ARCA ETF listings:
>>> collect_ibkr_listings(exchanges="ARCA", sec_types="ETF")
Collect specific symbols from Nasdaq:
>>> collect_ibkr_listings(exchanges="NASDAQ", symbols=["AAPL", "GOOG", "NFLX"])
Re-collect contract details for an existing universe called "japan-fin":
>>> collect_ibkr_listings(universes="japan-fin")
"""
params = {}
if exchanges:
params["exchanges"] = exchanges
if sec_types:
params["sec_types"] = sec_types
if currencies:
params["currencies"] = currencies
if symbols:
params["symbols"] = symbols
if universes:
params["universes"] = universes
if sids:
params["sids"] = sids
response = houston.post("/master/securities/ibkr", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_ibkr_listings(*args, **kwargs):
return json_to_cli(collect_ibkr_listings, *args, **kwargs)
def collect_sharadar_listings(countries="US"):
"""
Collect securities listings from Sharadar and store in securities master
database.
Parameters
----------
countries : list of str, required
countries to collect listings for. Possible choices: US, FREE
Returns
-------
dict
status message
"""
params = {}
if countries:
params["countries"] = countries
response = houston.post("/master/securities/sharadar", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_sharadar_listings(*args, **kwargs):
return json_to_cli(collect_sharadar_listings, *args, **kwargs)
def collect_usstock_listings():
"""
Collect US stock listings from QuantRocket and store in securities
master database.
Returns
-------
dict
status message
"""
response = houston.post("/master/securities/usstock")
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_usstock_listings(*args, **kwargs):
return json_to_cli(collect_usstock_listings, *args, **kwargs)
def collect_ibkr_option_chains(universes=None, sids=None, infilepath_or_buffer=None):
"""
Collect IBKR option chains for underlying securities.
Note: option chains often consist of hundreds, sometimes thousands of
options per underlying security. Be aware that requesting option chains
for large universes of underlying securities, such as all stocks on the
NYSE, can take numerous hours to complete.
Parameters
----------
universes : list of str, optional
collect options for these universes of underlying securities
sids : list of str, optional
collect options for these underlying sids
infilepath_or_buffer : str or file-like object, optional
collect options for the sids in this file (specify '-' to read file
from stdin)
Returns
-------
dict
status message
"""
params = {}
if universes:
params["universes"] = universes
if sids:
params["sids"] = sids
if infilepath_or_buffer == "-":
response = houston.post("/master/options/ibkr", params=params, data=to_bytes(sys.stdin))
elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
if infilepath_or_buffer.seekable():
infilepath_or_buffer.seek(0)
response = houston.post("/master/options/ibkr", params=params, data=to_bytes(infilepath_or_buffer))
elif infilepath_or_buffer:
with open(infilepath_or_buffer, "rb") as f:
response = houston.post("/master/options/ibkr", params=params, data=f)
else:
response = houston.post("/master/options/ibkr", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_ibkr_option_chains(*args, **kwargs):
return json_to_cli(collect_ibkr_option_chains, *args, **kwargs)
def diff_ibkr_securities(universes=None, sids=None, infilepath_or_buffer=None,
fields=None, delist_missing=False, delist_exchanges=None,
wait=False):
"""
Flag security details that have changed in IBKR's system since the time they
were last collected into the securities master database.
Diff can be run synchronously or asynchronously (asynchronous is the default
and is recommended if diffing more than a handful of securities).
Parameters
----------
universes : list of str, optional
limit to these universes
sids : list of str, optional
limit to these sids
infilepath_or_buffer : str or file-like object, optional
limit to the sids in this file (specify '-' to read file from stdin)
fields : list of str, optional
only diff these fields (field name should start with "ibkr")
delist_missing : bool
auto-delist securities that are no longer available from IBKR
delist_exchanges : list of str, optional
auto-delist securities that are associated with these exchanges
wait : bool
run the diff synchronously and return the diff (otherwise run
asynchronously and log the results, if any, to flightlog)
Returns
-------
dict
dict of sids and fields that have changed (if wait), or status message
"""
params = {}
if universes:
params["universes"] = universes
if sids:
params["sids"] = sids
if fields:
params["fields"] = fields
if delist_missing:
params["delist_missing"] = delist_missing
if delist_exchanges:
params["delist_exchanges"] = delist_exchanges
if wait:
params["wait"] = wait
# if run synchronously use a high timeout
timeout = 60*60*10 if wait else None
if infilepath_or_buffer == "-":
response = houston.get("/master/diff/ibkr", params=params, data=to_bytes(sys.stdin), timeout=timeout)
elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
if infilepath_or_buffer.seekable():
infilepath_or_buffer.seek(0)
response = houston.get("/master/diff/ibkr", params=params, data=to_bytes(infilepath_or_buffer), timeout=timeout)
elif infilepath_or_buffer:
with open(infilepath_or_buffer, "rb") as f:
response = houston.get("/master/diff/ibkr", params=params, data=f, timeout=timeout)
else:
response = houston.get("/master/diff/ibkr", params=params, timeout=timeout)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_diff_ibkr_securities(*args, **kwargs):
return json_to_cli(diff_ibkr_securities, *args, **kwargs)
def download_master_file(filepath_or_buffer=None, output="csv", exchanges=None, sec_types=None,
currencies=None, universes=None, symbols=None, sids=None,
exclude_universes=None, exclude_sids=None,
exclude_delisted=False, exclude_expired=False,
frontmonth=False, vendors=None, fields=None):
"""
Query security details from the securities master database and download to file.
Parameters
----------
filepath_or_buffer : str or file-like object
filepath to write the data to, or file-like object (defaults to stdout)
output : str
output format (csv or json, default is csv)
exchanges : list of str, optional
limit to these exchanges. You can specify exchanges using the MIC or the
vendor's exchange code.
sec_types : list of str, optional
limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND, OPT, FOP, BAG
currencies : list of str, optional
limit to these currencies
universes : list of str, optional
limit to these universes
symbols : list of str, optional
limit to these symbols
sids : list of str, optional
limit to these sids
exclude_universes : list of str, optional
exclude these universes
exclude_sids : list of str, optional
exclude these sids
exclude_delisted : bool
exclude delisted securities (default is to include them)
exclude_expired : bool
exclude expired contracts (default is to include them)
frontmonth : bool
exclude backmonth and expired futures contracts (default False)
vendors : list of str, optional
limit to these vendors. Possible choices: alpaca, edi, ibkr,
sharadar, usstock
fields : list of str, optional
Return specific fields. By default a core set of fields is
returned, but additional vendor-specific fields are also available.
To return non-core fields, you can reference them by name, or pass "*"
to return all available fields. To return all fields for a specific
vendor, pass the vendor prefix followed by "*", for example "edi*"
for all EDI fields. Pass "?*" (or any invalid vendor prefix plus "*")
to see available vendor prefixes. Pass "?" or any invalid fieldname
to see all available fields.
Returns
-------
None
Notes
-----
Parameters for filtering query results are combined according to the following
rules. First, the master service determines what to include in the result set,
based on the inclusion filters: `exchanges`, `sec_types`, `currencies`, `universes`,
`symbols`, and `sids`. With the exception of `sids`, these parameters are ANDed
together. That is, securities must satisfy all of the parameters to be included.
If `vendors` is provided, only those vendors are searched for the purpose of
determining matches.
The `sids` parameter is treated differently. Securities matching `sids` are always
included, regardless of whether they meet the other inclusion criteria.
After determining what to include, the master service then applies the exclusion
filters (`exclude_sids`, `exclude_universes`, `exclude_delisted`, `exclude_expired`,
and `frontmonth`) to determine what (if anything) should be removed from the result
set. Exclusion filters are ORed, that is, securities are excluded if they match any
of the exclusion criteria.
Examples
--------
Download NYSE and NASDAQ securities to file, using MICs to specify
the exchanges:
>>> download_master_file("securities.csv", exchanges=["XNYS","XNAS"])
Download NYSE and NASDAQ securities to file, using IBKR exchange codes
to specify the exchanges, and include all IBKR fields:
>>> download_master_file("securities.csv", exchanges=["NYSE","NASDAQ"], fields="ibkr*")
Download securities for a particular universe to in-memory file, including
all possible fields, and load the CSV into pandas.
>>> f = io.StringIO()
>>> download_master_file(f, fields="*", universes="my-universe")
>>> securities = pd.read_csv(f)
See Also
--------
quantrocket.master.get_securities : load securities into a DataFrame
"""
params = {}
if exchanges:
params["exchanges"] = exchanges
if sec_types:
params["sec_types"] = sec_types
if currencies:
params["currencies"] = currencies
if universes:
params["universes"] = universes
if symbols:
params["symbols"] = symbols
if sids:
params["sids"] = sids
if exclude_universes:
params["exclude_universes"] = exclude_universes
if exclude_sids:
params["exclude_sids"] = exclude_sids
if exclude_delisted:
params["exclude_delisted"] = exclude_delisted
if exclude_expired:
params["exclude_expired"] = exclude_expired
if frontmonth:
params["frontmonth"] = frontmonth
if vendors:
params["vendors"] = vendors
if fields:
params["fields"] = fields
output = output or "csv"
url = "/master/securities.{0}".format(output)
if output not in ("csv", "json"):
raise ValueError("Invalid ouput: {0}".format(output))
response = houston.get(url, params=params)
try:
houston.raise_for_status_with_json(response)
except requests.HTTPError as e:
# Raise a dedicated exception
if "no securities match the query parameters" in repr(e).lower():
raise NoMasterData(e)
raise
filepath_or_buffer = filepath_or_buffer or sys.stdout
write_response_to_filepath_or_buffer(filepath_or_buffer, response)
def _cli_download_master_file(*args, **kwargs):
return json_to_cli(download_master_file, *args, **kwargs)
def get_securities(symbols=None, exchanges=None, sec_types=None,
currencies=None, universes=None, sids=None,
exclude_universes=None, exclude_sids=None,
exclude_delisted=False, exclude_expired=False,
frontmonth=False, vendors=None, fields=None):
"""
Return a DataFrame of security details from the securities master database.
Parameters
----------
symbols : list of str, optional
limit to these symbols
exchanges : list of str, optional
limit to these exchanges. You can specify exchanges using the MIC or the
vendor's exchange code.
sec_types : list of str, optional
limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND, OPT, FOP, BAG
currencies : list of str, optional
limit to these currencies
universes : list of str, optional
limit to these universes
sids : list of str, optional
limit to these sids
exclude_universes : list of str, optional
exclude these universes
exclude_sids : list of str, optional
exclude these sids
exclude_delisted : bool
exclude delisted securities (default is to include them)
exclude_expired : bool
exclude expired contracts (default is to include them)
frontmonth : bool
exclude backmonth and expired futures contracts (default False)
vendors : list of str, optional
limit to these vendors. Possible choices: alpaca, edi, ibkr,
sharadar, usstock
fields : list of str, optional
Return specific fields. By default a core set of fields is
returned, but additional vendor-specific fields are also available.
To return non-core fields, you can reference them by name, or pass "*"
to return all available fields. To return all fields for a specific
vendor, pass the vendor prefix followed by "*", for example "edi*"
for all EDI fields. Pass "?*" (or any invalid vendor prefix plus "*")
to see available vendor prefixes. Pass "?" or any invalid fieldname
to see all available fields.
Returns
-------
DataFrame
a DataFrame of securities, with Sids as the index
Notes
-----
Parameters for filtering query results are combined according to the following
rules. First, the master service determines what to include in the result set,
based on the inclusion filters: `exchanges`, `sec_types`, `currencies`, `universes`,
`symbols`, and `sids`. With the exception of `sids`, these parameters are ANDed
together. That is, securities must satisfy all of the parameters to be included.
If `vendors` is provided, only those vendors are searched for the purpose of
determining matches.
The `sids` parameter is treated differently. Securities matching `sids` are always
included, regardless of whether they meet the other inclusion criteria.
After determining what to include, the master service then applies the exclusion
filters (`exclude_sids`, `exclude_universes`, `exclude_delisted`, `exclude_expired`,
and `frontmonth`) to determine what (if anything) should be removed from the result
set. Exclusion filters are ORed, that is, securities are excluded if they match any
of the exclusion criteria.
Examples
--------
Load default fields for NYSE and NASDAQ securities, using MICs to specify
the exchanges:
>>> securities = get_securities(exchanges=["XNYS","XNAS"])
Load sids for MSFT and AAPL:
>>> sids = get_securities(symbols=["MSFT", "AAPL"]).index.tolist()
Load NYSE and NASDAQ securities, using IBKR exchange codes to specify the
exchanges, and include all IBKR fields:
>>> securities = get_securities(exchanges=["NYSE","NASDAQ"], fields="ibkr*")
"""
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this function")
f = six.StringIO()
download_master_file(
f, exchanges=exchanges, sec_types=sec_types,
currencies=currencies, universes=universes,
symbols=symbols, sids=sids,
exclude_universes=exclude_universes,
exclude_sids=exclude_sids,
exclude_delisted=exclude_delisted,
exclude_expired=exclude_expired, frontmonth=frontmonth,
vendors=vendors, fields=fields)
securities = pd.read_csv(f, index_col="Sid")
for col in securities.columns:
col_without_vendor_prefix = col.split("_")[-1]
if col_without_vendor_prefix in (
"Delisted", "Etf", "EasyToBorrow", "Marginable", "Tradable", "Shortable",
"IsPrimaryListing"):
securities[col] = securities[col].fillna(0).astype(bool)
elif (
col_without_vendor_prefix.endswith("Date")
or col_without_vendor_prefix.startswith("Date")
or col_without_vendor_prefix in (
"FirstAdded", "LastAdded", "RecordCreated", "RecordModified",
"LastUpdated", "FirstQuarter", "LastQuarter")):
# pd.to_datetime handles NaNs in earlier pandas versions (0.22)
# while .astype("datetime64[ns]") does not
securities[col] = pd.to_datetime(securities[col])
return securities
def get_securities_reindexed_like(reindex_like, fields=None):
"""
Return a multiindex DataFrame of securities master data, reindexed to
match the index and columns (sids) of `reindex_like`.
Parameters
----------
reindex_like : DataFrame, required
a DataFrame (usually of prices) with dates for the index and sids
for the columns, to which the shape of the resulting DataFrame will
be conformed
fields : list of str
a list of fields to include in the resulting DataFrame. By default a
core set of fields is returned, but additional vendor-specific fields
are also available. To return non-core fields, you can reference them
by name, or pass "*" to return all available fields. To return all
fields for a specific vendor, pass the vendor prefix followed by "*",
for example "edi*" for all EDI fields. Pass "?*" (or any invalid
vendor prefix plus "*") to see available vendor prefixes. Pass "?" or
any invalid fieldname to see all available fields.
Returns
-------
DataFrame
a multiindex (Field, Date) DataFrame of securities master data, shaped
like the input DataFrame
Examples
--------
Get exchanges (MICs) using a DataFrame of prices:
>>> closes = prices.loc["Close"]
>>> securities = get_securities_reindexed_like(
closes, fields=["Exchange"])
>>> exchanges = securities.loc["Exchange"]
>>> nyse_closes = closes.where(exchanges == "XNYS")
"""
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this function")
sids = list(reindex_like.columns)
securities = get_securities(sids=sids, fields=fields)
if "Sid" in fields:
securities["Sid"] = securities.index
all_master_fields = {}
for col in sorted(securities.columns):
this_col = securities[col]
all_master_fields[col] = reindex_like.apply(lambda x: this_col, axis=1)
names = list(reindex_like.index.names)
names.insert(0, "Field")
securities = pd.concat(all_master_fields, names=names)
securities = securities.reindex(columns=reindex_like.columns)
return securities
def get_contract_nums_reindexed_like(reindex_like, limit=5):
"""
From a DataFrame of futures (with dates as the index and sids as columns),
return a DataFrame of integers representing each sid's sequence in the
futures chain as of each date, where 1 is the front contract, 2 is the second
nearest contract, etc.
Sequences are based on the RolloverDate field in the securities master
file, which is based on configurable rollover rules.
Parameters
----------
reindex_like : DataFrame, required
a DataFrame (usually of prices) with dates for the index and sids
for the columns, to which the shape of the resulting DataFrame will
be conformed
limit : int
how many contracts ahead to sequence. For example, assuming quarterly
contracts, a limit of 5 will sequence 5 quarters out. Default 5.
Returns
-------
DataFrame
a DataFrame of futures chain sequence numbers, shaped like the input
DataFrame
Examples
--------
Get a Boolean mask of front-month contracts:
>>> closes = prices.loc["Close"]
>>> contract_nums = get_contract_nums_reindexed_like(closes)
>>> are_front_months = contract_nums == 1
"""
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this function")
index_levels = reindex_like.index.names
if "Date" not in index_levels:
raise ParameterError(
"reindex_like must have index called 'Date', but has {0}".format(
",".join([str(name) for name in index_levels])))
reindex_like_dt_index = reindex_like.index.get_level_values("Date")
if not hasattr(reindex_like_dt_index, "date"):
raise ParameterError("reindex_like must have a DatetimeIndex")
f = six.StringIO()
download_master_file(f, sids=list(reindex_like.columns),
fields=["RolloverDate","ibkr_UnderConId","SecType"])
rollover_dates = pd.read_csv(f, parse_dates=["RolloverDate"])
rollover_dates = rollover_dates[rollover_dates.SecType=="FUT"].drop("SecType", axis=1)
if rollover_dates.empty:
raise ParameterError("input DataFrame does not appear to contain any futures contracts")
if reindex_like_dt_index.tz:
rollover_dates.loc[:, "RolloverDate"] = rollover_dates.RolloverDate.dt.tz_localize(reindex_like_dt_index.tz.zone)
min_date = reindex_like_dt_index.min()
max_date = max([rollover_dates.RolloverDate.max(),
reindex_like_dt_index.max()])
# Stack sids by underlying (1 column per underlying)
rollover_dates = rollover_dates.set_index(["RolloverDate","ibkr_UnderConId"]).Sid.unstack()
contract_nums = None
for i in range(limit):
# backshift conids
_rollover_dates = rollover_dates.apply(lambda col: col.dropna().shift(-i))
# Reindex to daily frequency
_rollover_dates = _rollover_dates.reindex(
index=pd.date_range(start=min_date, end=max_date))
# RolloverDate is when we roll out of the contract, hence we backfill
_rollover_dates = _rollover_dates.fillna(method="bfill")
# Stack to Series of Date, nth sid
_rollover_dates = _rollover_dates.stack()
_rollover_dates.index = _rollover_dates.index.droplevel("ibkr_UnderConId")
_rollover_dates.index.name = "Date"
# Pivot Series to DataFrame
_rollover_dates = _rollover_dates.reset_index(name="Sid")
_rollover_dates["ContractNum"] = i + 1
_rollover_dates = _rollover_dates.set_index(["Date","Sid"])
_contract_nums = _rollover_dates.ContractNum.unstack()
# If MultiIndex input, broadcast across Time level
if len(index_levels) > 1:
_contract_nums = _contract_nums.reindex(index=reindex_like.index,
level="Date")
_contract_nums = _contract_nums.reindex(columns=reindex_like.columns)
else:
_contract_nums = _contract_nums.reindex(index=reindex_like_dt_index,
columns=reindex_like.columns)
if contract_nums is None:
contract_nums = _contract_nums
else:
contract_nums = contract_nums.fillna(_contract_nums)
return contract_nums
def create_universe(code, infilepath_or_buffer=None, sids=None, from_universes=None,
exclude_delisted=False, append=False, replace=False):
"""
Create a universe of securities.
Parameters
----------
code : str, required
the code to assign to the universe (lowercase alphanumerics and hyphens only)
infilepath_or_buffer : str or file-like object, optional
create the universe from the sids in this file (specify '-' to read file from stdin)
sids : list of str, optional
create the universe from these sids
from_universes : list of str, optional
create the universe from these existing universes
exclude_delisted : bool
exclude delisted securities and expired contracts that would otherwise be
included (default is not to exclude them)
append : bool
append to universe if universe already exists (default False)
replace : bool
replace universe if universe already exists (default False)
Returns
-------
dict
status message
Examples
--------
Create a universe called 'nyse-stk' from a CSV file:
>>> create_universe("usa-stk", "nyse_securities.csv")
Create a universe from a DataFrame of securities:
>>> securities = get_securities(exchanges="TSEJ")
>>> create_universe("japan-stk", sids=securities.index.tolist())
"""
if append and replace:
raise ValueError("append and replace are mutually exclusive")
params = {}
if from_universes:
params["from_universes"] = from_universes
if exclude_delisted:
params["exclude_delisted"] = exclude_delisted
if replace:
params["replace"] = replace
if sids:
params["sids"] = sids
url = "/master/universes/{0}".format(code)
if append:
method = "PATCH"
else:
method = "PUT"
if infilepath_or_buffer == "-":
response = houston.request(method, url, params=params, data=to_bytes(sys.stdin))
elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
if infilepath_or_buffer.seekable():
infilepath_or_buffer.seek(0)
response = houston.request(method, url, params=params, data=to_bytes(infilepath_or_buffer))
elif infilepath_or_buffer:
with open(infilepath_or_buffer, "rb") as f:
response = houston.request(method, url, params=params, data=f)
else:
response = houston.request(method, url, params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_create_universe(*args, **kwargs):
return json_to_cli(create_universe, *args, **kwargs)
def delete_universe(code):
"""
Delete a universe.
The listings details of the member securities won't be deleted, only
their grouping as a universe.
Parameters
----------
code : str, required
the universe code
Returns
-------
dict
status message
"""
url = "/master/universes/{0}".format(code)
response = houston.delete(url)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_delete_universe(*args, **kwargs):
return json_to_cli(delete_universe, *args, **kwargs)
def list_universes():
"""
List universes and their size.
Returns
-------
dict
dict of universe:size
"""
response = houston.get("/master/universes")
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_universes(*args, **kwargs):
return json_to_cli(list_universes, *args, **kwargs)
def delist_ibkr_security(sid=None, symbol=None, exchange=None, currency=None, sec_type=None):
"""
Mark an IBKR security as delisted.
This does not remove any data but simply marks the security as delisted so
that data services won't attempt to collect data for the security and so
that the security can be optionally excluded from query results.
The security can be specified by sid or a combination of other
parameters (for example, symbol + exchange). As a precaution, the request
will fail if the parameters match more than one security.
Parameters
----------
sid : str, optional
the sid of the security to be delisted
symbol : str, optional
the symbol to be delisted (if sid not provided)
exchange : str, optional
the exchange of the security to be delisted (if needed to disambiguate)
currency : str, optional
the currency of the security to be delisted (if needed to disambiguate)
sec_type : str, optional
the security type of the security to be delisted (if needed to disambiguate).
Possible choices: STK, ETF, FUT, CASH, IND
Returns
-------
dict
status message
"""
params = {}
if sid:
params["sids"] = sid
if symbol:
params["symbols"] = symbol
if exchange:
params["exchanges"] = exchange
if currency:
params["currencies"] = currency
if sec_type:
params["sec_types"] = sec_type
response = houston.delete("/master/securities/ibkr", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_delist_ibkr_security(*args, **kwargs):
return json_to_cli(delist_ibkr_security, *args, **kwargs)
def create_ibkr_combo(combo_legs):
"""
Create an IBKR combo (aka spread), which is a composite instrument consisting
of two or more individual instruments (legs) that are traded as a single
instrument.
Each user-defined combo is stored in the securities master database with a
SecType of "BAG". The combo legs are stored in the ComboLegs field as a JSON
array. QuantRocket assigns a sid for the combo consisting of a prefix 'IC'
followed by an autoincrementing digit, for example: IC1, IC2, IC3, ...
If the combo already exists, its sid will be returned instead of creating a
duplicate record.
Parameters
----------
combo_legs : list, required
a list of the combo legs, where each leg is a list specifying action, ratio,
and sid
Returns
-------
dict
returns a dict containing the generated sid of the combo, and whether a new
record was created
Examples
--------
To create a calendar spread on VX, first retrieve the sids of the legs:
>>> from quantrocket.master import download_master_file
>>> download_master_file("vx.csv", symbols="VIX", exchanges="CFE", sec_types="FUT")
>>> vx_sids = pd.read_csv("vx.csv", index_col="Symbol").Sid.to_dict()
Then create the combo:
>>> create_ibkr_combo([
["BUY", 1, vx_sids["VXV9"]],
["SELL", 1, vx_sids["VXQ9"]]
])
{"sid": IC1, "created": True}
"""
f = six.StringIO()
json.dump(combo_legs, f)
f.seek(0)
response = houston.put("/master/combos/ibkr", data=f)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_create_ibkr_combo(combo_filepath):
with open(combo_filepath) as f:
combo_legs = json.load(f)
return json_to_cli(create_ibkr_combo, combo_legs)
def load_rollrules_config(filename):
"""
Upload a new rollover rules config.
Parameters
----------
filename : str, required
the rollover rules YAML config file to upload
Returns
-------
dict
status message
"""
with open(filename) as file:
response = houston.put("/master/config/rollover", data=file.read())
houston.raise_for_status_with_json(response)
return response.json()
def get_rollrules_config():
"""
Returns the current rollover rules config.
Returns
-------
dict
the config as a dict
"""
response = houston.get("/master/config/rollover")
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def _cli_load_or_show_rollrules(filename=None):
if filename:
return json_to_cli(load_rollrules_config, filename)
else:
return json_to_cli(get_rollrules_config)
def collect_ibkr_calendar(exchanges=None):
"""
Collect upcoming trading hours from IBKR for exchanges and save to
securities master database.
Parameters
----------
exchanges : list of str, optional
limit to these exchanges
Returns
-------
dict
status message
"""
params = {}
if exchanges:
params["exchanges"] = exchanges
response = houston.post("/master/calendar/ibkr", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_collect_ibkr_calendar(*args, **kwargs):
return json_to_cli(collect_ibkr_calendar, *args, **kwargs)
def list_calendar_statuses(exchanges, sec_type=None, in_=None, ago=None, outside_rth=False):
"""
Check whether exchanges are open or closed.
Parameters
----------
exchanges : list of str, required
the exchange(s) to check
sec_type : str, optional
the security type, if needed to disambiguate for exchanges that
trade multiple security types. Possible choices: STK, FUT, CASH, OPT
in_ : str, optional
check whether exchanges will be open or closed at this point in the
future (use Pandas timedelta string, e.g. 2h or 30min or 1d)
ago : str, optional
check whether exchanges were open or closed this long ago
(use Pandas timedelta string, e.g. 2h or 30min or 1d)
outside_rth : bool
check extended hours calendar (default is to check regular
trading hours calendar)
Returns
-------
dict
exchange calendar status
"""
params = {}
if exchanges:
params["exchanges"] = exchanges
if sec_type:
params["sec_type"] = sec_type
if in_:
params["in"] = in_
if ago:
params["ago"] = ago
if outside_rth:
params["outside_rth"] = outside_rth
response = houston.get("/master/calendar", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_calendar_statuses(*args, **kwargs):
return json_to_cli(list_calendar_statuses, *args, **kwargs)
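# Illustrative sketch (not part of the original module): checking whether an
# exchange will still be open 30 minutes from now, per the docstring above.
# "NYSE" is an assumed example exchange code.
def _example_calendar_status():
    return list_calendar_statuses(["NYSE"], in_="30min")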
def _cli_in_status_since(status, since=None, in_=None, ago=None):
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this command")
dt = pd.Timestamp.now(status["timezone"])
if in_:
dt += pd.Timedelta(in_)
elif ago:
dt -= pd.Timedelta(ago)
dt = dt.tz_localize(None)
required_since = pd.date_range(periods=5, end=dt,
freq=since, normalize=False)
# For >1D freq, normalize to midnight
if required_since.freq.is_anchored() or required_since.freq.rule_code == "D":
required_since = pd.date_range(periods=5, end=dt, freq=since, normalize=True)
required_since = required_since[-1]
else:
# If not normalized, the last value is dt, so use the penultimate value
required_since = required_since[-2]
actual_since = pd.Timestamp(status["since"])
return actual_since <= required_since
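# Illustrative sketch of the pandas behavior the helper above relies on: with an
# anchored frequency such as "W" and normalize=True, date_range snaps its last
# value to midnight of the most recent anchor on or before `end`, which becomes
# the cutoff compared against the status's "since" timestamp.
def _example_required_since(dt, since="W"):
    import pandas as pd  # assumes pandas is installed, as the helper above does
    rng = pd.date_range(periods=5, end=dt, freq=since, normalize=True)
    return rng[-1]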
def _cli_in_status_until(status, until=None, in_=None, ago=None):
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use this command")
dt = pd.Timestamp.now(status["timezone"])
if in_:
dt += pd.Timedelta(in_)
elif ago:
dt -= pd.Timedelta(ago)
dt = dt.tz_localize(None)
required_until = pd.date_range(start=dt, periods=5,
freq=until, normalize=False)
# For >1D freq, normalize to midnight
if required_until.freq.is_anchored() or required_until.freq.rule_code == "D":
required_until = pd.date_range(start=dt, periods=5,
freq=until, normalize=True)
# due to normalize=True, the date range might include a time before the
# start dt; filter it out
required_until = required_until[required_until > dt]
required_until = required_until[0]
else:
# If not normalized, the first value is dt, so use the second value
required_until = required_until[1]
actual_until =
|
pd.Timestamp(status["until"])
|
pandas.Timestamp
|
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import pandas as pd
from dlib import masternode_tax_calc
import os
from json import JSONDecodeError
app = dash.Dash()
# app.scripts.config.serve_locally = True
# app.css.config.serve_locally = True
app.layout = html.Div([
html.H4('Calculate Dash Taxes!'),
html.H6('Sometimes it does crash... Working on adding features.'),
html.Br(),
html.Br(),
html.H5('Enter Dash address below'),
html.Div([
dcc.Input(
id='address',
placeholder='XxVpWcsE92qWTrZWfmGTqrCzpBaKhRf2tX',
value='',
size=40,
type='text',
pattern='^X[1-9A-HJ-NP-Za-km-z]{33}'
)],
className='address-bar'
),
html.Button('Calculate', id='calc-but', n_clicks=0),
dcc.Graph(
id='tax-graph',
),
html.Br(),
dt.DataTable(
# Initialise the rows
rows=[{}],
filterable=True,
sortable=True,
selected_row_indices=[],
id='tx_table'
),
html.Br(),
html.Br(),
], className='container')
@app.callback(
Output('tx_table', 'rows'),
[Input('calc-but', 'n_clicks')], [State('address', 'value')])
def get_address_info(n_clicks, value):
try:
cost_basis = masternode_tax_calc.generate_cost_basis(value)
except JSONDecodeError:
# Fallback: a single placeholder record so DataFrame.from_records below still builds
cost_basis = [{
    'amount': 0,
    'time': 15223753709,
    'date': '2018-01-01',
    'type': 'normal',
    'cost_basis': '0',
}]
df = pd.DataFrame.from_records(cost_basis).sort_values(by=['date'], ascending=False)
"""
For user selections, return the relevant in-memory data frame.
"""
df = df.to_dict('records')
return df
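# Launching sketch (assumed, not part of the original snippet): a Dash app of this
# vintage is typically started with app.run_server(), normally from a __main__
# guard at the bottom of the module after all callbacks are registered.
def _run_app_sketch():
    app.run_server(debug=True)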
@app.callback(
Output('tax-graph', 'figure'),
[Input('tx_table', 'rows')])
def update_figure(rows):
dff =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|