| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import unittest
import pdb
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_index_equal
from ..models.condition_models import RuleKPI, RuleCondition, RuleConditionalOperator, RuleConditionGroup, RuleConditionGroupOperator
class Test_conditional_operator(unittest.TestCase):
def setUp(self):
"""
Create sample data
"""
d = {
"datum": pd.Series([3., 2., 1., np.nan]),
"criterion": pd.Series([np.nan, 1., 2., 3.]),
}
self.df = pd.DataFrame(d)
def test_greater(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.greater
index = operator.selectedIndex(self.df, "criterion", 2.)
        assert_index_equal(index, pd.Int64Index([3]))
def test_less(self):
"""
        Test filtering by less than
"""
operator = RuleConditionalOperator.less
index = operator.selectedIndex(self.df, "criterion", 2.)
        assert_index_equal(index, pd.Int64Index([1]))
def test_greater_than_or_equal(self):
"""
        Test filtering by greater than or equal to
"""
operator = RuleConditionalOperator.greaterThanOrEqual
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([2, 3]))
def test_less_than_or_equal(self):
"""
        Test filtering by less than or equal to
"""
operator = RuleConditionalOperator.lessThanOrEqual
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([1, 2]))
def test_equal(self):
"""
        Test filtering by equality
"""
operator = RuleConditionalOperator.equal
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([2]))
class Test_condition(unittest.TestCase):
"""
Test module for the search ads condition classes
"""
def test_spend(self):
"""
Test filtering by total spend
"""
d = {
"keywordId": pd.Series([1, 2, 1]),
"localSpend": pd.Series([1., 3., 3.]),
}
df = pd.DataFrame(d)
condition = RuleCondition(kpi=RuleKPI("totalSpend"),
operator=RuleConditionalOperator("greater"),
comparisonValue=3.)
index = condition.selectedIndex(df, groupByID="keywordId")
assert_index_equal(index, pd.Int64Index([0, 2]))
dataIndex = [0, 1, 2]
d = {
"keywordId": pd.Series([1, 2, 1], index=dataIndex),
"localSpend": pd.Series([1., 3., 3.], index=dataIndex),
"totalSpend": pd.Series([4., 3., 4.], index=dataIndex),
}
assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1))
def test_cpt(self):
"""
Test filtering by total CPT
"""
d = {
"keywordId": pd.Series([1, 2, 1]),
"localSpend": pd.Series([1., 3., 3.]),
"taps": pd.Series([0, 0, 2.]),
}
df = pd.DataFrame(d)
condition = RuleCondition(kpi=RuleKPI("reavgCPT"),
operator=RuleConditionalOperator("less"),
comparisonValue=3.)
index = condition.selectedIndex(df, groupByID="keywordId")
assert_index_equal(index, pd.Int64Index([0, 2]))
dataIndex = [0, 1, 2]
        d = {
            "keywordId": pd.Series([1, 2, 1], index=dataIndex),
            "localSpend": pd.Series([1., 3., 3.], index=dataIndex),
            "taps": pd.Series([0, 0, 2.], index=dataIndex),
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Point Aggregator.
Given a collection of true reach values and a corresponding collection
of reach estimates, computes a single row DataFrame with summary statistics
on the differences between the true values and the estimated values.
"""
import numpy as np
import pandas as pd
from typing import List
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
# A list of functions for computing aggregation metrics. Each function
# takes as input the true reach and modeled reach points and produces
# as output a scalar value (could be float, int or string).
AGGREGATORS = {
# Number of test points
"npoints": lambda x, y: len(x),
# Mean error (bias)
"mean_error": lambda x, y: np.mean(_reach(x) - _reach(y)),
# Mean absolute error in predicted reach
"mean_abs_error": lambda x, y: np.mean(np.abs(_reach(x) - _reach(y))),
# Mean squared error in predicted reach
"mean_squared_error": lambda x, y: np.mean((_reach(x) - _reach(y)) ** 2),
# Mean absolute relative error in predicted reach
"mean_abs_relative_error": lambda x, y: _mean_abs_relative_error(x, y),
# Mean absolute relative error at frequencies 2..9
"mare_freq_at_least_2": lambda x, y: _mean_abs_relative_error(x, y, 2),
"mare_freq_at_least_3": lambda x, y: _mean_abs_relative_error(x, y, 3),
"mare_freq_at_least_4": lambda x, y: _mean_abs_relative_error(x, y, 4),
"mare_freq_at_least_5": lambda x, y: _mean_abs_relative_error(x, y, 5),
"mare_freq_at_least_6": lambda x, y: _mean_abs_relative_error(x, y, 6),
"mare_freq_at_least_7": lambda x, y: _mean_abs_relative_error(x, y, 7),
"mare_freq_at_least_8": lambda x, y: _mean_abs_relative_error(x, y, 8),
"mare_freq_at_least_9": lambda x, y: _mean_abs_relative_error(x, y, 9),
# Mean squared relative error in predicted reach
"mean_squared_relative_error": lambda x, y: np.mean(
(_reach(x) - _reach(y)) ** 2 / _reach(x) ** 2
),
# Error variance in predicted reach
"var_error": lambda x, y: np.var(_reach(x) - _reach(y)),
# Relative error variance in predicted reach
"var_relative_error": lambda x, y: np.var((_reach(x) - _reach(y)) / _reach(x)),
# Quantiles of relative error in predicted reach
"relative_error_q10": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.10
),
"relative_error_q20": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.20
),
"relative_error_q30": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.30
),
"relative_error_q40": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.40
),
"relative_error_q50": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.50
),
"relative_error_q60": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.60
),
"relative_error_q70": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.70
),
"relative_error_q80": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.80
),
"relative_error_q90": lambda x, y: np.quantile(
np.abs(_reach(x) - _reach(y)) / _reach(x), 0.90
),
# Mean shuffle distance
"mean_shuffle_distance": lambda x, y: np.mean(
[_shuffle_distance(x[i], y[i]) for i in range(len(x))]
),
# Mean squared shuffle distance
"mean_squared_shuffle_distance": lambda x, y: np.mean(
[_shuffle_distance(x[i], y[i]) ** 2 for i in range(len(x))]
),
# Variance of shuffle distance
"var_shuffle_distance": lambda x, y: np.var(
[_shuffle_distance(x[i], y[i]) for i in range(len(x))]
),
}
def _mean_abs_relative_error(x: List[ReachPoint], y: List[ReachPoint], k=1) -> float:
"""Returns the mean absolute relative error at freq k."""
if any([k > p.max_frequency for p in x]):
return np.nan
if any([k > p.max_frequency for p in y]):
return np.nan
return np.mean(np.abs(_reach(x, k) - _reach(y, k)) / _reach(x, k))
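# Worked sketch of the relative-error calculation above (made-up numbers, not
# framework data): if the true k+ reaches are [100, 200] and the modeled k+
# reaches are [110, 180], the absolute relative errors are 10/100 = 0.10 and
# 20/200 = 0.10, so the mean absolute relative error is 0.10.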
def _reach(point_list: List[ReachPoint], k=1) -> np.ndarray:
    """Returns an array of k+ reach values from a list of ReachPoints."""
return np.array([point.reach(k) for point in point_list])
def _shuffle_distance(xpoint: ReachPoint, ypoint: ReachPoint, k=5) -> float:
"""Computes shuffle distance of first k frequency buckets."""
if xpoint.max_frequency <= k or ypoint.max_frequency <= k:
return 1.0
xfreq = np.array([xpoint.frequency(i + 1) for i in range(k)])
yfreq = np.array([ypoint.frequency(i + 1) for i in range(k)])
if sum(xfreq) == 0 or sum(yfreq) == 0:
return 0.5
return 0.5 * np.sum(np.abs(xfreq / sum(xfreq) - yfreq / sum(yfreq)))
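# Worked sketch of the shuffle distance above (toy histograms, not framework data):
# with xfreq = [4, 3, 2, 1, 0] and yfreq = [3, 3, 2, 1, 1], the normalized
# histograms are [0.4, 0.3, 0.2, 0.1, 0.0] and [0.3, 0.3, 0.2, 0.1, 0.1], the
# absolute differences sum to 0.2, and the shuffle distance is 0.5 * 0.2 = 0.1.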
def aggregate(
    true_reach: List[ReachPoint], simulated_reach: List[ReachPoint]
) -> pd.DataFrame:
    """Returns a single row DataFrame of the statistics listed in AGGREGATORS.
    Args:
        true_reach: A list of points representing true reach values.
        simulated_reach: A list of points representing modeled reach values. This list must
            be of the same length as true_reach. The value of simulated_reach[i] should be the
            output of the modeling function for the spend vector that was used to compute
            true_reach[i].
    Returns:
        A single row DataFrame containing the values of the statistics listed in AGGREGATORS.
    """
stats = {"model_succeeded": [1], "model_exception": [""]}
for key in AGGREGATORS:
stats[key] = [AGGREGATORS[key](true_reach, simulated_reach)]
return pd.DataFrame(data=stats)
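# Minimal usage sketch (assumes `true_points` and `modeled_points` are equal-length
# lists of ReachPoint objects built elsewhere in the framework; the names are
# illustrative only):
#
#     summary_df = aggregate(true_points, modeled_points)
#     print(summary_df["mean_abs_error"].iloc[0])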
def aggregate_on_exception(inst: Exception) -> pd.DataFrame:
    """Returns a DataFrame of the same shape as aggregate, but for the case of an exception.
    Args:
        inst: The exception instance that was generated in the modeling attempt.
    Returns:
        A single row DataFrame of NAs with columns being the statistics listed in AGGREGATORS.
    """
stats = {"model_succeeded": [0], "model_exception": [str(inst)]}
for key in AGGREGATORS:
stats[key] = [np.NaN]
    return pd.DataFrame(data=stats)
# Standard Library
from enum import Enum
# Third Party
import pandas as pd
# First Party
from smdebug.core.logger import get_logger
class StatsBy(Enum):
"""
Enum to get stats by different categories.
"""
# training phase such as TRAIN/EVAL/GLOBAL.
TRAINING_PHASE = "training_phase"
# framework metrics such as function names/ operator names
FRAMEWORK_METRICS = "framework_metric"
# event phase name as retrieved from events
PROCESS = "process"
class Resource(Enum):
"""
Enum to specify the device/resource specified in system metrics
"""
CPU = "cpu"
GPU = "gpu"
IO = "i/o"
NETWORK = "network"
MEMORY = "memory"
# Container class for job stats
class JobStats(dict):
def __setitem__(self, key, item):
self.__dict__[key] = item
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
        return repr(pd.DataFrame.from_dict(self.__dict__))
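# Minimal usage sketch for the container above (the values are illustrative and not
# part of the smdebug API):
#
#     stats = JobStats()
#     stats[StatsBy.TRAINING_PHASE.value] = {"TRAIN": 120.5, "EVAL": 30.2}
#     print(stats)  # __repr__ renders the stored stats as a pandas DataFrame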
from app import app
from bokeh.io import show
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.models import NumeralTickFormatter
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.layouts import gridplot
from flask import render_template, flash, redirect, url_for, request, jsonify
from flask_login import current_user, login_user, logout_user, login_required
from datetime import datetime
from math import pi
from app.db import get_db, query
from app.plot import formatter, hbar, multiline
import pandas as pd
import numpy as np
import math
@app.route('/employees', methods=['GET', 'POST'])
@login_required
def employees():
"""
Render employee page
"""
date_start = request.form.get('date_start', '2018-01-01')
date_end = request.form.get('date_end', '2018-01-31')
    time_frame = request.form.get('time_frame')
    if time_frame is None:
        time_frame = 'date'
# average order_numbers
avg = get_avg_selling_per(date_start, date_end)
avg_order = formatter(avg[0][0])
avg_revenue = formatter(avg[1][0], 'dollar')
# most revenue
revenue_total = get_employee_revenue_total(date_start, date_end)
# sql result is reversed due to the hbar layout
most_revenue_name = revenue_total.loc[9, 'employee']
# Revenue by employee
js_revenue_total, div_revenue_total = hbar(revenue_total, 'revenue', 'employee')
# most orders
orders_total = get_employee_orders_total(date_start, date_end)
# sql result is reversed due to the hbar layout
most_orders_name = orders_total.loc[9, 'employee']
# Order numbers by employee
js_orders_total, div_orders_total = hbar(orders_total, 'order_number', 'employee')
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
# Top 5 revenue employee trend
rev_top10 = revenue_total.loc[::-1, 'employee'].tolist()
# sql result is reversed thus first reverse to correct sequence
rev_top5 = rev_top10[: 5]
rev_trend_data = get_employee_trend(date_start, date_end, time_frame, rev_top5, 'revenue')
rev_trend_js, rev_trend_div = multiline(rev_trend_data, time_dict[time_frame], 'revenue', 'dollar',
rev_top5[0], rev_top5[1], rev_top5[2], rev_top5[3], rev_top5[4])
# top 5 order number employee trend
num_top10 = orders_total.loc[::-1 , 'employee'].tolist()
num_top5 = num_top10[: 5]
num_trend_data = get_employee_trend(date_start, date_end, time_frame, num_top5, 'order_number')
num_trend_js, num_trend_div = multiline(num_trend_data, time_dict[time_frame], 'order_number', 'number',
num_top5[0], num_top5[1], num_top5[2], num_top5[3], num_top5[4])
# gender relation distribution in order
g = get_ec_gender(date_start, date_end)
gender = pd.Series(g).reset_index(name='orders').rename(columns={'index':'gender'})
gender['angle'] = gender['orders']/gender['orders'].sum() * 2*pi
gender['color'] = Category20c[len(g)]
gender_hover = HoverTool(tooltips=[('Gender', '@gender'), ('Order number', '@orders{0.00 a}')])
gender_fig = figure(sizing_mode='scale_width', height=400, toolbar_location=None,
tools=[gender_hover], x_range=(-0.5, 1.0))
gender_fig.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='gender', source=gender,
hover_color='red', hover_fill_alpha=0.8)
gender_fig.axis.axis_label=None
gender_fig.axis.visible=False
gender_fig.grid.grid_line_color = None
js_gender, div_gender = components(gender_fig)
# state relation distribution in order
s = get_ec_state(date_start, date_end)
    state = pd.Series(s)
# -*- coding: utf-8 -*-
import os
import sys
import shutil
from dotenv import find_dotenv
dotenv_path = find_dotenv()
project_dir = os.path.dirname(dotenv_path)
sys.path.append(project_dir)
from src.models.KeyWordsModel import KeyWordClassifier
import json
from glob import glob
import click
import logging
from pathlib import Path
import pandas as pd
INPUT_FILEPATH = r"D:\develop\hr-scoring\data\interim\pool-test.json"
OUTPUT_DIRPATH = r"D:\develop\hr-scoring\models\cv-with-labels"
def rm_files_in_dir(dir_path):
folder = dir_path
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
@click.command()
@click.argument('input_filepath', default=INPUT_FILEPATH)
@click.argument('output_dirpath', type=click.Path(), default=OUTPUT_DIRPATH)
@click.argument('search_roles', default='all')
def main(input_filepath, output_dirpath, search_roles):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info(f'Input FILE: {input_filepath}')
logger.info(f'Output Dir: {output_dirpath}')
logger.info(f'Search Roles: {search_roles}')
search_roles = search_roles.split('-')
rm_files_in_dir(output_dirpath)
logger.info(f'Delete Files in "{output_dirpath}')
resumes = None
with open(input_filepath, 'r', encoding='utf-8') as fd:
resumes = json.load(fd)
logger.info(f'Read Resumes {len(resumes)} in "{input_filepath}')
    df_resumes = pd.DataFrame(resumes)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from nowcasting_dataset.datamodule import NowcastingDataModule
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
from neptune.new.integrations.pytorch_lightning import NeptuneLogger
import logging
logging.basicConfig()
logger = logging.getLogger('nowcasting_dataset')
logger.setLevel(logging.DEBUG)
# In[2]:
import numpy as np
# In[3]:
BUCKET = Path('solar-pv-nowcasting-data')
# Solar PV data
PV_PATH = BUCKET / 'PV/PVOutput.org'
PV_DATA_FILENAME = PV_PATH / 'UK_PV_timeseries_batch.nc'
PV_METADATA_FILENAME = PV_PATH / 'UK_PV_metadata.csv'
# SAT_FILENAME = BUCKET / 'satellite/EUMETSAT/SEVIRI_RSS/OSGB36/all_zarr_int16_single_timestep_quarter_geospatial.zarr'
SAT_FILENAME = BUCKET / 'satellite/EUMETSAT/SEVIRI_RSS/OSGB36/all_zarr_int16_single_timestep.zarr'
# Numerical weather predictions
#NWP_BASE_PATH = BUCKET / 'NWP/UK_Met_Office/UKV_zarr'
#NWP_BASE_PATH = BUCKET / 'NWP/UK_Met_Office/UKV_single_step_and_single_timestep_all_vars.zarr'
NWP_BASE_PATH = BUCKET / 'NWP/UK_Met_Office/UKV_single_step_and_single_timestep_all_vars_full_spatial_2018_7-12_float32.zarr'
# In[4]:
params = dict(
batch_size=32,
history_len=6, #: Number of timesteps of history, not including t0.
forecast_len=12, #: Number of timesteps of forecast.
nwp_channels=(
't', 'dswrf', 'prate', 'r', 'sde', 'si10', 'vis', 'lcc', 'mcc', 'hcc')
)
# In[5]:
data_module = NowcastingDataModule(
pv_power_filename=PV_DATA_FILENAME,
pv_metadata_filename=f'gs://{PV_METADATA_FILENAME}',
sat_filename = f'gs://{SAT_FILENAME}',
# sat_channels =('HRV', 'WV_062', 'WV_073'),
nwp_base_path = f'gs://{NWP_BASE_PATH}',
pin_memory = True, #: Passed to DataLoader.
num_workers = 22, #: Passed to DataLoader.
prefetch_factor = 256, #: Passed to DataLoader.
n_samples_per_timestep = 8, #: Passed to NowcastingDataset
**params
)
# In[6]:
data_module.prepare_data()
# In[7]:
data_module.setup()
# ## Define very simple ML model
# In[8]:
import tilemapbase
from nowcasting_dataset.geospatial import osgb_to_lat_lon
# In[9]:
tilemapbase.init(create=True)
# In[10]:
def plot_example(batch, model_output, example_i: int=0, border: int=0):
fig = plt.figure(figsize=(20, 20))
ncols=4
nrows=2
# Satellite data
extent = (
float(batch['sat_x_coords'][example_i, 0].cpu().numpy()),
float(batch['sat_x_coords'][example_i, -1].cpu().numpy()),
float(batch['sat_y_coords'][example_i, -1].cpu().numpy()),
float(batch['sat_y_coords'][example_i, 0].cpu().numpy())) # left, right, bottom, top
def _format_ax(ax):
#ax.set_xlim(extent[0]-border, extent[1]+border)
#ax.set_ylim(extent[2]-border, extent[3]+border)
# ax.coastlines(color='black')
ax.scatter(
batch['x_meters_center'][example_i].cpu(),
batch['y_meters_center'][example_i].cpu(),
s=500, color='white', marker='x')
ax = fig.add_subplot(nrows, ncols, 1) #, projection=ccrs.OSGB(approx=False))
sat_data = batch['sat_data'][example_i, :, :, :, 0].cpu().numpy()
sat_min = np.min(sat_data)
sat_max = np.max(sat_data)
ax.imshow(sat_data[0], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
ax.set_title('t = -{}'.format(params['history_len']))
_format_ax(ax)
ax = fig.add_subplot(nrows, ncols, 2)
ax.imshow(sat_data[params['history_len']+1], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
ax.set_title('t = 0')
_format_ax(ax)
ax = fig.add_subplot(nrows, ncols, 3)
ax.imshow(sat_data[-1], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
ax.set_title('t = {}'.format(params['forecast_len']))
_format_ax(ax)
ax = fig.add_subplot(nrows, ncols, 4)
lat_lon_bottom_left = osgb_to_lat_lon(extent[0], extent[2])
lat_lon_top_right = osgb_to_lat_lon(extent[1], extent[3])
tiles = tilemapbase.tiles.build_OSM()
lat_lon_extent = tilemapbase.Extent.from_lonlat(
longitude_min=lat_lon_bottom_left[1],
longitude_max=lat_lon_top_right[1],
latitude_min=lat_lon_bottom_left[0],
latitude_max=lat_lon_top_right[0])
plotter = tilemapbase.Plotter(lat_lon_extent, tile_provider=tiles, zoom=6)
plotter.plot(ax, tiles)
############## TIMESERIES ##################
# NWP
ax = fig.add_subplot(nrows, ncols, 5)
nwp_dt_index = pd.to_datetime(batch['nwp_target_time'][example_i].cpu().numpy(), unit='s')
pd.DataFrame(
batch['nwp'][example_i, :, :, 0, 0].T.cpu().numpy(),
index=nwp_dt_index,
columns=params['nwp_channels']).plot(ax=ax)
ax.set_title('NWP')
# datetime features
ax = fig.add_subplot(nrows, ncols, 6)
ax.set_title('datetime features')
datetime_feature_cols = ['hour_of_day_sin', 'hour_of_day_cos', 'day_of_year_sin', 'day_of_year_cos']
datetime_features_df = pd.DataFrame(index=nwp_dt_index, columns=datetime_feature_cols)
for key in datetime_feature_cols:
datetime_features_df[key] = batch[key][example_i].cpu().numpy()
datetime_features_df.plot(ax=ax)
ax.legend()
ax.set_xlabel(nwp_dt_index[0].date())
# PV yield
ax = fig.add_subplot(nrows, ncols, 7)
ax.set_title('PV yield for PV ID {:,d}'.format(batch['pv_system_id'][example_i].cpu()))
pv_actual = pd.Series(
batch['pv_yield'][example_i].cpu().numpy(),
index=nwp_dt_index,
name='actual')
pv_pred = pd.Series(
model_output[example_i].detach().cpu().numpy(),
index=nwp_dt_index[params['history_len']+1:],
name='prediction')
    pd.concat([pv_actual, pv_pred], axis='columns')
"""
Processing data from the output database.
"""
import logging
from typing import List
from datetime import date
import numpy as np
import pandas as pd
from autumn.tools.db.database import get_database
from autumn.tools.db.load import load_mcmc_tables
from autumn.tools.utils.runs import read_run_id
logger = logging.getLogger(__name__)
def collate_databases(src_db_paths: List[str], target_db_path: str, tables=None):
"""
Collate the output of many calibration databases into a single database.
Run names are renamed to be ascending in the final database.
"""
logger.info("Collating db outputs into %s", target_db_path)
target_db = get_database(target_db_path)
for db_path in src_db_paths:
logger.info("Reading data from %s", db_path)
source_db = get_database(db_path)
for table_name in source_db.table_names():
if tables and table_name not in tables:
logger.info("Skipping table %s", table_name)
continue
logger.info("Copying table %s", table_name)
table_df = source_db.query(table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished collating db outputs into %s", target_db_path)
def find_mle_run(df: pd.DataFrame) -> pd.DataFrame:
accept_mask = df["accept"] == 1
max_ll = df[accept_mask]["loglikelihood"].max()
max_ll_mask = accept_mask & (df["loglikelihood"] == max_ll)
return df[max_ll_mask].copy()
def find_mle_params(mcmc_df: pd.DataFrame, param_df: pd.DataFrame) -> dict:
mle_run_df = find_mle_run(mcmc_df)
run_id = mle_run_df["run"].iloc[0]
chain_id = mle_run_df["chain"].iloc[0]
param_mask = (param_df["run"] == run_id) & (param_df["chain"] == chain_id)
params = {}
for _, row in param_df[param_mask].iterrows():
params[row["name"]] = row["value"]
return params
def get_identifying_run_ids(table: pd.DataFrame) -> pd.Series:
"""
Args:
table (pd.DataFrame): Table with 'run' and 'chain' columns
Returns:
pd.Series: Combined run identifier of same length as table
"""
return table["chain"].astype(str) + ":" + table["run"].astype(str)
def select_pruning_candidates(src_db_path: str, n_candidates: int, weighted=True) -> pd.DataFrame:
"""Select a random set of 'good enough' candidates for manual inspection
The output set will be guaranteed to contain the highest
MLE run from all the chains, in addition to randomly selected candidates
Args:
src_db_path (str): Base path of calibration run (containing subdirectories for each chain)
n_candidates (int): Number of candidates to select. If 1, then only the MLE run from all chains will be selected
weighted (bool): Weight candidates by 1.0/loglikelihood (False means uniform selection)
Returns:
candidates (pd.DataFrame): DataFrame containing unique identifiers (chain_id, run_id) of all candidates
"""
# +++ FIXME/TODO
# We just use a naive random selection, disregarding burn-in etc
# Could possibly use selection routine from sample_outputs_for_calibration_fit
# Load all MCMC run data to select from
all_mcmc_df = pd.concat(load_mcmc_tables(src_db_path), ignore_index=True)
all_accepted = all_mcmc_df[all_mcmc_df["accept"] == 1]
# Find the MLE candidate
max_ll = all_accepted["loglikelihood"].max()
max_ll_candidate = all_accepted[all_accepted["loglikelihood"] == max_ll].iloc[0].name
# Ensure candidates have been sampled and that output data is available
accepted_and_sampled = all_accepted[all_accepted["sampled"] == 1]
# Sample random candidates
possible_candidates = list(accepted_and_sampled.index)
if max_ll_candidate in possible_candidates:
possible_candidates.remove(max_ll_candidate)
if weighted:
# +++ FIXME Adding 10.0 to not overweight, should parameterise this
weights = 1.0 / (
10.0 + np.abs(np.array(accepted_and_sampled.loc[possible_candidates].loglikelihood))
)
weights = weights / weights.sum()
else:
weights = None
# Ensure we aren't sampling too many candidates (most likely to show up in testing)
n_candidates = min(n_candidates, len(possible_candidates))
candidates = list(
np.random.choice(possible_candidates, n_candidates - 1, replace=False, p=weights)
)
# Ensure we have the max likelihood candidate
candidates.append(max_ll_candidate)
candidates_df = all_accepted.loc[candidates]
return candidates_df
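# Worked sketch of the weighting above (toy numbers): accepted log-likelihoods of
# -10 and -40 give raw weights 1/20 = 0.05 and 1/50 = 0.02, i.e. normalized
# selection probabilities of roughly 0.71 and 0.29, before the MLE run is appended.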
def prune_chain(source_db_path: str, target_db_path: str, chain_candidates: pd.DataFrame):
"""
Read the model outputs from a database and removes output data that is not MLE.
This is an operation applied to each chain's database.
"""
logger.info("Pruning %s into %s", source_db_path, target_db_path)
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
# Copy tables over, pruning some.
tables_to_copy = source_db.table_names()
for table_name in tables_to_copy:
table_df = source_db.query(table_name)
if table_name == "outputs":
# Drop everything except the MLE run
logger.info("Pruning outputs so that it only contains candidate runs")
candidate_mask = table_df["run"].isin(chain_candidates["run"])
candidate_table_df = table_df[candidate_mask]
target_db.dump_df(table_name, candidate_table_df)
elif table_name:
# Copy table over (mcmc_run, mcmc_params, derived_outputs)
# We need to keep derived outputs here to be used by uncertainty calculations
logger.info("Copying %s", table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished pruning %s into %s", source_db_path, target_db_path)
def prune_final(source_db_path: str, target_db_path: str, candidates_df: pd.DataFrame):
"""
Read the model outputs from a database and remove all run-related data that is not MLE.
This is the final pruning for the collated database.
"""
logger.info("Pruning %s into %s", source_db_path, target_db_path)
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
# Find the maximum accepted loglikelihood for all runs
mcmc_run_df = source_db.query("mcmc_run")
mle_run_df = find_mle_run(mcmc_run_df)
mle_run_id = mle_run_df.run.iloc[0]
mle_chain_id = mle_run_df.chain.iloc[0]
# Copy tables over, pruning some.
tables_to_copy = source_db.table_names()
for table_name in tables_to_copy:
table_df = source_db.query(table_name)
if table_name == "derived_outputs":
# Drop everything except the candidate runs
logger.info("Pruning derived_outputs so that it only contains candidate runs")
candidate_iruns = get_identifying_run_ids(candidates_df)
table_df["irun_id"] = get_identifying_run_ids(table_df)
filtered_table_df = table_df[table_df["irun_id"].isin(candidate_iruns)]
final_df = filtered_table_df.drop(columns="irun_id")
target_db.dump_df(table_name, final_df)
elif table_name:
# Copy table over (outputs, mcmc_run, mcmc_params)
# Note: Outputs has already been pruned to candidates in early prune_chains sweep
logger.info("Copying %s", table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished pruning %s into %s", source_db_path, target_db_path)
def powerbi_postprocess(source_db_path: str, target_db_path: str, run_id: str):
"""
Read the model outputs from a database and then convert them into a form
that is readable by our PowerBI dashboard.
Save the converted data into its own database.
"""
from autumn.tools.project import get_project
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
tables_to_copy = [t for t in source_db.table_names() if t != "outputs"]
for table_name in tables_to_copy:
logger.info("Copying %s", table_name)
table_df = source_db.query(table_name)
if table_name == "uncertainty":
# Rename "time" field to "times"
            table_df = table_df.rename(columns={"time": "times"})
target_db.dump_df(table_name, table_df)
app_name, region_name, timestamp, git_commit = read_run_id(run_id)
# Add build metadata table
build_key = f"{timestamp}-{git_commit}"
logger.info("Adding 'build' metadata table with key %s", build_key)
build_df = pd.DataFrame.from_dict(
{"build_key": [build_key], "app_name": [app_name], "region_name": [region_name]}
)
target_db.dump_df("build", build_df)
# Add scenario metadata table
logger.info("Adding 'scenario' metadata table")
project = get_project(app_name, region_name)
    baseline_params = project.param_set.baseline.to_dict()
    sc_params = [sc.to_dict() for sc in project.param_set.scenarios]
    # Add default scenario
    scenario_data = [
        {
            "scenario": 0,
            "start_time": int(baseline_params["time"]["start"]),
            "description": baseline_params.get("description", ""),
        }
    ]
    for sc_idx, sc_param in enumerate(sc_params):
        sc_datum = {
            "scenario": int(sc_idx + 1),
            "start_time": int(sc_param["time"]["start"]),
            "description": sc_param.get("description", ""),
        }
scenario_data.append(sc_datum)
scenario_df = pd.DataFrame(scenario_data)
target_db.dump_df("scenario", scenario_df)
# Add calibration targets
logger.info("Adding 'targets' table")
targets_data = []
for target in project.calibration.targets:
for t, v in zip(target["years"], target["values"]):
t_datum = {
"key": target["output_key"],
"times": t,
"value": v,
}
targets_data.append(t_datum)
targets_df = pd.DataFrame(targets_data)
target_db.dump_df("targets", targets_df)
logger.info("Converting outputs to PowerBI format")
outputs_df = source_db.query("outputs")
pbi_outputs_df = unpivot_outputs(outputs_df)
target_db.dump_df("powerbi_outputs", pbi_outputs_df)
logger.info("Finished creating PowerBI output database at %s", target_db_path)
def unpivot_outputs(output_df: pd.DataFrame):
"""
Take outputs in the form they come out of the model object and convert them into a "long", "melted" or "unpiovted"
format in order to more easily plug to PowerBI
"""
id_cols = ["chain", "run", "scenario", "times"]
value_cols = [c for c in output_df.columns if c not in id_cols]
output_df = output_df.melt(id_vars=id_cols, value_vars=value_cols)
cols = {"compartment"}
def label_strata(row: list):
strata = {"compartment": row[0]}
for el in row[1:]:
parts = el.split("_")
k = parts[0]
# FIXME: Use this once Milinda can use it in PowerBI
# v = "_".join(parts[1:])
strata[k] = el
cols.add(k)
return strata
variables = (s.split("X") for s in output_df.variable)
new_cols_df = pd.DataFrame([label_strata(row) for row in variables])
output_df = output_df.join(new_cols_df)
output_df = output_df.drop(columns="variable")
return output_df
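# Sketch of the reshape performed above (hypothetical column name): a wide column
# such as "susceptibleXagegroup_0" is melted into a row whose "compartment" value is
# "susceptible" and whose "agegroup" value is "agegroup_0", alongside the
# chain/run/scenario/times identifier columns.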
def sample_runs(mcmc_df: pd.DataFrame, num_samples: int):
"""
Returns a list of chain ids + run ids for each sampled run.
Choose runs with probability proprotional to their acceptance weights.
"""
run_choices = list(zip(mcmc_df["chain"].tolist(), mcmc_df["run"].tolist()))
assert num_samples < len(run_choices), "Must be more samples than choices"
weights = mcmc_df["weight"].to_numpy()
sample_pr = weights / weights.sum()
idxs = np.array([i for i in range(len(weights))])
chosen_idxs = np.random.choice(idxs, size=num_samples, replace=False, p=sample_pr)
chosen_runs = [run_choices[i] for i in chosen_idxs]
return chosen_runs
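# Worked sketch of the weighting above (toy numbers): acceptance weights [1, 3]
# normalize to sampling probabilities [0.25, 0.75], so the second run is three times
# as likely to be drawn; sampling is without replacement.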
def select_outputs_from_candidates(
output_name: str,
derived_output_tables: pd.DataFrame,
candidates_df: pd.DataFrame,
ref_date: date,
):
out_df = pd.DataFrame()
for idx, c in candidates_df.iterrows():
chain = int(c["chain"])
run = int(c["run"])
ctable = derived_output_tables[chain]
run_mask = ctable["run"] == run
scenario_mask = ctable["scenario"] == 0
masked = ctable[run_mask & scenario_mask]
name = f"{chain}_{run}"
out_df[name] = pd.Series(
index=timelist_to_dti(masked["times"], ref_date), data=masked[output_name].data
)
return out_df
def timelist_to_dti(times, ref_date):
datelist = [ref_date + pd.offsets.Day(t) for t in times]
    return pd.DatetimeIndex(datelist)
import pandas as pd
from pandas import DataFrame
import numpy as np
from sklearn import metrics
import heapq
import random
from random import choice
from copy import deepcopy
def calclass(value, threshold):
new_value = []
for i in value:
if i > threshold:
new_value.append(1)
else:
new_value.append(0)
return new_value
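# Worked sketch: calclass([2.0, 0.5, 1.4], 1.36) returns [1, 0, 1], since only the
# first and third values exceed the 1.36 threshold (numbers are illustrative).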
def binary_perf_ddG(Y_true, Y_pred, threshold=1.36):
y_true = calclass(Y_true, 1.36)
y_pred = calclass(Y_pred, threshold)
# calculate the precision, recall and F1
F1_score = metrics.f1_score(y_true, y_pred)
Recall_score = metrics.recall_score(y_true, y_pred)
Precision_score = metrics.precision_score(y_true, y_pred)
Balanced_accuracy_score = metrics.balanced_accuracy_score(y_true, y_pred)
MMC = metrics.matthews_corrcoef(y_true, y_pred)
# record the performance
perf = {
'Recall': Recall_score,
'Precision': Precision_score,
'Balanced Accuracy': Balanced_accuracy_score,
'F1 Score': F1_score,
'Matthews Correlation Coefficient': MMC
}
return perf
def binary_perf_top(Y_true, Y_pred, threshold=0.15):
top = int(len(Y_pred)*threshold)
top_index = heapq.nlargest(top, range(len(Y_pred)), Y_pred.__getitem__)
top_pred = []
for i in range(len(Y_pred)):
if i in top_index:
top_pred.append(1)
else:
top_pred.append(0)
y_true = calclass(Y_true, 1.36)
perf = {
'Recall': metrics.recall_score(y_true, top_pred),
'Precision': metrics.precision_score(y_true, top_pred),
'Balanced Accuracy': metrics.balanced_accuracy_score(y_true, top_pred),
'F1 Score': metrics.f1_score(y_true, top_pred),
'Matthews Correlation Coefficient': metrics.matthews_corrcoef(y_true, top_pred)
}
return perf
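# Worked sketch of the top-fraction cutoff above (illustrative numbers): with 20
# predictions and threshold=0.15, top = int(20 * 0.15) = 3, so only the 3 largest
# predicted values are labelled 1 before being scored against y_true.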
def cal_performance(result_list):
result_min = min(result_list)
result_max = max(result_list)
result_mean = np.mean(result_list)
result_var = np.var(result_list)
result_median = np.median(result_list)
result_std = np.std(result_list, ddof = 1)
results = pd.DataFrame(columns=['value'])
results.loc['min'] = result_min
results.loc['max'] = result_max
results.loc['mean'] = result_mean
results.loc['var'] = result_var
results.loc['median'] = result_median
results.loc['std'] = result_std
return results
def random_select_samples_group(X_sel, Y, tki, Y_tki, group_tki, group_train, feature_name):
group_dict = {}
select_num = 2
mask = [c for c in feature_name]
X_test = tki[mask]
Y_test = Y_tki
for index, value in enumerate(group_tki):
if value not in group_dict.keys():
group_dict[value] = []
group_dict[value].append(index)
else:
group_dict[value].append(index)
selected_tki = []
for key in group_dict:
slice = random.sample(group_dict[key], select_num)
selected_tki.extend(slice)
print("Selected sample list:", selected_tki)
tki_list = [i for i in range(len(Y_test))]
tki_rest = list(set(tki_list).difference(set(selected_tki)))
X_test_s = X_test.loc[selected_tki]
Y_test_s = Y_test.loc[selected_tki]
X_test_new = X_test.loc[tki_rest]
Y_test_new = Y_test.loc[tki_rest]
X_sel.columns = feature_name
# Reset the group information
group_tki_select = group_tki.loc[selected_tki]
group_tki_new = ['Abl' for i in group_tki_select]
    group_tki_new = pd.Series(group_tki_new)
# CONTINUED IN https://github.com/rcsmit/COVIDcases/blob/main/covid_dashboard_rcsmit.py
# 27/28 feb 2021
# Calculate the relation between gliding R and mobility (Apple and Google)
# Calculate the correlation with hospital admissions and factors mentioned above
# Plotting a heatmap with correlations
# Plotting a scattermap
# Plotting a graph in time, with an adjusted x-axis
# 1 March 2021
# Merging files on date in different date formats
# Remove outliers (doesn't work)
# Calculating moving averages
# Make different statistics for weekdays and weekends
# Scraping statistics from RIVM
# 2 March
# R of hospital admissions
# Week graph
# Fixed the merge function
# 3 March
# Added restrictions (file from @HK_nien, MIT licence)
# Download and merge hospital admissions
# Download and merge R-number from RIVM
# Everything converted into functions
# 4 March
# More subdivision into functions. Everything callable from main() with parameters
# 5 March
# Custom colors
# Weekend different color in bar plot
# Annoying problem with a join (from outer to inner to outer and then it worked again)
# R value (14 days back due to smoothing)
# 6 March
# Last row in bar graph was omitted due to ["date of statistics"] instead of ["date"] in addwalkingR
# Bug with a reset_index() somewhere. Took a long time to find out
# Tried to first calculate SMA and R, and then cut off FROM/UNTIL. Doesn't
# work. Took also a huge amount of time. Reversed everything afterwards
# 7 March
# Week graph function with parameters
# 8 March
# Find columns with max correlation
# Find the time lag between two columns
# Added a second way to calculate and display R
# 9-11 March: Graphs from Van Dissel: bed occupancy vs R
# 12 March
# Normalized graph (max = 1 or start = 100)
# Various Tg of the R-number curves
# I used iloc. Iterating through pandas objects is generally slow.
# In many cases, iterating manually over the rows is not needed and
# can be avoided with one of the following approaches:
# http://pandas-docs.github.io/pandas-docs-travis/getting_started/basics.html#iteration
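# Example of the vectorized style referred to above (the column names are made up
# for illustration and do not appear in this script):
#     df["cases_sma"] = df["cases"].rolling(window=7, center=True).mean()  # moving average without a row loop
#     df["ratio"] = df["hospital"] / df["cases"]                           # element-wise, no iteration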
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import seaborn as sn
from scipy import stats
import datetime as dt
from datetime import datetime, timedelta
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import matplotlib.ticker as ticker
import math
_lock = RendererAgg.lock
from scipy.signal import savgol_filter
import urllib
import urllib.request
from pathlib import Path
from inspect import currentframe, getframeinfo
# R-numbers from 'https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json'
# Google mobilty from https://www.google.com/covid19/mobility/?hl=nl
# Apple mobility from https://covid19.apple.com/mobility
# # Merged in one file in Excel and saved to CSV
# Hospitals from RIVM 'https://data.rivm.nl/covid-19/COVID-19_ziekenhuisopnames.csv
def download_mob_r():
""" _ _ _ """
df_mob_r = pd.read_csv(
r'covid19_seir_models\input\mobility.csv',
delimiter=';',
low_memory=False
)
# datum is 16-2-2020
    df_mob_r['date'] = pd.to_datetime(df_mob_r['date'], format="%d-%m-%Y")
'''
Pandas: data manipulation
=========================
It is often said that 80% of data analysis is spent on cleaning and preparing
data. This section covers the small, but important, aspects of data manipulation
and cleaning with Pandas.
**Sources**:
- <NAME>: https://github.com/justmarkham
- Pandas doc: http://pandas.pydata.org/pandas-docs/stable/index.html
**Data structures**
- **Series** is a one-dimensional labeled array capable of holding any data
type (integers, strings, floating point numbers, Python objects, etc.).
The axis labels are collectively referred to as the index. The basic method
to create a Series is to call `pd.Series([1,3,5,np.nan,6,8])`
- **DataFrame** is a 2-dimensional labeled data structure with columns of
potentially different types. You can think of it like a spreadsheet or SQL
table, or a dict of Series objects. It stems from the `R data.frame()`
object.
'''
import pandas as pd
import numpy as np
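##############################################################################
# Create Series
# -------------
#
# A quick illustration of the Series object described above (the values are
# arbitrary, chosen only for this sketch).

s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)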
##############################################################################
# Create DataFrame
# ----------------
columns = ['name', 'age', 'gender', 'job']
user1 = pd.DataFrame([['alice', 19, "F", "student"],
['john', 26, "M", "student"]],
columns=columns)
user2 = pd.DataFrame([['eric', 22, "M", "student"],
['paul', 58, "F", "manager"]],
columns=columns)
user3 = pd.DataFrame(dict(name=['peter', 'julie'],
age=[33, 44], gender=['M', 'F'],
job=['engineer', 'scientist']))
print(user3)
##############################################################################
# Combining DataFrames
# --------------------
##############################################################################
# Concatenate DataFrame
# ~~~~~~~~~~~~~~~~~~~~~
##############################################################################
# Concatenate columns (axis = 1).
height = pd.DataFrame(dict(height=[1.65, 1.8]))
print(user1, "\n", height)
print(pd.concat([user1, height], axis=1))
##############################################################################
# Concatenate rows (default: axis = 0)
users = pd.concat([user1, user2, user3])
print(users)
##############################################################################
# Concatenate rows: append
user1.append(user2)
##############################################################################
# Join DataFrame
# ~~~~~~~~~~~~~~
user4 = pd.DataFrame(dict(name=['alice', 'john', 'eric', 'julie'],
height=[165, 180, 175, 171]))
print(user4)
##############################################################################
# Use intersection of keys from both frames
merge_inter = pd.merge(users, user4)
#%%
import requests
import json
import pandas as pd
headers = {'Content-type': 'application/json'}
#api_key = [enter key here]
def get_json(url):
req = requests.get(url=url, headers=headers)
return req.json()['results']
#%%
#%%
def get_df():
url = 'http://api.reimaginebanking.com/enterprise/customers?key=' + api_key
X = get_json(url)
addresses = [cust['address'] for cust in X]
X = [{k: v for k, v in d.items() if k != 'address'} for d in X]
X = [{**d1, **d2} for d1, d2 in zip(X, addresses)]
#%%
    cust_df = pd.DataFrame(data=X)
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
# with AM goes through split-path, loses dtype
@td.skip_array_manager_not_yet_implemented
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
ser = df["A"]
arr = ser._values
msg = "|".join(
[
r"int\(\) argument must be a string, a bytes-like object or a "
"(real )?number, not 'NaTType'",
r"timedelta64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
r"datetime64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
"object cannot be converted to a FloatingDtype",
"'values' contains non-numeric NA",
]
)
with pytest.raises(TypeError, match=msg):
arr[0] = null
with pytest.raises(TypeError, match=msg):
arr[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser[0] = null
with pytest.raises(TypeError, match=msg):
ser[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser.iloc[0] = null
with pytest.raises(TypeError, match=msg):
ser.iloc[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
df.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df.iloc[:2, 0] = [null, null]
# Multi-Block
df2 = df.copy()
df2["B"] = ser.copy()
with pytest.raises(TypeError, match=msg):
df2.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
# GH#45621
df = DataFrame(columns=["b"], index=Index([], name="a"))
df.loc[0] = 1
expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
tm.assert_frame_equal(df, expected)
def test_loc_expand_empty_frame_keep_midx_names(self):
# GH#46317
df = DataFrame(
columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])
)
df.loc[(1, 2, 3)] = "foo"
expected = DataFrame(
{"d": ["foo"]},
index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),
)
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
df = uint64_frame
idx = df["A"].rename("foo")
# setitem
assert "C" not in df.columns
df["C"] = idx
tm.assert_series_equal(df["C"], | Series(idx, name="C") | pandas.Series |
import datetime
import time
import gspread
from gspread_dataframe import set_with_dataframe
import json
import os
import pandas as pd
import requests
username = os.getenv("PELOTON_USERNAME")
password = os.getenv("PELOTON_PASSWORD")
user_id = os.getenv("PELOTON_USER_ID")
service_account_creds = os.getenv("SERVICE_ACCOUNT_CREDS")
worksheet_key = os.getenv("WORKSHEET_KEY")
sheet_index = int(os.getenv("SHEET_INDEX"))
KEYFILE = 'service_account_creds.json'
def convertEpochToTimestamp(epoch):
    date_format = "%Y-%m-%dT%H:%M:%S"  # %M = minutes; a lowercase %m here would insert the month
created_at_datetime = datetime.datetime.fromtimestamp(epoch)
created_at_date = created_at_datetime.strftime(date_format)
return created_at_date
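# Illustrative example (assumed epoch value; the exact string depends on the machine's local timezone):
# convertEpochToTimestamp(1617458062) -> '2021-04-03T13:54:22' when run in UTC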
def authenticate():
# start requests session & authenticate
s = requests.Session()
payload = {'username_or_email': username, 'password': password}
s.post('https://api.onepeloton.com/auth/login', json=payload)
return s
def getWorkouts(s):
# get workouts
route = f"https://api.onepeloton.com/api/user/{user_id}/workouts?limit=99999"
my_results = s.get(route).json()
my_results_df = pd.DataFrame(my_results['data'])
my_results_df['created_at_timestamp'] = [convertEpochToTimestamp(x) for x in my_results_df['created_at']]
my_results_df['start_time_timestamp'] = [convertEpochToTimestamp(x) for x in my_results_df['start_time']]
my_results_df['end_time_timestamp'] = [convertEpochToTimestamp(x) for x in my_results_df['end_time']]
my_results_df = my_results_df[[
'created_at_timestamp'
, 'start_time_timestamp'
, 'end_time_timestamp'
, 'id'
, 'is_total_work_personal_record'
, 'status'
, 'total_work']]
# filter to workouts on or after April 1, 2021
# my_results_df = my_results_df[my_results_df['created_at']>=1617458062]
return my_results_df
def getWorkoutDetails(s, workouts_df):
workout_ids = workouts_df['id']
workout_data_fields = [
'description'
, 'difficulty_rating_avg'
, 'duration'
, 'id'
, 'image_url'
, 'instructor_id'
, 'is_explicit'
, 'length'
, 'location'
, 'overall_rating_avg'
, 'overall_rating_count'
, 'ride_type_id'
, 'series_id'
, 'title'
]
base_uri = "https://api.onepeloton.com/api/workout/"
workouts_dict_list = []
i = 1
total = len(workout_ids)
for workout_id in workout_ids:
print(f"Processing workout {i} out of {total}...")
route = base_uri + workout_id
workout_details = s.get(route).json()
workout_details_dict = {k: v for k, v in workout_details['ride'].items() if k in workout_data_fields}
workout_details_dict['workout_id'] = workout_id
workouts_dict_list.append(workout_details_dict)
i += 1
workout_df = | pd.DataFrame(workouts_dict_list) | pandas.DataFrame |
import pandas as pd
import gensim
import pickle
def create_dictionary(i):
nfile_lst = [57, 57, 56, 56, 56]
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
########################################################################
## 02/08/2021
## By <NAME>,
## <EMAIL>
## Peng's Lab
## Version.beta
########################################################################
# Usage
#python ${EXE_PATH} -b ${INPUT_FILE} -c ${INPUT_NAME} -k ${GENE_LIST_FOLDER}/${GENELISTFILE} -l ${GENELISTFILE: :-4} -r ${RESOLUTION} -f ${FRAGMENTSIZE} -g ${GTFFILE} \
# -w ${WINDOWSIZE} -n ${NORMALIZATION} -t ${REGIONTYPE} -u ${UP_EXTENSION} -d ${DOWN_EXTENSION} -o ${OUTPUTDIR} -p ${Genic_Partition}
########################################################################
import pandas as pd
import numpy as np
import igraph as ig
from scipy import stats
from optparse import OptionParser
import sys, os, multiprocessing
from gooey import Gooey
@Gooey
####################################################################################
## FUNCTIONS
### FUNCTION
def Norm_df_hic(_df_interaction, _col_fore, _col_back, _resolution):
col_fore = _col_fore
col_back = _col_back
resolution = _resolution
_df_interaction = _df_interaction.fillna(0).replace(0,1)
df_interaction = LOESS_Norm_df(_df_interaction, col_fore, col_back)
df_interaction.loc[:,'#chr']=df_interaction.iloc[:,0].astype(str).replace('chr','')
df_interaction.loc[:,'#chr1']=df_interaction.iloc[:,0]
df_interaction.loc[:,'x1']=df_interaction.iloc[:,1].astype(int)
df_interaction.loc[:,'x2']=df_interaction.iloc[:,1].astype(int)+int(resolution)
df_interaction.loc[:,'chr2']=df_interaction.iloc[:,0]
df_interaction.loc[:,'y1']=df_interaction.iloc[:,2].astype(int)
df_interaction.loc[:,'y2']=df_interaction.iloc[:,2].astype(int)+int(resolution)
if('logFC' in df_interaction.columns):
df_interaction.loc[:,'log_FC'] = df_interaction.loc[:,'logFC']
else:
df_interaction.loc[:,'log_FC'] = np.log2(df_interaction.loc[:,col_fore] / df_interaction.loc[:,col_back])
df_interaction = df_interaction.loc[:,['#chr1','x1','x2','chr2','y1','y2','log_FC', col_fore, col_back]]
return df_interaction
def LOESS_Norm_df (_df, _col1, _col2):
## this is a similar approach as LOESS Normalization
df_test = _df
n_bins = 100
df_test['A'] = 0.5*(np.log2(df_test[_col1]) + np.log2(df_test[_col2])).rank(method='first') ## A is the value for MA plot
df_test['label'] = pd.qcut(df_test['A'].values, q=np.arange(0,1+1/n_bins, 1/n_bins),
labels = np.arange(0,n_bins))#, duplicates='drop')
df_group = df_test.groupby('label')
df_out = pd.DataFrame()
for df_for_norm in df_group:
df_bbb = df_for_norm[1]
sum_1 = df_bbb[_col1].sum(axis=0)
sum_2 = df_bbb[_col2].sum(axis=0)
df_bbb[_col2] = round(df_bbb[_col2]/sum_2*sum_1, 2)
df_out = df_out.append(df_bbb)
return df_out.sort_index()
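# Minimal usage sketch (column names here are hypothetical; in this script the foreground and
# background Hi-C count columns are passed in):
#   df_normed = LOESS_Norm_df(df_counts, 'fore_count', 'back_count')
# Rows are binned by average log2 intensity (the "A" of an MA plot) and the background column
# is rescaled within each bin so that its per-bin sum matches the foreground column.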
def Convert_Loops_to_Graph(_df_hic, _weight_col):
## Assign a list of weight ot graph
## loop format: ['#chr1', 'x1', 'x2', 'chr2', 'y1', 'y2', 'GeneID', 'weight_cols']
df_bins = Loops_Return_two_bins_no_dup(_df_hic)
## eliminate float in chr
df_bins['name'] = df_bins['#chr1'].astype(str).str.split(".",expand=True)[0]+':'+df_bins['x1'].astype(int).astype(str)+'-'+df_bins['x2'].astype(int).astype(str)
Num_vs = len(df_bins.index)
## Initiation a graph from loops file
graph_tem = ig.Graph()
graph_tem.add_vertices(Num_vs)
graph_tem.vs["name"] = df_bins.loc[:,'name']
df_edge = _df_hic.merge(df_bins, on=['#chr1', 'x1', 'x2']).merge(
df_bins, left_on=['chr2', 'y1', 'y2'], right_on=['#chr1', 'x1', 'x2'])
graph_tem.add_edges(df_edge.loc[:, ['index_x','index_y']].values)
for weight in _weight_col:
if (weight in _df_hic.columns):
graph_tem.es[weight] = df_edge.loc[:,weight].values
return graph_tem
def Loops_Return_two_bins_no_dup(df_hic):
## Associated by promoter
second_bin_columns = [3,4,5,0,1,2]+list(range(6,len(df_hic.columns),1))
df_hic=df_hic.append(pd.DataFrame(df_hic.iloc[:, second_bin_columns].values, columns=df_hic.columns),sort=False).sort_index()
return df_hic.iloc[:,0:3].drop_duplicates().reset_index().drop('index',axis=1).reset_index()
def convert_cluster2bed(df_cluster, usecol):
df_tem = df_cluster[usecol].str.split(r"\:|-",expand=True)
df_tem = pd.concat( [df_tem, df_cluster], axis=1)
if (df_tem.iloc[0,0].find('chr') == -1):
df_tem[0] = 'chr'+df_tem[0]
return df_tem
def convert_bin2bed(df_cluster, col_name):
df_tem = df_cluster[col_name].str.split(r"\:|-",expand=True)
df_tem = pd.concat( [df_tem, df_cluster], axis=1)
if (df_tem.iloc[0,0].find('chr') == -1):
df_tem[0] = 'chr'+df_tem[0]
return df_tem
def convert_vs2bed(input_graph, col_name):
## output first 3 columns is standard bed format
df_tem = pd.DataFrame(data={col_name:input_graph.vs[col_name]})
df_tem = pd.concat( [df_tem[col_name].str.split(r"\:|-",expand=True),df_tem], axis=1)
if (df_tem.iloc[0,0].find('chr') == -1):
df_tem[0] = 'chr'+df_tem[0]
return df_tem
def convert_graph_vs_to_df(_input_graph):
df_vs = pd.DataFrame(data= {"degree":_input_graph.degree()})
for col in _input_graph.vs.attributes():
df_vs[col] = _input_graph.vs[col]
return df_vs
def graph_community_multilevel_Blondel(input_graph, cutoff):
## input graph should have at least one attribute: name
df_vs = convert_graph_vs_to_df(input_graph)
_col_vs_name='name'
if (input_graph.is_weighted()):
print ("Weighted Graph Cluster")
structure = input_graph.community_multilevel(weights=input_graph.es['weight'] ,return_levels=False)
else:
structure = input_graph.community_multilevel(return_levels=False)
df_vs['membership'] = structure.membership
df_vs_cluster_group = df_vs.groupby('membership')
## Rank each cluster by number of bins
cluster_name=[]
cluster_num_vertices=[]
for df_vs_cluster in df_vs_cluster_group:
df_vs_inside_cluster = Cluster_Filter_by_Denisty(df_vs_cluster[1], _col_vs_name, 'degree', cutoff)
#df_vs_inside_cluster =df_vs_cluster[1]
df_cluster_coordiante = df_vs_inside_cluster[_col_vs_name].str.split(r"\:|-",expand=True)
cluster_coordinate = 'chr'+df_cluster_coordiante.iloc[0,0]+':'+str(df_cluster_coordiante.iloc[:,1].astype(int).min())+'-'+str(df_cluster_coordiante.iloc[:,2].astype(int).max())
cluster_name.append(cluster_coordinate) ##0: cluster name
cluster_num_vertices.append(len(df_vs_inside_cluster)) # 1: num_vertices
df_cluster_output = pd.DataFrame(data={'hub_name':cluster_name,'Num_vertices':cluster_num_vertices}).sort_values('Num_vertices', ascending=False)
return df_cluster_output, df_vs_cluster_group
def Graph_Pagerank(_input_graph):
input_graph = _input_graph
input_graph.vs['pagerank'] = input_graph.pagerank(weights=input_graph.es['weight'])
return input_graph
### allow a gap size of one window
def Stich_Region_Above_global_Mean(_graph, _resolution, _gap_size, _mean):
resolution=_resolution
df_vs_graph = convert_graph_vs_to_df(_graph)
df_nodes = convert_cluster2bed(df_vs_graph, 'name')
df_nodes[1] = df_nodes[1].astype(int)
df_nodes = df_nodes.sort_values(by=1)
df_nodes = df_nodes[df_nodes['pagerank'] > _mean] ## Only use nodes > mean
Report_list=[]
if (len(df_nodes)>0):
## report stich regions
reg_chr = str(df_nodes.iloc[0,0])
reg_start= int(df_nodes.iloc[0,1])
reg_end = int(reg_start)
for bin1 in df_nodes.iloc[:,1].astype(int):
if (bin1-reg_end)<=_gap_size*resolution:
reg_end = bin1
else:
Report_list.append([reg_chr+':'+str(reg_start)+'-'+str(reg_end+resolution), _gap_size])
reg_start = bin1
reg_end = bin1
Report_list.append([reg_chr+':'+str(reg_start)+'-'+str(reg_end+resolution), _gap_size])
return pd.DataFrame(data=Report_list, columns=['hub_name', 'merge_level'])
def Return_Sorted_Adjacency_Matrix(_graph, _attr):
## Sort by coordinate
graph_tem = _graph
attr =_attr
idx_name = [int(str(x).split(":")[1].split("-")[0]) for x in graph_tem.vs['name']]
matrix_tem = pd.DataFrame(data=graph_tem.get_adjacency(attribute=attr), columns=idx_name, index=idx_name)
df_reindex = pd.DataFrame(data={ 'rank': (stats.rankdata(matrix_tem.columns)-1).astype(int)})
idx_rank = df_reindex.sort_values(by='rank').index
## reference https://wil.yegelwel.com/cluster-correlation-matrix/
return matrix_tem.iloc[idx_rank, :].T.iloc[idx_rank, :]
def Pvalue_Rank_Test_Matrix(_matirx):
matrix_for_test = _matirx
data_test = matrix_for_test.fillna(0).values.flatten() ## flatten 2d into 1D
if (len(data_test)>10):
w, pvalue =stats.wilcoxon(data_test, zero_method='zsplit', alternative='greater', correction=True, mode='approx')
        # "zsplit": includes zero-differences in the ranking process and splits the zero rank between positive and negative ones.
else:
pvalue=1.0
return float(pvalue)
def Return_Pvalue_For_Given_Graph(_df_region, _resolution, _matrix):
df_region = _df_region
df_regionh_bed = convert_cluster2bed(df_region, 'hub_name').sort_values(by=1)
resolution = _resolution
matrix_for_test = _matrix
## convert each region into bins
idx_regs = []
for name_stitch in df_region.hub_name:
region_loc= name_stitch.split(":")[1].split("-")
idx_reg = []
for idx in matrix_for_test.index:
if ((idx>=int(region_loc[0]))&(idx<=int(region_loc[1]))):
idx_reg.append(idx)
idx_regs.append(idx_reg)
pvalue_region= []
for i in range(len(idx_regs)):
## first check on pyramid change, only after pyramid is significant, then calculate stripes.
part_matrix_for_test = matrix_for_test.loc[idx_regs[i],:].T.loc[idx_regs[i], :]
pvalue_tem = Pvalue_Rank_Test_Matrix(part_matrix_for_test)
if (pvalue_tem < 1):# ## cutoff 10**-2):
pvalue_region.append([df_region.hub_name[i],df_region.hub_name[i],-np.log10(pvalue_tem)])
for j in range(0, i, 1):
part_matrix_for_test = matrix_for_test.loc[idx_regs[i],:].T.loc[idx_regs[j], :]
pvalue_tem = Pvalue_Rank_Test_Matrix(part_matrix_for_test)
pvalue_region.append([df_region.hub_name[i],df_region.hub_name[j], np.round(-np.log10(pvalue_tem),3)])
return pd.DataFrame(data=pvalue_region, columns=['reg1', 'reg2', '-log10(pvalue)']).sort_values('-log10(pvalue)', ascending=False)
def Main_For_Diff_Regions(df_hic, _col_fore, _col_back, _resolution, _pvalue):
#Create a weight basing on logFC (logFC < 0)
_gapsize=2 ## this parameter is try to avoid blank due to artifacts
logfc_cutoff=0
cut_pvalue=-np.log10(_pvalue)
_df_hic = df_hic
_df_hic[_col_fore+'_weight'] = _df_hic[_col_fore]*_df_hic.log_FC.apply(lambda x: 1 if x > logfc_cutoff else(0))
Norm_window_Size=0 ### To be optimized for boundary
_df_hic['diff'] = _df_hic[_col_fore] - _df_hic[_col_back]
_df_hic['pagerank_weight'] = _df_hic['diff']*(abs(_df_hic.y1-_df_hic.x1)).apply(lambda x : 1 if x >= Norm_window_Size*_resolution else (0) )
_df_hic['pagerank_weight'] = _df_hic['pagerank_weight'].apply(lambda x : x if x >0 else (0) )
weight_list= ['diff','pagerank_weight', _col_fore+'_weight']
input_graph = Convert_Loops_to_Graph(_df_hic, weight_list)
######################################################## Diff_weight for pagerank
input_graph.es['weight'] = input_graph.es['pagerank_weight']#input_graph.es[_col_fore+'_weight'] #input_graph.es['pagerank_weight']
input_graph = Graph_Pagerank(input_graph)
global_median = np.percentile(input_graph.vs['pagerank'], 50)
cut_off = global_median# - np.std(input_graph.vs['pagerank'])
####################################################### Strength weight for structure
input_graph.es['weight'] = input_graph.es[_col_fore+'_weight']
structure = input_graph.community_multilevel(weights=input_graph.es['weight'], return_levels=True)
#######################################################
### Stich according to pagerank locally
df_out = | pd.DataFrame(columns=['reg1', 'reg2', '-log10(pvalue)']) | pandas.DataFrame |
from urllib import parse
import requests
import time
import os.path
from os import path
import argparse
import csv
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
target_url = 'https://pechlilab.shinyapps.io/output/'
async def test(source):
output = './'
try:
source_index = get_permitted_sources().index(source) + 1
except ValueError:
print('Invalid source parameter {}'.format(source))
print('Choose from {}'.format('\n'.join(get_permitted_sources())))
return
options = Options()
options.headless = True
options.add_argument('--disable-gpu')
options.add_argument('--no-sandbox')
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(10)
driver.get(target_url)
    inputButton = driver.find_element_by_css_selector('input[value="{}"]'.format(source_index))
inputButton.click()
modelButton = driver.find_element_by_id('DisplayModel1')
modelButton.click()
time.sleep(5)
button = driver.find_element_by_id('Download')
data_endpoint = button.get_attribute('href')
# Download the file
data = requests.get(data_endpoint, allow_redirects=True)
lines = data.content.decode("utf-8").split('\n')
reader = csv.reader(lines)
parsed_csv = list(reader)
columns = parsed_csv[0]
columns[0]='date'
df = | pd.DataFrame(data=parsed_csv[1:], columns=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 13:55:59 2021
@author: tatia
"""
from dataproc.cohort import query_esbl_pts, remove_dups, observation_window
from dataproc.sampling import generate_samples
from dataproc.sampling import stratify_set
from dataproc.roc_auc_curves import plt_roc_auc_curve, plt_precision_recall_curve
from dataproc.create_dataset import dataset_creation
from dataproc.create_dataset import prescriptions
from dataproc.create_dataset import previous_admissions
from dataproc.create_dataset import open_wounds_diags, intubation_cpt, noteevents
from dataproc.embeddings import loinc_values
from hyper_params import HyperParams
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
import re
# load hyperparams instance
params = HyperParams()
def cohort_creation(observation_window_hours):
# Select esbl microbiology test
esbl_admits = query_esbl_pts()
# Remove dups
esbl_admits = remove_dups(esbl_admits)
# Create observation window
esbl_admits_window = observation_window(esbl_admits, window_size=observation_window_hours)
# Subset columns
pts_labels = esbl_admits_window[['hadm_id', 'index_date','RESISTANT_YN']]
return pts_labels
def loinc_values_proc(loinc_codes):
loinc_vals = loinc_values(loinc_codes)
loinc_vals.dropna(subset=['value'], inplace=True)
loinc_vals = loinc_vals.astype({'value': 'string', 'loinc_code': 'category'})
    # Note: str.lstrip()/rstrip() treat their argument as a set of characters, not a literal
    # prefix/suffix, so strip the qualifier phrases with explicit replacements instead.
    for phrase in ['GREATER THAN ', 'LESS THAN ', ' NG/ML']:
        loinc_vals['value'] = loinc_vals['value'].str.replace(phrase, '', regex=False)
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('<>'))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.replace(',', '.'))
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO ANALYZE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'MOLYSIS FALSELY DECREASES THIS RESULT'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'COMPUTER NETWORK FAILURE. TEST NOT RESULTED.'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO DETERMINE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == ':UNABLE TO DETERMINE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO QUANTITATE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO REPORT'].index), inplace=True)
return loinc_vals
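# Usage sketch: 'loinc_codes' is assumed to be defined elsewhere in the project (it is referenced
# below but never built in this file), e.g. loinc_vals = loinc_values_proc(loinc_codes)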
def lab_records_categories(loinc_vals):
numeric = []
categorical = []
weird = []
for code in loinc_codes:
size = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'])
size_unique = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'].unique())
sum_na = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').isna().sum()
if sum_na / size < 0.05:
numeric.append(code)
elif sum_na / size > 0.05 and size_unique < 100:
categorical.append(code)
else:
weird.append(code)
# Remove columns that are not useful:
# remove lab column that contains only 'inf' and 'Nan'
numeric.remove('26498-6')
# remove lab column that only contains phrase 'See comments'
categorical.remove('33914-3')
# remove lab column that contains phrase 'Random'
categorical.remove('13362-9')
return numeric, categorical, weird
def sum_stats_numeric_labs(loinc_vals, numeric):
numeric_stats = []
for code in numeric:
a = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').describe()
numeric_stats.append(a)
numeric_stats_df = pd.concat(numeric_stats, axis=1, keys=numeric)
return numeric_stats_df
def stanardize_numeric_values(df, list_of_clms, ref_df):
"""
Use the median and interquartile range to
standardize the numeric variables
    value = (value - median) / (p75 - p25)
"""
for code in list_of_clms:
median = ref_df[code]['50%']
p25 = ref_df[code]['25%']
p75 = ref_df[code]['75%']
df[code] = (df[code] - median) / (p75 - p25)
# Subset relevant columns
columns = ['hadm_id'] + list_of_clms
df = df[columns].copy()
return df
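# Worked example of the formula above (numbers are illustrative, not taken from the data):
# with median = 5.0, p25 = 4.0 and p75 = 8.0, a raw value of 7.0 becomes (7.0 - 5.0) / (8.0 - 4.0) = 0.5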
def replace_missing_val(df, list_of_clms, how='median'):
"""
Imputation of missing values using median
"""
imp = SimpleImputer(strategy=how)
df_prc = imp.fit_transform(df[list_of_clms])
df_prc = | pd.DataFrame(df_prc, columns=list_of_clms) | pandas.DataFrame |
from scipy.spatial.distance import euclidean
import pandas as pd
import numpy as np
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path to the images')
parser.add_argument('--fast', dest='fast', action='store_true', help='Analyze faster!')
parser.set_defaults(fast=False)
args = parser.parse_args()
df = pd.read_csv(os.path.join(args.path, 'features.csv'))
df_feat = df[[col for col in df if col.startswith('images_feat')]]
# Pre-sort by a norm so it's a little bit easier to find matches when using `--fast`
df['D'] = np.linalg.norm(df_feat, axis=1)
df = df.sort_values(by=['D']).reset_index(drop=True)
R = []
Left = df.index.tolist()
while Left:
if len(Left) % 10 == 0:
print('Sorting images... %d left' % len(Left))
if not R:
i = np.random.randint(len(Left))
Left.remove(i)
R.append(i)
i = R[-1]
best_j = Left[0]
    best_sim = -np.inf  # highest similarity seen so far for the current image
if args.fast:
end = 101
limit = 1
else:
end = len(Left)
limit = np.inf
for j in Left[1:end]:
dist = (df.iloc[i]['D'] - df.iloc[j]['D'])**2
if dist < limit:
#TODO: make a better similarity measure
sim = 1/(1 + euclidean(df_feat.iloc[i], df_feat.iloc[j]))
            if sim > best_sim:  # keep the most similar candidate
                best_j = j
                best_sim = sim
else:
print(dist)
break
Left.remove(best_j)
R.append(best_j)
df['R'] = | pd.Series(R) | pandas.Series |
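# A typical follow-up step (not part of the original script) would be to persist the greedy
# visiting order, e.g. df.loc[R].to_csv(os.path.join(args.path, 'features_sorted.csv'), index=False)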
import argparse
import pathlib
import sys
import pandas as pd
FILE_FORMAT = "fastq"
def read_ena_xlsx_sheet(xlsx_path, sheet_name):
file_path = pathlib.Path(xlsx_path)
file_extension = file_path.suffix.lower()[1:]
if file_extension == "xlsx":
engine = "openpyxl"
else:
engine = None
df = (
pd.read_excel(
xlsx_path,
sheet_name=sheet_name,
engine=engine,
converters={"collection date": str},
)
.dropna(axis=0, how="all")
.dropna(axis=1, how="all")
)
assert not df.empty, f"Sheet '{sheet_name}' is empty in {xlsx_path}"
return df
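# Example call (file name is hypothetical; main() below passes the user-supplied xlsx path):
#   df_studies = read_ena_xlsx_sheet("submission_metadata.xlsx", sheet_name="ENA_study")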
def extract_data(xl_sheet, expected_columns, unique_key="alias"):
if any(xl_sheet.columns.value_counts() > 1):
sys.exit("Duplicated columns")
for col in range(len(expected_columns)):
assert (
expected_columns[col] in xl_sheet.columns
), f"Expected column {expected_columns[col]} not found"
assert not any(
xl_sheet.duplicated(subset=[unique_key])
), f"{unique_key} identificators not unique"
xl_sheet = xl_sheet.set_index(unique_key)
return xl_sheet.to_dict("index")
def main(xlsx_path, out_path, action, viral_submission=False):
# PARSE STUDIES
#################
xl_sheet = read_ena_xlsx_sheet(xlsx_path, sheet_name="ENA_study")
if xl_sheet.shape[0] < 1:
raise ValueError("No entries found in studies sheet")
studies_col = ["alias", "title", "study_type", "study_abstract"]
try:
studies_dict = extract_data(xl_sheet, studies_col)
except AssertionError as e:
print("Sheet ENA_study: ", e)
raise
# PARSE SAMPLES
#################
xl_sheet = read_ena_xlsx_sheet(xlsx_path, sheet_name="ENA_sample")
if xl_sheet.shape[0] < 1:
raise ValueError("No entries found in samples")
if viral_submission:
samples_cols = [
"alias",
"title",
"scientific_name",
"sample_description",
"geographic location (country and/or sea)",
"host common name",
"host health state",
"host sex",
"host scientific name",
"collector name",
"collection date",
"collecting institution",
"isolate",
]
else:
samples_cols = ["alias", "title", "scientific_name", "sample_description"]
try:
samples_dict = extract_data(xl_sheet, samples_cols)
except AssertionError as e:
print("Sheet ENA_sample: ", e)
raise
# PARSE EXPERIMENTS
#################
xl_sheet = read_ena_xlsx_sheet(xlsx_path, sheet_name="ENA_experiment")
if xl_sheet.shape[0] < 1:
raise ValueError("No experiments found in experiments sheet")
exp_columns = [
"alias",
"title",
"study_alias",
"sample_alias",
"design_description",
"library_name",
"library_strategy",
"library_source",
"library_selection",
"library_layout",
"insert_size",
"library_construction_protocol",
"platform",
"instrument_model",
]
try:
experiments_dict = extract_data(xl_sheet, exp_columns)
except AssertionError as e:
print("Sheet ENA_experiment: ", e)
raise
# PARSE RUNS SHEET
#################
xl_sheet = read_ena_xlsx_sheet(xlsx_path, sheet_name="ENA_run")
if xl_sheet.shape[0] < 1:
raise ValueError("No entries found in runs sheet")
run_cols = ["alias", "experiment_alias", "file_name", "file_format"]
try:
runs_dict = extract_data(xl_sheet, run_cols, unique_key="file_name")
except AssertionError as e:
print("Sheet ENA_run: ", e)
raise
# DROP COMMENTS
###############
studies_dict = {
k: v
for k, v in studies_dict.items()
if k in set([v["study_alias"] for k, v in experiments_dict.items()])
}
assert bool(studies_dict), "No entries found in studies"
experiments_dict = {
k: v
for k, v in experiments_dict.items()
if v["study_alias"] in studies_dict.keys()
}
assert bool(experiments_dict), "No entries found in experiments"
samples_dict = {
k: v
for k, v in samples_dict.items()
if k in set([v["sample_alias"] for k, v in experiments_dict.items()])
}
assert bool(samples_dict), "No entries found in samples"
runs_dict = {
k: v
for k, v in runs_dict.items()
if v["experiment_alias"] in experiments_dict.keys()
}
assert bool(runs_dict), "No entries found in runs"
# WRITE HEADERS TO TABLES
studies_table = open(pathlib.Path(out_path) / "studies.tsv", "w")
studies_table.write(
"\t".join(
[
"alias",
"status",
"accession",
"title",
"study_type",
"study_abstract",
"pubmed_id",
"submission_date",
]
)
+ "\n"
)
samples_table = open(pathlib.Path(out_path) / "samples.tsv", "w")
if viral_submission:
samples_table.write(
"\t".join(
[
"alias",
"status",
"accession",
"title",
"scientific_name",
"taxon_id",
"sample_description",
"collection_date",
"geographic_location",
"host_common_name",
"host_subject_id",
"host_health_state",
"host_sex",
"host_scientific_name",
"collector_name",
"collecting_institution",
"isolate",
"submission_date",
]
)
+ "\n"
)
else:
samples_table.write(
"\t".join(
[
"alias",
"status",
"accession",
"title",
"scientific_name",
"taxon_id",
"sample_description",
"submission_date",
]
)
+ "\n"
)
experiments_table = open(pathlib.Path(out_path) / "experiments.tsv", "w")
experiments_table.write(
"\t".join(
[
"alias",
"status",
"accession",
"title",
"study_alias",
"sample_alias",
"design_description",
"library_name",
"library_strategy",
"library_source",
"library_selection",
"library_layout",
"insert_size",
"library_construction_protocol",
"platform",
"instrument_model",
"submission_date",
]
)
+ "\n"
)
runs_table = open(pathlib.Path(out_path) / "runs.tsv", "w")
runs_table.write(
"\t".join(
[
"alias",
"status",
"accession",
"experiment_alias",
"file_name",
"file_format",
"file_checksum",
"submission_date",
]
)
+ "\n"
)
action = action
# WRITE DICTIONARIES TO TABLE FILES
# ADD A TIMESTAMP TO THE ALIAS? SEEMS LIKE ENA REQUIRES ALL ENTRIES FOR A WEBIN TO HAVE UNIQUE IDS?
# dt_oobj = datetime.now(tz=None)
# timestamp = dt_oobj.strftime("%Y%m%d_%H:%M:%S")
for study_alias, study in studies_dict.items():
# study_alias = study_alias + '_' + timestamp
studies_table.write(
"\t".join(
[
study_alias,
action,
"ENA_accession",
study["title"],
study["study_type"],
study["study_abstract"],
"",
"ENA_submission_data",
]
)
+ "\n"
) # assuming no pubmed_id
for sample_alias, sample in samples_dict.items():
# sample_alias = sample_alias + '_' + timestamp
if viral_submission:
if sample["collector name"] == "":
sample["collector name"] = "unknown"
samples_table.write(
"\t".join(
[
sample_alias,
action,
"ena_accession",
sample["title"],
sample["scientific_name"],
"tax_id_updated_by_ENA",
sample["sample_description"],
sample["collection date"],
sample["geographic location (country and/or sea)"],
sample["host common name"],
"host subject id",
sample["host health state"],
sample["host sex"],
sample["host scientific name"],
sample["collector name"],
sample["collecting institution"],
sample["isolate"],
"ENA_submission_date",
]
)
+ "\n"
)
else:
samples_table.write(
"\t".join(
[
sample_alias,
action,
"ena_accession",
sample["title"],
sample["scientific_name"],
"tax_id_updated_by_ENA",
sample["sample_description"],
]
)
+ "\n"
)
for exp_alias, exp in experiments_dict.items():
# should I check here if any experiment has a study or sample alias that is incorrect?
# (not listed in the samples or study dict)
# process the experiments for this sample
if exp["sample_alias"] == sample_alias:
if | pd.isnull(exp["library_name"]) | pandas.isnull |
'''
Created on Sep 30, 2018
@author: yewen
'''
import os
from datetime import datetime
import pandas as pd
import numpy as np
from scipy import stats
from scipy import optimize
from _ast import operator
import Equity_port.historical_price as historical_price
import Equity_port.finance as finance
import Crypto.technical_indicators as ti
class Alert:
def __init__(self, today):
xl = pd.ExcelFile('watchlist.xlsx')
self.watchlist = xl.parse('watchlist')
self.watchlist['Index'] = self.watchlist['Ticker']
self.watchlist.set_index('Index', inplace = True)
print(self.watchlist)
self.today = today
self.last_year = pd.Series(pd.date_range(end=self.today, periods=365)) #total calendar days
def get_price_history (self, history = None, workbook='Historical price.xlsx', sheet='Historical price'):
if history is None:
r = historical_price.get_price_history(workbook=workbook, sheet=sheet)
else:
r = history
#self.forex = r['forex']
#self.company_name = r['company_name']
#self.equity_currency = r['equity_currency']
self.equity_prices_local = r['equity_price_local'][self.watchlist.index]
#self.equity_prices_USD = r['equity_price_USD']
self.equity_daily_return_local = r['equity_daily_return_local'][self.watchlist.index]
#self.equity_daily_return_USD = r['equity_daily_return_USD']
def sharp(self):
df = | pd.DataFrame(index=self.last_year, columns=['Sharp10','SharpSigma']) | pandas.DataFrame |
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = | tm.makeTimeSeries() | pandas.util.testing.makeTimeSeries |
'''
07 - Clustering the fish data
You'll now use your standardization and clustering pipeline from the previous
exercise to cluster the fish by their measurements, and then create a cross-
tabulation to compare the cluster labels with the fish species.
As before, samples is the 2D array of fish measurements. Your pipeline is available
as pipeline, and the species of every fish sample is given by the list species.
INSTRUCTIONS
- Import pandas as pd.
- Fit the pipeline to the fish measurements samples.
- Obtain the cluster labels for samples by using the .predict() method of pipeline.
- Using pd.DataFrame(), create a DataFrame df with two columns named 'labels' and
'species', using labels and species, respectively, for the column values.
- Using pd.crosstab(), create a cross-tabulation ct of df['labels'] and df['species'].
'''
# Import pandas
import pandas as pd
# Fit the pipeline to samples
pipeline.fit(samples)
# Calculate the cluster labels: labels
labels = pipeline.predict(samples)
# Create a DataFrame with labels and species as columns: df
df = pd.DataFrame({'labels': labels, 'species': species})
# Create crosstab: ct
ct = | pd.crosstab(df['labels'], df['species']) | pandas.crosstab |
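# Inspect the cross-tabulation (display step not part of the original exercise)
print(ct)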
#-------------------------------------------------------------------------------
# Name: GIS Viewer Attribution Evaluation
# Version: V_2.0
# Purpose: Produce report for installation geodatabase detailing data attribution
#
# Author: <NAME>
#
# Created: 2018/01/26
# Last Update: 2018/03/22
# Description: Evaluate installation geodatabases for minimum attribution required
# by AFCEC GIS viewer for best display of data.
#-------------------------------------------------------------------------------
# Import modules
import arcpy, os, numpy, pandas
from pandas import DataFrame
from datetime import datetime
# Start time
timenow = datetime.now()
print(timenow)
# WHICH FEATURE DATASETS ARE MISSING FROM THE INSTALLATION DATABASE COMPARED TO COMPARISON DATABASE
missFDSTable = arcpy.GetParameterAsText(0)
# WITHIN THE FEATURE DATASETS THAT THE INSTALLATION HAS,
# WHICH FEATURE CLASSES ARE MISSING?
missFCTable = arcpy.GetParameterAsText(1)
# WITHIN EACH REQUIRED FEATURE DATASET AND FEATURE CLASS THAT THE INSTALLATION HAS,
# WHICH FIELDS ARE MISSING?
missFLDTable = arcpy.GetParameterAsText(2)
# WITHIN EACH REQUIRED FEATURE DATASET AND FEATURE CLASS THAT THE INSTALLATION HAS,
# WHICH FIELDS ARE MISSING?
nullTable = arcpy.GetParameterAsText(3)
outputFile = arcpy.GetParameterAsText(4)
# =============================================================================
# missFDSTable = os.path.join(installGDB,"CIP_MissingFDS")
# missFCTable = os.path.join(installGDB,"CIP_MissingFCs")
# missFLDTable = os.path.join(installGDB,"CIP_MissingFields")
# nullTable = os.path.join(installGDB,"CIP_MissingData")
# =============================================================================
# to get dataframe of feature datasets, feature classes, and fields of geodatabase
def getFeaturesdf(GDB):
'''
# to get unique FDS, FC, and FIELDS across a geodatabase
Parameters
----------
GDB = path to GDB
Returns
-------
    pandas dataframe with three columns (FDS, FC, FLD): one row per field of each feature class in the gdb.
'''
d = pandas.DataFrame([])
arcpy.env.workspace = GDB
for theFDS in arcpy.ListDatasets():
for theFC in arcpy.ListFeatureClasses(feature_dataset=theFDS):
minFields = (fld.name.upper() for fld in arcpy.ListFields(os.path.join(GDB,theFDS,theFC)) if str(fld.name) not in ['Shape', 'OBJECTID', 'Shape_Length', 'Shape_Area'])
minFields = list(minFields)
for FLD in minFields:
                d = d.append(pandas.DataFrame({'FDS': str(theFDS), 'FC': str(theFC), 'FLD': str(FLD)}, index=[0]), ignore_index=True)
return(d)
# to get field name of a ArcGIS table
def get_field_names(table):
"""
Get a list of field names not inclusive of the geometry and object id fields.
Parameters
----------
table: Table readable by ArcGIS
Returns
-------
List of field names.
"""
# list to store values
field_list = []
# iterate the fields
for field in arcpy.ListFields(table):
# if the field is not geometry nor object id, add it as is
if field.type != 'Geometry' and field.type != 'OID':
field_list.append(field.name)
        # if geometry is present, add both shape x and y for the centroid
elif field.type == 'Geometry':
field_list.append('SHAPE@XY')
# return the field list
return field_list
# to convert arcgis table to pandas dataframe
def table_to_pandas_dataframe(table, field_names=None):
"""
Load data into a Pandas Data Frame from esri geodatabase table for subsequent analysis.
Parameters
----------
table = Table readable by ArcGIS.
field_names: List of fields.
Returns
-------
Pandas DataFrame object.
"""
# if field names are not specified
if not field_names:
# get a list of field names
field_names = get_field_names(table)
# create a pandas data frame
dataframe = DataFrame(columns=field_names)
# use a search cursor to iterate rows
with arcpy.da.SearchCursor(table, field_names) as search_cursor:
# iterate the rows
for row in search_cursor:
# combine the field names and row items together, and append them
dataframe = dataframe.append(
dict(zip(field_names, row)),
ignore_index=True
)
# return the pandas data frame
return dataframe
def get_geodatabase_path(input_table):
'''Return the Geodatabase path from the input table or feature class.
:param input_table: path to the input table or feature class
'''
workspace = os.path.dirname(input_table)
    if os.path.splitext(workspace)[1].lower() in ('.gdb', '.mdb', '.sde'):
        return workspace
    else:
        return os.path.dirname(workspace)
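# Example (hypothetical paths): for a feature class stored directly in a geodatabase,
# r"C:\data\Base.gdb\Buildings", the function returns r"C:\data\Base.gdb"; for a feature
# class inside a feature dataset, r"C:\data\Base.gdb\Utilities\WaterLine", it steps up
# one extra level and also returns r"C:\data\Base.gdb".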
# to get a pandas dataframe into a arcgis table
def pandas_to_table(pddf,tablename):
'''
Parameters
----------
pddf = pandas dataframe
tablename = output table name to 'installGDB'
Returns
-------
a geodatabase table from pandas dataframe inside 'installGDB' geodatabase object (string to .gdb path)
'''
    x = numpy.array(numpy.rec.fromrecords(pddf.values))
names = pddf.dtypes.index.tolist()
x.dtype.names = tuple(names)
gdbTbl = os.path.join(installGDB,tablename)
if arcpy.Exists(gdbTbl):
arcpy.Delete_management(gdbTbl)
arcpy.da.NumPyArrayToTable(x, gdbTbl)
def summariseMissingData(installGDB):
start_time = datetime.now()
arcpy.env.workspace = installGDB
installationName = os.path.splitext(os.path.basename(installGDB))[0]
tb = os.path.splitext(os.path.basename(nullTable))[0]
compName = tb.split("_")[0]
# output table names, with comparison geodatabase name prepended
# missingFDTblName=compName+"_MissingFDS"
# missingFCTblName=compName+"_MissingFCs"
# missingFLDTblName=compName+"_MissingFields"
# nullTableName=compName+"_MissingData"
#
## CONVERT TABLES TO PANDAS DATAFRAMES
pdNullTbl= table_to_pandas_dataframe(nullTable, field_names=None)
pdFLDTbl= table_to_pandas_dataframe(missFLDTable, field_names=None)
pdFCTbl= table_to_pandas_dataframe(missFCTable, field_names=None)
pdFDSTbl= table_to_pandas_dataframe(missFDSTable, field_names=None)
# replace cells with '' as NaN
pdNullTbl = pdNullTbl.replace('', numpy.nan)
pdFLDTbl = pdFLDTbl.replace('', numpy.NaN)
pdFCTbl = pdFCTbl.replace('', numpy.NaN)
pdFDSTbl = pdFDSTbl.replace('', numpy.NaN)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE INDETERMINANT
arcpy.AddMessage ("Getting count of indeterminant cells per feature class")
indtCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['TOTAL_INDT_COUNT'].agg('sum').fillna(0).reset_index()
indtCntByFC=pandas.DataFrame(indtCntByFC)
    # FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE DETERMINATE
    arcpy.AddMessage ("Getting count of determinate cells per feature class")
detCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['TOTAL_DET_COUNT'].agg('sum').fillna(0).reset_index()
detCntByFC=pandas.DataFrame(detCntByFC)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE NULL
## THEN EXPORT THEM TO THE GEODATABASE
arcpy.AddMessage ("Getting count of 'null' cells per feature class")
nullCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['NULL_FC_COUNT'].agg('sum').fillna(0).reset_index()
    nullCntByFC = pandas.DataFrame(nullCntByFC)
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import pandas as pd
SPREADSHEET_ID = '1XoJEbDUAXoqsTB44FoXFirHdx_F8TtO8-nByFpHNoqk' # <Your spreadsheet ID>
RANGE_NAME = 'member' # <Your worksheet name>
def get_google_sheet(spreadsheet_id, range_name):
""" Retrieve sheet data using OAuth credentials and Google Python API. """
scopes = 'https://www.googleapis.com/auth/spreadsheets.readonly'
# Setup the Sheets API
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', scopes)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
# Call the Sheets API
gsheet = service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_name).execute()
return gsheet
def gsheet2df(gsheet):
""" Converts Google sheet data to a Pandas DataFrame.
    Note: This script assumes that your data contains a header row as the first row!
Also note that the Google API returns 'none' from empty cells - in order for the code
below to work, you'll need to make sure your sheet doesn't contain empty cells,
or update the code to account for such instances.
"""
header = gsheet.get('values', [])[0] # Assumes first line is header!
values = gsheet.get('values', [])[1:] # Everything else is data.
if not values:
print('No data found.')
else:
all_data = []
for col_id, col_name in enumerate(header):
column_data = []
for row in values:
column_data.append(row[col_id])
            ds = pd.Series(data=column_data, name=col_name)
            all_data.append(ds)
        df = pd.concat(all_data, axis=1)
        return df
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pandas._testing as tm
import quantopy as qp
class TestReturnDataFrame:
def test_from_price(self):
rdf = qp.ReturnDataFrame.from_price([80, 85, 90])
assert type(rdf) is qp.ReturnDataFrame
assert_allclose(
rdf,
[[0.0625], [0.058824]],
rtol=1e-1,
)
assert_allclose(
qp.ReturnDataFrame.from_price(
{"stock_1": [80, 85, 90], "stock_2": [10, 20, 30]}
),
[[0.0625, 1.0], [0.058824, 0.5]],
rtol=1e-1,
)
assert_allclose(
qp.ReturnDataFrame.from_price(
                pd.DataFrame({"stock_1": [80, 85, 90], "stock_2": [10, 20, 30]})
            ),
            [[0.0625, 1.0], [0.058824, 0.5]],
            rtol=1e-1,
        )
from dataset_loader import *
from utils import *
from model import BeautyModel
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
from PIL import Image
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
IMG_SIZE=224
CHANNELS=3
class Inferencing:
def __init__(self, model_path, device_):
with tf.device(device_):
self.device = device_
# self.model = BeautyModel().create_model()
self.model = tf.keras.models.load_model(model_path, compile=False)
# self.model.summary()
def create_confusion_matrix(self, model):
dataloader = BeautyDataLoader()
label_names = dataloader.get_label_names()
y_val_bin = dataloader.dataset_split['valid_y']
dataset = dataloader.create_dataset(fold=0)
val_dataset = dataset['valid']
target = y_val_bin[0]
df = perf_grid(self.device, val_dataset, target, label_names, model)
# Get the maximum F1-score for each label when using the second model and varying the threshold
print(df.head(10))
        return df, label_names
def get_predictions(self, filenames, labels, model):
from keras.preprocessing import image
org_dataset = pd.read_csv('Dataset/beauty_dataset.csv')
# Get movie info
nrows = 5
ncols = 2
fig = plt.gcf()
fig.set_size_inches(ncols * 10, nrows * 10)
# print(filenames)
for i, img_path in enumerate(filenames):
gt = org_dataset.loc[org_dataset['file_path'] == img_path, ['isbeauty', 'skill']].iloc[0]
k,l = gt
# Read and prepare image
img = image.load_img(img_path, target_size=(IMG_SIZE,IMG_SIZE,CHANNELS))
img = image.img_to_array(img)
img = img/255
img = np.expand_dims(img, axis=0)
# Generate prediction
score = model.predict(img)
prediction = (score > 0.5).astype('int')
prediction = pd.Series(prediction[0])
prediction.index = labels
prediction = prediction[prediction==1].index.values
# Dispaly image with prediction
# style.use('default')
# plt.figure(figsize=(8,4))
# plt.imshow(Image.open(img_path))
# plt.show()
# Set up subplot; subplot indices start at 1
sp = plt.subplot(nrows, ncols, i + 1)
sp.axis('Off') # Don't show axes (or gridlines)
plt.imshow(Image.open(img_path))
file_ = os.path.basename(img_path)
            plt.title('\n\n{}\n\nGT\n{}\n\nPrediction\n{}\n'.format(file_, (k,l), list(prediction)), fontsize=9)
plt.savefig('./logs/predictions.png')
if __name__ == "__main__":
inference = Inferencing(device_='/gpu:0',model_path='./ckpt/20210226-130849/0/')
print(inference.model.summary())
df, labels = inference.create_confusion_matrix(inference.model)
    org_dataset = pd.read_csv('Dataset/beauty_dataset.csv')
import lightgbm as lgbm
import optuna
from scipy import sparse as ssp
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
def Gini(y_true, y_pred):
# check and get number of samples
assert y_true.shape == y_pred.shape
n_samples = y_true.shape[0]
# sort rows on prediction column
# (from largest to smallest)
arr = np.array([y_true, y_pred]).transpose()
true_order = arr[arr[:, 0].argsort()][::-1, 0]
pred_order = arr[arr[:, 1].argsort()][::-1, 0]
# get Lorenz curves
L_true = np.cumsum(true_order) * 1. / np.sum(true_order)
L_pred = np.cumsum(pred_order) * 1. / np.sum(pred_order)
L_ones = np.linspace(1 / n_samples, 1, n_samples)
# get Gini coefficients (area between curves)
G_true = np.sum(L_ones - L_true)
G_pred = np.sum(L_ones - L_pred)
# normalize to true Gini coefficient
return G_pred * 1. / G_true
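# Quick sanity check (illustrative values): a prediction that ranks samples exactly like
# the labels gives a normalized Gini of 1.0, while a random ranking tends toward 0.
#
#   y = np.array([0, 0, 1, 1])
#   Gini(y, np.array([0.1, 0.2, 0.8, 0.9]))   # -> 1.0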
cv_only = True
save_cv = True
full_train = False
def evalerror(preds, dtrain):
labels = dtrain.get_label()
return 'gini', Gini(labels, preds), True
path = "data/input/"
train = pd.read_csv(path+'train.csv')
#train = train.sample(frac=0.1, random_state=0).reset_index(drop=True)
train_label = train['target']
train_id = train['id']
test = pd.read_csv(path+'test.csv')
#test = test.sample(frac=0.1, random_state=0).reset_index(drop=True)
test_id = test['id']
NFOLDS = 4
kfold = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=218)
y = train['target'].values
drop_feature = [
'id',
'target'
]
X = train.drop(drop_feature,axis=1)
feature_names = X.columns.tolist()
cat_features = [c for c in feature_names if ('cat' in c and 'count' not in c)]
num_features = [c for c in feature_names if ('cat' not in c and 'calc' not in c)]
train['missing'] = (train==-1).sum(axis=1).astype(float)
test['missing'] = (test==-1).sum(axis=1).astype(float)
num_features.append('missing')
train.shape
for c in cat_features:
le = LabelEncoder()
le.fit(train[c])
train[c] = le.transform(train[c])
test[c] = le.transform(test[c])
# LabelEncoder has already been applied above, so this encoding assigns unique values consistently; categories='auto' suppresses the warning
enc = OneHotEncoder(categories='auto')
enc.fit(train[cat_features])
X_cat = enc.transform(train[cat_features])
X_t_cat = enc.transform(test[cat_features])
ind_features = [c for c in feature_names if 'ind' in c]
count=0
for c in ind_features:
if count == 0:
train['new_ind'] = train[c].astype(str)+'_'
test['new_ind'] = test[c].astype(str)+'_'
count += 1
else:
train['new_ind'] += train[c].astype(str)+'_'
test['new_ind'] += test[c].astype(str)+'_'
cat_count_features = []
for c in cat_features+['new_ind']:
d = pd.concat([train[c],test[c]]).value_counts().to_dict()
train['%s_count'%c] = train[c].apply(lambda x:d.get(x,0))
test['%s_count'%c] = test[c].apply(lambda x:d.get(x,0))
cat_count_features.append('%s_count'%c)
train_list = [train[num_features+cat_count_features].values, X_cat]
test_list = [test[num_features+cat_count_features].values, X_t_cat]
X = ssp.hstack(train_list).tocsr()
X_test = ssp.hstack(test_list).tocsr()
def objective(trial):
drop_rate = trial.suggest_uniform('drop_rate', 0, 1.0)
feature_fraction = trial.suggest_uniform('feature_fraction', 0, 1.0)
learning_rate = trial.suggest_uniform('learning_rate', 0, 1.0)
subsample = trial.suggest_uniform('subsample', 0.8, 1.0)
num_leaves = trial.suggest_int('num_leaves', 5, 1000)
verbosity = trial.suggest_int('verbosity', -1, 1)
num_boost_round = trial.suggest_int('num_boost_round', 10, 100000)
min_data_in_leaf = trial.suggest_int('min_data_in_leaf', 10, 100000)
min_child_samples = trial.suggest_int('min_child_samples', 5, 500)
min_child_weight = trial.suggest_int('min_child_weight', 5, 500)
params = {"objective": "binary",
"boosting_type": "gbdt",
"learning_rate": learning_rate,
"num_leaves": num_leaves,
"max_bin": 256,
"feature_fraction": feature_fraction,
"verbosity": verbosity,
"drop_rate": drop_rate,
"is_unbalance": False,
"max_drop": 50,
"min_child_samples": min_child_samples,
"min_child_weight": min_child_weight,
"min_split_gain": 0,
"min_data_in_leaf": min_data_in_leaf,
"subsample": subsample
}
x_score = []
final_cv_train = np.zeros(len(train_label))
final_cv_pred = np.zeros(len(test_id))
cv_train = np.zeros(len(train_label))
cv_pred = np.zeros(len(test_id))
params['seed'] = 0
kf = kfold.split(X, train_label)
best_trees = []
fold_scores = []
for i, (train_fold, validate) in enumerate(kf):
print('kfold_index:', i)
X_train, X_validate, label_train, label_validate = \
X[train_fold, :], X[validate, :], train_label[train_fold], train_label[validate]
dtrain = lgbm.Dataset(X_train, label_train)
dvalid = lgbm.Dataset(X_validate, label_validate, reference=dtrain)
bst = lgbm.train(params, dtrain, num_boost_round, valid_sets=dvalid, feval=evalerror, verbose_eval=100,
early_stopping_rounds=100)
best_trees.append(bst.best_iteration)
cv_pred += bst.predict(X_test, num_iteration=bst.best_iteration)
cv_train[validate] += bst.predict(X_validate)
score = Gini(label_validate, cv_train[validate])
print(score)
fold_scores.append(score)
cv_pred /= NFOLDS
final_cv_train += cv_train
final_cv_pred += cv_pred
print("cv score:")
print(Gini(train_label, cv_train))
print("current score:", Gini(train_label, final_cv_train / (params['seed'] + 1.)), params['seed']+1)
print(fold_scores)
print(best_trees, np.mean(best_trees))
x_score.append(Gini(train_label, cv_train))
print(x_score)
pd.DataFrame({'id': test_id, 'target': final_cv_pred / 16.}).to_csv('data/output/optuna_lgbm3_pred_avg_2.csv', index=False)
    pd.DataFrame({'id': train_id, 'target': final_cv_train / 16.})
# Author: <NAME>, PhD
# University of Los Angeles California
import os
import sys
import re
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy import optimize
from itertools import groupby
# from https://gist.github.com/walkermatt/2871026
from threading import Timer
def debounce(wait):
""" Decorator that will postpone a functions
execution until after wait seconds
have elapsed since the last time it was invoked. """
def decorator(fn):
def debounced(*args, **kwargs):
def call_it():
fn(*args, **kwargs)
try:
debounced.t.cancel()
except(AttributeError):
pass
debounced.t = Timer(wait, call_it)
debounced.t.start()
return debounced
return decorator
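# Illustrative usage of the debounce decorator (hypothetical function name): rapid
# repeated calls cancel the pending timer, so the wrapped body runs only once,
# roughly `wait` seconds after the most recent call.
#
#   @debounce(1.0)
#   def refresh_view():
#       print("view refreshed")
#
#   refresh_view(); refresh_view(); refresh_view()   # prints once, ~1 s later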
#############################################################################
# --------- Natural Abundance Correction CLASS -----------------------------#
#############################################################################
class NAProcess:
# Adapted from IsoCor code (https://github.com/MetaSys-LISBP/IsoCor)
##################
## Init and setup
##################
def __init__(self, entry, atomTracer="H", purityTracer=[0, 1], FAMES=True, CHOL=False):
self.NaturalAbundanceDistributions = self.__getNaturalAbundanceDistributions()
self.formula = self.getFAFormulaString(entry, FAMES, CHOL)
self.elementsDict = self.parseFormula(self.formula)
self.atomTracer = atomTracer
self.purityTracer = purityTracer
self.correctionMatrix = self.computeCorrectionMatrix(self.elementsDict, self.atomTracer, self.NaturalAbundanceDistributions, purityTracer)
def getFAFormulaString(self, entry, FAMES, CHOL=False):
''' Return formula string e.g.: C3H2O3'''
regex = "C([0-9]+):([0-9]+)"
carbon,doubleBond = [int(val) for val in re.findall(regex, entry)[0]]
hydrogen = 3+(carbon-2)*2+1-2*doubleBond
oxygen = 2
silicon = 0
if (FAMES):
carbon=carbon+1
hydrogen=hydrogen-1+3
if (CHOL):
carbon, hydrogen, oxygen, silicon = 30, 54, 1, 1
return "".join(["".join([letter,str(n)]) for [letter,n] in [
["C", carbon],
["H", hydrogen],
["Si", silicon],
["O", oxygen]] if n>0])
def parseFormula(self, formula):
"""
Parse the elemental formula and return the number
of each element in a dictionnary d={'El_1':x,'El_2':y,...}.
"""
regex = f"({'|'.join(self.NaturalAbundanceDistributions.keys())})([0-9]{{0,}})"
elementDict = dict((element, 0) for element in self.NaturalAbundanceDistributions.keys())
for element,n in re.findall(regex, formula):
if n:
elementDict[element] += int(n)
else:
elementDict[element] += 1
return elementDict
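    # Example (illustrative): for the FAMES formula built above for a C19:0 entry,
    # "C20H40O2", parseFormula returns {'H': 40, 'C': 20, 'N': 0, 'O': 2, 'Si': 0, 'S': 0};
    # elements absent from the formula stay at 0.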
def __getNaturalAbundanceDistributions(self):
'''Return a dictionary of the isotopic proportions at natural abundance
        described in https://www.ncbi.nlm.nih.gov/pubmed/27989585'''
H1, H2 = 0.999885, 0.000115
C12, C13 = 0.9893, 0.0107
N14, N15 = 0.99632, 0.00368
O16, O17, O18 = 0.99757, 0.00038, 0.00205
Si28, Si29, Si30 = 0.922297, 0.046832, 0.030872
S32, S33, S34, S36 = 0.9493, 0.0076, 0.0429, 0.0002
return {'H': np.array([H1, H2]), # hydrogen
'C': np.array([C12, C13]), # carbon
'N': np.array([N14, N15]), # nitrogen
'O': np.array([O16, O17, O18]), # oxygen
'Si': np.array([Si28, Si29, Si30]), # silicon
'S': np.array([S32, S33, S34, S36])} # sulphur
def __calculateMassDistributionVector(self, elementDict, atomTracer, NADistributions):
"""
Calculate a mass distribution vector (at natural abundancy),
based on the elemental compositions of metabolite.
The element corresponding to the isotopic tracer is not taken
into account in the metabolite moiety.
"""
result = np.array([1.])
for atom,n in elementDict.items():
if atom not in [atomTracer]:
for i in range(n):
result = np.convolve(result, NADistributions[atom])
return result
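    # Worked example (illustrative): for a moiety of two carbons and no other atoms,
    # with hydrogen as the tracer, [1.] is convolved twice with [0.9893, 0.0107],
    # giving roughly [0.9787, 0.0212, 0.0001] for the M+0, M+1 and M+2 fractions.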
def computeCorrectionMatrix(self, elementDict, atomTracer, NADistributions, purityTracer):
# calculate correction vector used for correction matrix construction
# it corresponds to the mdv at natural abundance of all elements except the
# isotopic tracer
correctionVector = self.__calculateMassDistributionVector(elementDict, atomTracer, NADistributions)
# check if the isotopic tracer is present in formula
try:
nAtomTracer = elementDict[atomTracer]
except:
print("The isotopic tracer must to be present in the metabolite formula!")
tracerNADistribution = NADistributions[atomTracer]
m = 1+nAtomTracer*(len(tracerNADistribution)-1)
c = len(correctionVector)
if m > c + nAtomTracer*(len(tracerNADistribution)-1):
print("There might be a problem in matrix size.\nFragment does not contains enough atoms to generate this isotopic cluster.")
if c < m:
# padd with zeros
correctionVector.resize(m)
# create correction matrix
correctionMatrix = np.zeros((m, nAtomTracer+1))
for i in range(nAtomTracer+1):
column = correctionVector[:m]
for na in range(i):
column = np.convolve(column, purityTracer)[:m]
for nb in range(nAtomTracer-i):
column = np.convolve(column, tracerNADistribution)[:m]
correctionMatrix[:,i] = column
return correctionMatrix
##################
## Data processing
##################
def _computeCost(self, currentMID, target, correctionMatrix):
"""
Cost function used for BFGS minimization.
return : (sum(target - correctionMatrix * currentMID)^2, gradient)
"""
difference = target - np.dot(correctionMatrix, currentMID)
# calculate sum of square differences and gradient
return (np.dot(difference, difference), np.dot(correctionMatrix.transpose(), difference)*-2)
def _minimizeCost(self, args):
'''
Wrapper to perform least-squares optimization via the limited-memory
Broyden-Fletcher-Goldfarb-Shanno algorithm, with an explicit lower boundary
set to zero to eliminate any potential negative fractions.
'''
costFunction, initialMID, target, correctionMatrix = args
res = optimize.minimize(costFunction, initialMID, jac=True, args=(target, correctionMatrix),
method='L-BFGS-B', bounds=[(0., float('inf'))]*len(initialMID),
options={'gtol': 1e-10, 'eps': 1e-08, 'maxiter': 15000, 'ftol': 2.220446049250313e-09,
'maxcor': 10, 'maxfun': 15000})
return res
def correctForNaturalAbundance(self, dataFrame, method="LSC"):
'''
Correct the Mass Isotope Distributions (MID) from a given dataFrame.
Method: SMC (skewed Matrix correction) / LSC (Least Squares Skewed Correction)
'''
correctionMatrix = self.computeCorrectionMatrix(self.elementsDict, self.atomTracer, self.NaturalAbundanceDistributions, self.purityTracer)
nRows, nCols = correctionMatrix.shape
# ensure compatible sizes (will extend data)
if nCols<dataFrame.shape[1]:
print("The measure MID has more clusters than the correction matrix.")
else:
dfData = np.zeros((len(dataFrame), nCols))
dfData[:dataFrame.shape[0], :dataFrame.shape[1]] = dataFrame.values
if method == "SMC":
# will mltiply the data by inverse of the correction matrix
correctionMatrix = np.linalg.pinv(correctionMatrix)
correctedData = np.matmul(dfData, correctionMatrix.transpose())
# flatten unrealistic negative values to zero
correctedData[correctedData<0] = 0
elif method == "LSC":
# Prepare multiprocessing optimization
targetMIDList = dfData.tolist()
initialMID = np.zeros_like(targetMIDList[0])
argsList = [(self._computeCost, initialMID, targetMID, correctionMatrix) for targetMID in targetMIDList]
# minimize for each MID
allRes = [self._minimizeCost(args) for args in argsList]
correctedData = np.vstack([res.x for res in allRes])
return pd.DataFrame(columns=dataFrame.columns, data=correctedData[:, :dataFrame.shape[1]])
#############################################################################
# --------- DATA OBJECT CLASS ----------------------------------------------#
#############################################################################
class MSDataContainer:
##################
## Init and setup
##################
def __init__(self, fileNames, internalRef="C19:0", tracer="H", tracerPurity=[0.00, 1.00]):
assert len(fileNames)==2 , "You must choose 2 files!"
self.internalRef = internalRef
self._cholesterol = False
self.tracer = tracer
self.tracerPurity = tracerPurity
self.NACMethod = "LSC" # least squares skewed matrix correction
self.dataFileName, self.templateFileName = self.__getDataAndTemplateFileNames(fileNames)
self._baseFileName = os.path.basename(self.dataFileName).split('.')[0]
self.pathDirName = os.path.dirname(self.dataFileName)
self.__regexExpression = {"Samples": '^(?!neg|S\d+$)',
"colNames": '(\d+)_(\d+)(?:\.\d+)?_(\d+)'}
self.dataDf = self._computeFileAttributes()
self.__standardDf_template = self.__getStandardsTemplateDf()
self.volumeMixTotal = 500
self.volumeMixForPrep = 100
self.volumeStandards = [1, 5, 10, 20, 40, 80]
self.standardDf_nMoles = self.computeStandardMoles()
# for normalization
# volume (uL) in which the original sample was diluted
samplesLoc = self.dataDf.SampleName.str.match(self.__regexExpression["Samples"], na=False)
self.numberOfSamples = len(self.dataDf.loc[samplesLoc])
self.volumesOfDilution = [750]*self.numberOfSamples
# volume (uL) of sample used in MS
self.volumesOfSampleSoupUsed = [5]*self.numberOfSamples
self.weightNormalization = False
def __getDataAndTemplateFileNames(self, fileNames, templateKeyword="template"):
'''Classify files (data or template) based on fileName'''
dataFileName = [fileName for fileName in fileNames if templateKeyword not in fileName][0]
templateFileName = [fileName for fileName in fileNames if fileName != dataFileName][0]
return [dataFileName, templateFileName]
def __parseIon(self, ion):
ionID, ionMass, ionDescription = re.findall(self.__regexExpression["colNames"], ion)[0]
return {"id": int(ionID), "mass": int(ionMass), "description": ionDescription}
def __parseSampleColumns(self, columnNames):
# indexed ions from columns
ions = map(lambda name: self.__parseIon(name), columnNames)
return list(ions)#[self.__getIndexedIon(ion, i) for i,ion in enumerate(ions)]
def __isLabeledExperiment(self, ionsDetected):
# if more than 40% of the ions are duplicate, it probably means that the file is
        # from a labeled experiment (lots of fragments for each ion)
ionsList = list(map(lambda ion: ion["description"], ionsDetected))
uniqueIons = set(ionsList)
return len(uniqueIons)/len(ionsList) < 0.6
def __getIonParentedGroups(self, ionsDetected):
# groupby parental ions (save initial index for sorting later)
groupedIons = groupby(enumerate(ionsDetected), key=lambda ion: ion[1]['description'])
groupsIntraSorted = list(map(lambda group: (group[0], sorted(group[1], key=lambda ion: ion[1]['mass'])), groupedIons))
# split groups if most abundant ion present
finalGroups = []
for key,group in groupsIntraSorted:
# only process groups that have more than 1 ion
if len(group) != 1:
masses = np.array([ion[1]["mass"] for ion in group])
differences = masses[1:]-masses[0:-1]
idx = np.where(differences != 1)
# and that have non unitary jumps in differences from ion to ion
if len(idx[0])>0:
start = 0
for i in range(len(idx[0])+1):
if i < len(idx[0]):
end = idx[0][i]+1
subgroup = group[start:end]
start = idx[0][i] + 1
finalGroups.append((f"{key}-{i}", subgroup))
else:
subgroup = group[start:]
finalGroups.append((f"{key}-{i}", subgroup))
else:
finalGroups.append((key, group))
else:
finalGroups.append((key, group))
return finalGroups
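    # Illustrative example (hypothetical masses): ions sharing the description '181'
    # with masses [312, 313, 314, 339, 340] have a non-unit jump between 314 and 339,
    # so they are split into groups keyed '181-0' (312-314) and '181-1' (339-340);
    # a description whose masses are consecutive keeps its original key.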
def _computeFileAttributes(self):
# extract columns
columnsOfInterest = pd.read_excel(self.dataFileName, nrows=2).filter(regex=self.__regexExpression["colNames"]).columns
# load data and template files and isolate data part
df = pd.read_excel(self.dataFileName, skiprows=1)
templateMap = pd.read_excel(self.templateFileName, sheet_name="MAP")
# check if cholesterol experiment
letter = df["Name"][0][0] # F or C
df_Meta,df_Data = self.__getOrderedDfBasedOnTemplate(df, templateMap, letter)
if letter == "C":
self._cholesterol = True
# assign columns names
ionsDetected = self.__parseSampleColumns(columnsOfInterest)
self.dataColNames = [f"C{ion['description'][:2]}:{ion['description'][2:]} ({ion['mass']})" for ion in ionsDetected]
self.internalRefList = self.dataColNames
self.experimentType = "Not Labeled"
# Check if this is a labeled experiment.
# If it is, need to rework the columns names by adding info of non parental ion
if self.__isLabeledExperiment(ionsDetected):
self.experimentType = "Labeled"
# split groups if most abundant ion present
finalGroups = self.__getIonParentedGroups(ionsDetected)
if letter == "F":
startM = 0
else:
assert len(finalGroups) == 2, "For cholesterol experiment we only expect 2 parental ions!"
startM = -2
sortedIonNames = [(idx, f"C{ion['description'][:2]}:{ion['description'][2:]} ({group[0][1]['mass']}) M.{n}") for (key,group) in finalGroups for n,(idx, ion) in enumerate(group)]
orderedIdx,orderedIonNames = zip(*sortedIonNames)
# reorder the columns by ions
df_Data = df_Data.iloc[:, list(orderedIdx)]
self.dataColNames = orderedIonNames
# only parental ions for internalRefList
self.internalRefList = [ f"C{carbon.split('-')[0][:2]}:{carbon.split('-')[0][2:]} ({group[0][1]['mass']})" for (carbon, group) in finalGroups]
df_Data.columns = self.dataColNames
# get sample meta info from template file
df_TemplateInfo = self.__getExperimentMetaInfoFromMAP(templateMap)
assert len(df_TemplateInfo)==len(df_Data), \
f"The number of declared samples in the template (n={len(df_TemplateInfo)}) does not match the number of samples detected in the data file (n={len(df_Data)})"
# save the number of columns (meta info) before the actual data
self._dataStartIdx = len(df_Meta.columns)+len(df_TemplateInfo.columns)
if (letter == "F") | (self.experimentType == "Not Labeled"):
dataDf = pd.concat([df_Meta, df_TemplateInfo, df_Data.fillna(0)], axis=1)
else:
# if chol experiment, remove the M.-2 and M.-1
dataDf = pd.concat([df_Meta, df_TemplateInfo, df_Data.iloc[:, 2:].fillna(0)], axis=1)
# but save a copy with everything for posterity
self.dataDf_chol = pd.concat([df_Meta, df_TemplateInfo, df_Data.fillna(0)], axis=1)
return dataDf
def __getOrderedDfBasedOnTemplate(self, df, templateMap, letter="F", skipCols=7):
'''Get new df_Data and df_Meta based on template'''
# reorder rows based on template and reindex with range
newOrder = list(map(lambda x: f"{letter}{x.split('_')[1]}", templateMap.SampleID[templateMap.SampleName.dropna().index].values))[:len(df)]
df.index=df["Name"]
df = df.reindex(newOrder)
df.index = list(range(len(df)))
df_Meta = df[["Name", "Data File"]]
df_Data = df.iloc[:, skipCols:] # 7 first cols are info
return df_Meta, df_Data
def __getExperimentMetaInfoFromMAP(self, templateMap):
'''Return the meta info of the experiment'''
# keep only rows with declared names
declaredIdx = templateMap.SampleName.dropna().index
templateMap = templateMap.loc[declaredIdx]
templateMap.index = range(len(templateMap)) # in case there were missing values
# fill in missing weights with 1
templateMap.loc[templateMap.SampleWeight.isna(), "SampleWeight"]=1
return templateMap[["SampleID", "SampleName", "SampleWeight", "Comments"]]
def __getStandardsTemplateDf(self, sheetKeyword="STANDARD"):
'''Loads the correct sheet for standard and returns it'''
sheetName = f"{sheetKeyword}_{'_'.join(self.experimentType.upper().split(' '))}"
templateStandard = pd.read_excel(self.templateFileName, sheet_name=sheetName)
return templateStandard
def __makeResultFolder(self):
if self._cholesterol:
suffix = "-CHOL"
else:
suffix = ""
directory = f"{self.pathDirName}/results-{self._baseFileName}{suffix}"
if not os.path.exists(directory):
os.mkdir(directory)
return directory
##################
## Analysis and Updates
##################
def updateInternalRef(self, newInternalRef):
'''Update FAMES chosen as internal reference and normalize data to it'''
print(f"Internal Reference changed from {self.internalRef} to {newInternalRef}")
self.internalRef = newInternalRef
self.dataDf_norm = self.computeNormalizedData()
def updateStandards(self, volumeMixForPrep, volumeMixTotal, volumeStandards):
self.volumeMixForPrep = volumeMixForPrep
self.volumeMixTotal = volumeMixTotal
self.volumeStandards = volumeStandards
self.standardDf_nMoles = self.computeStandardMoles()
def computeStandardMoles(self):
'''Calculate nMoles for the standards'''
template = self.__standardDf_template.copy()
template["Conc in Master Mix (ug/ul)"] = template["Stock conc (ug/ul)"]*template["Weight (%)"]/100*self.volumeMixForPrep/self.volumeMixTotal
# concentration of each carbon per standard volume
for ul in self.volumeStandards:
template[f"Std-Conc-{ul}"]=ul*(template["Conc in Master Mix (ug/ul)"]+template["Extra"])
# nMol of each FAMES per standard vol
for ul in self.volumeStandards:
template[f"Std-nMol-{ul}"] = 1000*template[f"Std-Conc-{ul}"]/template["MW"]
# create a clean template with only masses and carbon name
templateClean = pd.concat([template.Chain, template.filter(like="Std-nMol")], axis=1).transpose()
templateClean.columns = [f"C{chain} ({int(mass)})" for chain,mass in zip(self.__standardDf_template.Chain, self.__standardDf_template.MW)]
templateClean = templateClean.iloc[1:]
return templateClean
def getStandardAbsorbance(self):
'''Get normalized absorbance data for standards'''
matchedLocations = self.dataDf_norm.SampleName.str.match('S[0-9]+', na=False)
return self.dataDf_norm.loc[matchedLocations]
def updateTracer(self, newTracer):
self.tracer = newTracer
print(f"The tracer has been updated to {newTracer}")
self.computeNACorrectionDf()
def updateTracerPurity(self, newPurity):
self.tracerPurity = newPurity
self.computeNACorrectionDf()
def updateNACMethod(self, newMethod):
self.NACMethod = newMethod
print(f"The correction method for natural abundance has been updated to {newMethod}")
self.computeNACorrectionDf()
def updateVolumesOfSampleDilution(self, newVolumeOfDilution, newVolumeOfSampleUsed, useValueDilution=True, useValueSample=True):
if useValueDilution:
self.volumesOfDilution = [newVolumeOfDilution]*self.numberOfSamples
if useValueSample:
self.volumesOfSampleSoupUsed = [newVolumeOfSampleUsed]*self.numberOfSamples
print(f"The volumes used for normalization have been updated:\n\tVolume of dilution: {self.volumesOfDilution}\n\tVolume of sample used: {self.volumesOfSampleSoupUsed}")
def updateVolumeOfDilutionFromTemplateFile(self, columnName, activated, variable="dilution", backupValueDilution=750, backupValueSample=5, useBackupDilution=True, useBackupSample=True):
templateMap = pd.read_excel(self.templateFileName, sheet_name="MAP")
declaredIdx = templateMap.SampleName.dropna()[templateMap.SampleName.dropna().str.match(self.__regexExpression["Samples"], na=False)].index
if ((variable == "dilution") & (activated)):
self.volumesOfDilution = templateMap.loc[declaredIdx, columnName].values
print(f"The dilution volumes used for normalization have been updated from template to {self.volumesOfDilution}")
assert len(self.volumesOfDilution[~np.isnan(self.volumesOfDilution)])==len(declaredIdx),\
f"The number of volume of dilutions declared in the Template file (n={len(self.volumesOfDilution[~np.isnan(self.volumesOfDilution)])}) is different than the number of samples declared (n={len(declaredIdx)})"
elif ((variable == "sample") & (activated)):
self.volumesOfSampleSoupUsed = templateMap.loc[declaredIdx, columnName].values
print(f"The sample volumes used for normalization have been updated from template to {self.volumesOfSampleSoupUsed}")
assert len(self.volumesOfSampleSoupUsed[~np.isnan(self.volumesOfSampleSoupUsed)])==len(declaredIdx),\
f"The number of sample volumes declared in the Template file (n={len(self.volumesOfSampleSoupUsed[~np.isnan(self.volumesOfSampleSoupUsed)])}) is different than the number of samples declared (n={len(declaredIdx)})"
else:
self.updateVolumesOfSampleDilution(backupValueDilution, backupValueSample, useBackupDilution, useBackupSample)
def updateNormalizationType(self, newType):
self.weightNormalization = bool(newType)
if (self.weightNormalization):
typeNorm = "by total weight"
else:
typeNorm = "by relative weight"
print(f"The normalization when computing the data has been changed to '{typeNorm}'")
# Debouncing active for computing the NA correction.
# Makes for a smoother user experience (no lagging) when ion purity are changed int he textbox
# Note that this means that the function will be called with the specified delay after parameters are changed
@debounce(1.1)
def computeNACorrectionDf(self):
self.dataDf_corrected = self.correctForNaturalAbundance()
self.dataDf_labeledProportions = self.calculateLabeledProportionForAll()
def computeNormalizedData(self):
'''Normalize the data to the internal ref'''
if self.experimentType == "Not Labeled":
dataDf_norm = self.dataDf.copy()
dataDf_norm.iloc[:, self._dataStartIdx:] = dataDf_norm.iloc[:, self._dataStartIdx:].divide(dataDf_norm[self.internalRef], axis=0)
else:
sumFracDf = self.calculateSumIonsForAll()
sumFracDf = sumFracDf.divide(sumFracDf[self.internalRef], axis=0)
# sumFracDf = pd.DataFrame(columns=sumFracDf.columns, data=sumFracDf.values/sumFracDf[self.internalRef].values[:, np.newaxis])
            dataDf_norm = pd.concat([self.dataDf.iloc[:, :self._dataStartIdx], sumFracDf], axis=1)
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import time
import multiprocessing as mp
start_time=time.time()
def deepl(location1,location2):
data=pd.read_csv(location1)
data_columns=data.columns
xtrain = data[data_columns[data_columns != 'typeoffraud']]
ytrain=data['typeoffraud']
data1=pd.read_csv(location2)
data1_columns=data1.columns
xtest = data1[data1_columns[data1_columns != 'typeoffraud']]
ytest=data1['typeoffraud']
xtrain_norm = (xtrain - xtrain.mean()) / xtrain.std()
xtest_norm = (xtest - xtest.mean()) / xtest.std()
n_cols = xtrain_norm.shape[1]
ytrain=to_categorical(ytrain)
ytest=to_categorical(ytest)
num_classes=ytrain.shape[1]
def classification_model():
# create model
model = Sequential()
model.add(Dense(100,activation='relu', input_shape=(n_cols,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
# build the model
model = classification_model()
# fit the model
model.fit(xtrain_norm, ytrain, validation_data=(xtest_norm, ytest), epochs=10, verbose=1)
# evaluate the model
# test_loss,test_acc=model.evaluate(xtest_norm, ytest)
test_labels_p=model.predict(xtest_norm)
test_labels_p=np.argmax(test_labels_p,axis=1)
rel=list(zip(test_labels_p))
pp=pd.DataFrame(data=rel,columns=['label'])
pp.to_csv('label.csv',index=False)
################################################################################################################
def maketags(location2,location3):
e=pd.read_csv(location2)
tags=[]
ids=[]
tags1=[]
ids1=[]
for i,l in enumerate(e['typeoffraud']):
if l==1 or l==2 or l==3:
ids.append(e.iloc[i,1])
tags.append(e.iloc[i,4])
if l==4 or l==5 or l==6:
ids1.append(e.iloc[i,1])
tags1.append(e.iloc[i,4])
rel=list(zip(ids,tags))
pp=pd.DataFrame(data=rel,columns=['ids','tags'])
pp.to_csv('labelofhead.csv',index=False)
rel1=list(zip(ids1,tags1))
    pp1 = pd.DataFrame(data=rel1, columns=['ids', 'tags'])
"""
This module tests high level dataset API functions which require entire datasets, indices, etc
"""
from collections import OrderedDict
import pandas as pd
import pandas.testing as pdt
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
def test_dataset_get_indices_as_dataframe_partition_keys_only(
dataset_with_index, store_session
):
expected = pd.DataFrame(
OrderedDict([("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
result = ds.get_indices_as_dataframe(columns=dataset_with_index.partition_keys)
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe(dataset_with_index, store_session):
expected = pd.DataFrame(
OrderedDict([("L", [1, 2]), ("P", [1, 2])]),
index=pd.Index(["P=1/cluster_1", "P=2/cluster_2"], name="partition"),
)
ds = dataset_with_index.load_partition_indices()
ds = ds.load_index("L", store_session)
result = ds.get_indices_as_dataframe()
pdt.assert_frame_equal(result, expected)
def test_dataset_get_indices_as_dataframe_duplicates():
ds = DatasetMetadata(
"some_uuid",
indices={
"l_external_code": ExplicitSecondaryIndex(
"l_external_code", {"1": ["part1", "part2"], "2": ["part1", "part2"]}
),
"p_external_code": ExplicitSecondaryIndex(
"p_external_code", {"1": ["part1"], "2": ["part2"]}
),
},
)
expected = pd.DataFrame(
OrderedDict(
[
("p_external_code", ["1", "1", "2", "2"]),
("l_external_code", ["1", "2", "1", "2"]),
]
),
index=pd.Index(["part1", "part1", "part2", "part2"], name="partition"),
)
result = ds.get_indices_as_dataframe()
    pdt.assert_frame_equal(result, expected)
import os
import pandas as pd
import numpy as np
import pickle
import argparse
## torch packages
import torch
from transformers import BertTokenizer,AutoTokenizer
import re
## for visualisation
import matplotlib.pyplot as plt
import collections
## custom packages
from extract_lexicon import get_arousal_vec,get_valence_vec,get_dom_vec
from utils import flatten_list,tweet_preprocess
from label_dict import ed_label_dict as emo_map
from label_dict import ed_emo_dict as emo_map_inverse
def get_one_hot(emo, class_size):
targets = np.zeros(class_size)
emo_list = [int(e) for e in emo.split(",")]
for e in emo_list:
targets[e] = 1
return list(targets)
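# Example (illustrative): with 28 classes, get_one_hot("2,7", 28) returns a list of
# 28 zeros with 1.0 at indices 2 and 7, i.e. a multi-hot target vector.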
def get_speaker_info(speaker_id):
if int(speaker_id) % 2 == 0:
speaker = 1 # listener utterance
else:
speaker = 0 # speaker utterance
return speaker
def data_reader(data_folder, datatype,save=True):
'''
Reads the raw data from EmpatheticDialogues dataset, preprocess the data and save it in a pickle file
'''
print("Datatype:",datatype)
ongoing_utterance_list = []
ids = []
speaker_info = []
data = {'prompt':[],'utterance_data_list':[],'utterance_data':[],'utterance_id':[],"speaker_info":[],'emotion_label':[],'emotion':[]}
df = open(os.path.join(data_folder, f"{datatype}.csv")).readlines()
    for i in range(2,len(df)): # starts at 2 because df[0] is the column header row, so i-1 (i.e. 2-1=1) is the first actual data row
prev_utterance_parts = df[i-1].strip().split(",")
current_utterance_parts = df[i].strip().split(",")
if prev_utterance_parts[0] == current_utterance_parts[0]: #to detect if its the ongoing conversation or the next conversation
prev_utterance_str = prev_utterance_parts[5].replace("_comma_", ",") #replace _comma_ for utterance
ongoing_utterance_list.append(prev_utterance_str)
ids.append((prev_utterance_parts[0],prev_utterance_parts[1]))
speaker_info.append(get_speaker_info(prev_utterance_parts[1]))
if i == len(df)-1 : # reaches the end of the dataset and this adds the last utterance to the ongoing utterance list
current_utterance_str = current_utterance_parts[5].replace("_comma_", ",") #replace _comma_ for utterance
emotion_label_str = current_utterance_parts[2]
prompt_str = current_utterance_parts[3].replace("_comma_", ",")
emotion_label_int = emo_map[current_utterance_parts[2]]
ongoing_utterance_list.append(current_utterance_str)
ids.append((current_utterance_parts[0],current_utterance_parts[1]))
speaker_info.append(get_speaker_info(current_utterance_parts[1]))
data["prompt"].append(prompt_str)
data["utterance_data_list"].append(ongoing_utterance_list)
data["utterance_data"].append("".join(ongoing_utterance_list))
data["utterance_id"].append(ids)
data["speaker_info"].append(speaker_info)
data["emotion_label"].append(emotion_label_str)
data["emotion"].append(emotion_label_int)
else: # condition where it reaches the end of a conversation, so the prev_utterance was part of the previous conversation which is added to the ongoing utterance list
prev_utterance_str = prev_utterance_parts[5].replace("_comma_", ",") #replace _comma_ for utterance
emotion_label_str = prev_utterance_parts[2]
prompt_str = prev_utterance_parts[3].replace("_comma_", ",")
emotion_label_int = emo_map[prev_utterance_parts[2]]
ongoing_utterance_list.append(prev_utterance_str)
ids.append((prev_utterance_parts[0],prev_utterance_parts[1]))
speaker_info.append(get_speaker_info(prev_utterance_parts[1]))
data["prompt"].append(prompt_str)
data["utterance_data_list"].append(ongoing_utterance_list)
data["utterance_data"].append("".join(ongoing_utterance_list))
data["utterance_id"].append(ids)
data["speaker_info"].append(speaker_info)
data["emotion_label"].append(emotion_label_str)
data["emotion"].append(emotion_label_int)
ongoing_utterance_list = []
ongoing_utterance_inter_list = []
ids = []
speaker_info = []
processed_data = {"prompt":data["prompt"],"utterance_data_list":data["utterance_data_list"],"utterance_data":data["utterance_data"],"speaker_info":data["speaker_info"],"emotion":data["emotion"]}
return processed_data
def tokenize_data(processed_data,tokenizer_type="bert-base-uncased"):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)
tokenized_inter_speaker, tokenized_inter_listener = [],[]
tokenized_total_data,tokenized_speaker,tokenized_listener = [],[],[]
tokenized_list_data,tokenized_turn_data = [],[]
arousal_data,valence_data,dom_data = [],[],[]
for u,val_utterance in enumerate(processed_data["utterance_data_list"]): #val utterance is one conversation which has multiple utterances
tokenized_i= tokenizer.batch_encode_plus(val_utterance,add_special_tokens=False)["input_ids"]
speaker_utterance,listener_utterance,speaker_iutterance,listener_iutterance,total_utterance = [101],[101],[101],[101],[101]
total_utterance_list = []
for s,val_speaker in enumerate(tokenized_i): ## for each utterance inside a conversation
if s%2 == 0: # when person is the "speaker"
speaker_utterance.extend(val_speaker+[102])
speaker_iutterance.extend(val_speaker+[102])
listener_iutterance.extend([0 for _ in range(len(val_speaker))]+[102])
#
else:
listener_utterance.extend(val_speaker+[102])
listener_iutterance.extend(val_speaker+[102])
speaker_iutterance.extend([0 for _ in range(len(val_speaker))]+[102])
total_utterance.extend(val_speaker+[102])
total_utterance_list.append(val_speaker+[102])
turn_data = [[101]+a+b for a, b in zip(total_utterance_list[::2],total_utterance_list[1::2])] # turnwise data, [[s1],[l1],[s2],[l2],..] --> [[s1;l1],[s2;l2],..]
total_utterance_list = [[101]+i for i in total_utterance_list] #appending 101 to every utterance start
arousal_vec = get_arousal_vec(tokenizer,total_utterance)
valence_vec = get_valence_vec(tokenizer,total_utterance)
dom_vec = get_dom_vec(tokenizer,total_utterance)
tokenized_inter_speaker.append(speaker_iutterance)
tokenized_inter_listener.append(listener_iutterance)
tokenized_speaker.append(speaker_utterance)
tokenized_listener.append(listener_utterance)
tokenized_total_data.append(total_utterance)
tokenized_list_data.append(total_utterance_list)
tokenized_turn_data.append(turn_data)
arousal_data.append(arousal_vec)
valence_data.append(valence_vec)
dom_data.append(dom_vec)
assert len(tokenized_list_data) == len(tokenized_turn_data) ==len(tokenized_inter_speaker) == len(tokenized_inter_listener) == len(tokenized_total_data) ==len(tokenized_listener) ==len(tokenized_speaker) == len(processed_data["emotion"]) == len(tokenized_total_data) == len(arousal_data) == len(valence_data) == len(dom_data)
save_data = {"utterance_data_list":tokenized_list_data,"utterance_data":tokenized_total_data,"utterance_data_str":processed_data["utterance_data_list"],"speaker_idata":tokenized_inter_speaker,"listener_idata":tokenized_inter_listener,"speaker_data":tokenized_speaker,"listener_data":tokenized_listener,"turn_data":tokenized_turn_data,"arousal_data":arousal_data,"valence_data":valence_data,"dom_data":dom_data,"emotion":processed_data["emotion"]}
return save_data
def go_emotions_preprocess(tokenizer_type="bert-base-uncased"):
data_dict = {}
data_home = "./.data/goemotions/"
nlabel = 28
for datatype in ["train","valid","test"]:
datafile = data_home + datatype + ".tsv"
## cause => tweet, changed for uniformity sake
data = pd.read_csv(datafile, sep='\t',names=["cause","emotion","user"])
emotion,cause = [],[]
for i,emo in enumerate(data["emotion"]):
emotion.append(get_one_hot(emo,nlabel))
cause.append(data["cause"][i])
print("Tokenizing data")
tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)
tokenized_cause =tokenizer.batch_encode_plus(cause).input_ids
processed_data = {}
maximum_utterance = max([len(i) for i in tokenized_cause])
average_utterance = np.mean([len(i) for i in tokenized_cause])
print(len(cause),len(emotion),len(tokenized_cause))
print("Max utterance length:",maximum_utterance,"Avg utterance length:",average_utterance)
## changed prompt --> cause for uniformity
processed_data["tokenized_cause"] = tokenized_cause
processed_data["emotion"] = emotion
processed_data["cause"] = cause
arousal_vec,valence_vec,dom_vec = [],[],[]
for cause_i in tokenized_cause:
arousal_vec.append(get_arousal_vec(tokenizer,cause_i))
valence_vec.append(get_valence_vec(tokenizer,cause_i))
dom_vec.append(get_dom_vec(tokenizer,cause_i))
processed_data["arousal_data"] = arousal_vec
processed_data["valence_data"] = valence_vec
processed_data["dom_data"] = dom_vec
        processed_data = pd.DataFrame.from_dict(processed_data)
import streamlit as st
import torch
import torchvision
from detectron2.utils.logger import setup_logger
import numpy as np
import os, json, cv2, random
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from PIL import Image
import pandas as pd
import requests
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
from streamlit_folium import folium_static
import folium
setup_logger()
title = '<p style="font-family:monospace; color:orange; font-size: 40px;"><b>Parking Space Detection using Detectron2</b></p>'
st.markdown(title,unsafe_allow_html=True)
st.sidebar.subheader('**Upload a File**')
file_upload = st.sidebar.file_uploader("Choose a Image",type=['png','jpeg','jpg'])
if file_upload is not None:
#get file details
file_details = {"filename":file_upload.name, "filetype":file_upload.type,"filesize":file_upload.size}
st.sidebar.markdown('**File Details**')
st.sidebar.write(file_details)
st.subheader('**Input Image**')
file_bytes = np.asarray(bytearray(file_upload.read()), dtype=np.uint8)
opencv_image = cv2.imdecode(file_bytes, 1)
st.image(opencv_image, channels="BGR",width=380)
st.write('\n')
cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.merge_from_file('config.yml')
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3
cfg.MODEL.WEIGHTS = 'model_final.pth'
predictor = DefaultPredictor(cfg)
outputs = predictor(opencv_image)
c,k=0,0
for i in outputs["instances"].pred_classes:
if i==1:
c=c+1
elif i==2:
k=k+1
st.subheader('**Inferenced Image**')
v = Visualizer(opencv_image[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
st.image(out.get_image()[:, :, ::-1],caption='Processed Image',width=380)
st.write('**Inferenced Details**')
st.markdown(f"Available Parking Space: {c}")
st.markdown(f"Filled Parking Space: {k}")
# Visualize
st.subheader('**Visualize**')
d = {'X_axis': ['Available Parking Space','Filled Parking Space'],
'Y_axis': [c,k]}
    df = pd.DataFrame(d)
import scipy.optimize as sco
from pandas_datareader import data
import pandas as pd
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# declare tickers& weights (3m)
portfolio_tickers = ['SPY', 'AMZN', 'AAPL']
weights = [0.5, 0.5, 0.0]
start_date = '2021-07-01'
end_date = '2021-09-30'
panel_data_2y = data.DataReader(
portfolio_tickers, 'yahoo', start_date, end_date)
closes_3m = panel_data_2y[['Close', 'Adj Close']]
closes_3m = closes_3m.loc[start_date: end_date]
# portfolio returns
return_series_adj = (closes_3m['Adj Close'].pct_change() + 1).cumprod() - 1
weighted_return_series_adj = weights * (return_series_adj)
return_series_adj = weighted_return_series_adj.sum(axis=1)
sector_etf = ['XLE', 'XLF', 'XLK', 'XLRE', 'XLY',
'XLI', 'XLB', 'XLC', 'XLV', 'XLP', 'XLU']
# start_date = '2021-07-01'
# end_date = '2021-09-30'
panel_data = data.DataReader(sector_etf, 'yahoo', start_date, end_date)
sector_closes_3m = panel_data[['Close', 'Adj Close']]
sector_closes_3m = sector_closes_3m.loc[start_date: end_date]
weighted_closes = pd.DataFrame((weights * closes_3m["Close"]).sum(axis=1))
weighted_closes = weighted_closes.rename(columns={0: 'portfolio'})
all_closes_3m = sector_closes_3m["Close"].join(weighted_closes)
return_series_close = (all_closes_3m.pct_change() + 1).cumprod() - 1
correlation = pd.DataFrame(return_series_close.corr().tail(1).round(3))
# correlation
correlation = correlation.sort_values(by="portfolio", axis=1)
top3_inverse_sectors = correlation.columns.tolist()[0:3]
# top3_inverse_sectors
# select 3 representative tickers by sectors;
sector_dict = {
'XLB': ['LIN', 'ECL', 'GOLD'], 'XLC': ['GOOGL', 'FB', 'NFLX'], 'XLY': ['AMZN', 'TSLA', 'NKE'], 'XLP': ['KO', 'WMT', 'PEP'], 'XLE': ['XOM', 'CVX', 'PSX'], 'XLF': ['V', 'MA', 'JPM'], 'XLV': ['JNJ', 'PFE', 'UNH'], 'XLI': ['HON', 'GE', 'FDX'], 'XLR': ['AMT', 'CCI', 'PSA'], 'XLK': ['MSFT', 'AAPL', 'CRM'], 'XLU': ['NEE', 'DUK', 'XEL']
}
inverse_sector_tickers = []
for t in top3_inverse_sectors:
for x in sector_dict[t]:
inverse_sector_tickers.append(x)
# use 2y for rs
start_date = '2019-01-01'
end_date = '2020-12-31'
panel_data = data.DataReader(
inverse_sector_tickers, 'yahoo', start_date, end_date)
sector_closes_2y = panel_data[['Adj Close']]
sector_closes_2y = sector_closes_2y.loc[start_date: end_date]
###########################################################
# portfolio_2y_return_series_adj
###########################################################
# portfolio_tickers = ['SPY','AMZN','AAPL']
# weights = [0.5, 0.5, 0.0]
# start_date = '2019-01-01'
# end_date = '2020-12-31'
panel_data = data.DataReader(portfolio_tickers, 'yahoo', start_date, end_date)
closes_2y = panel_data[['Adj Close']]
closes_2y = closes_2y.loc[start_date: end_date]
# // return series for the period and plot the returns on a single chart.
return_series_adj = (closes_2y['Adj Close'].pct_change() + 1).cumprod() - 1
ret_ax = return_series_adj.plot(figsize=(16, 9), title="Total Return")
ret_ax.yaxis.set_major_formatter(
FuncFormatter(lambda y, _: '{:.0%}'.format(y)))
# portfolio returns
weighted_return_series_adj_2y = weights * (return_series_adj)
return_series_adj_2y = weighted_return_series_adj_2y.sum(axis=1)
###########################################################
# Efficient Frontiner from Modern Portfolio Theory
###########################################################
plt.style.use('fivethirtyeight')
np.random.seed(777)
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
###########################################################
# Random Portfolios Generation
def portfolio_annualised_performance(weights, mean_returns, cov_matrix):
returns = np.sum(mean_returns*weights) * 252
std = np.sqrt(np.dot(weights.T, np.dot(
cov_matrix, weights))) * np.sqrt(252)
return std, returns
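# Note on the annualisation above (illustrative numbers, assuming 252 trading days):
# a mean daily return of 0.001 annualises to 0.252 (25.2%), and a daily portfolio
# standard deviation of 0.01 annualises to 0.01 * sqrt(252), roughly 0.159 (15.9%).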
###########################################################
# Efficient Frontier
###########################################################
def neg_sharpe_ratio(weights, mean_returns, cov_matrix, risk_free_rate):
p_var, p_ret = portfolio_annualised_performance(
weights, mean_returns, cov_matrix)
return -(p_ret - risk_free_rate) / p_var
def max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate):
num_assets = len(mean_returns)
args = (mean_returns, cov_matrix, risk_free_rate)
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bound = (0.0, 1.0)
bounds = tuple(bound for asset in range(num_assets))
result = sco.minimize(neg_sharpe_ratio, num_assets*[1./num_assets, ], args=args,
method='SLSQP', bounds=bounds, constraints=constraints)
return result
def portfolio_volatility(weights, mean_returns, cov_matrix):
return portfolio_annualised_performance(weights, mean_returns, cov_matrix)[0]
def min_variance(mean_returns, cov_matrix):
num_assets = len(mean_returns)
args = (mean_returns, cov_matrix)
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bound = (0.0, 1.0)
bounds = tuple(bound for asset in range(num_assets))
result = sco.minimize(portfolio_volatility, num_assets*[1./num_assets, ], args=args,
method='SLSQP', bounds=bounds, constraints=constraints)
return result
def efficient_return(mean_returns, cov_matrix, target):
num_assets = len(mean_returns)
args = (mean_returns, cov_matrix)
def portfolio_return(weights):
return portfolio_annualised_performance(weights, mean_returns, cov_matrix)[1]
constraints = ({'type': 'eq', 'fun': lambda x: portfolio_return(x) - target},
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple((0, 1) for asset in range(num_assets))
result = sco.minimize(portfolio_volatility, num_assets*[
1./num_assets, ], args=args, method='SLSQP', bounds=bounds, constraints=constraints)
return result
def efficient_frontier(mean_returns, cov_matrix, returns_range):
efficients = []
for ret in returns_range:
efficients.append(efficient_return(mean_returns, cov_matrix, ret))
return efficients
def display_ef_with_selected(mean_returns, cov_matrix, risk_free_rate):
max_sharpe = max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate)
sdp, rp = portfolio_annualised_performance(
max_sharpe['x'], mean_returns, cov_matrix)
max_sharpe_allocation = pd.DataFrame(
max_sharpe.x, index=table.columns, columns=['allocation'])
max_sharpe_allocation.allocation = [
round(i*100, 2)for i in max_sharpe_allocation.allocation]
max_sharpe_allocation = max_sharpe_allocation.T
max_sharpe_allocation
min_vol = min_variance(mean_returns, cov_matrix)
sdp_min, rp_min = portfolio_annualised_performance(
min_vol['x'], mean_returns, cov_matrix)
min_vol_allocation = pd.DataFrame(
min_vol.x, index=table.columns, columns=['allocation'])
min_vol_allocation.allocation = [
round(i*100, 2)for i in min_vol_allocation.allocation]
min_vol_allocation = min_vol_allocation.T
an_vol = np.std(returns) * np.sqrt(252)
an_rt = mean_returns * 252
ret_list = [round(rp, 2), round(sdp, 2), max_sharpe_allocation.iloc[0, 0], max_sharpe_allocation.iloc[0, 1], round(
rp_min, 2), round(sdp_min, 2), min_vol_allocation.iloc[0, 0], min_vol_allocation.iloc[0, 1]]
for i, txt in enumerate(table.columns):
ret_list.append(round(an_rt[i], 2))
ret_list.append(round(an_vol[i], 2))
ret_list.append(pd.DataFrame(mean_returns).index.tolist()[0])
return ret_list
#####################################################################
# content for each sector in ret_list
#####################################################################
# Maximum Sharpe Ratio Portfolio Allocation
# 1) Annualised Return: 0.39
# 2) Annualised Volatility: 0.29
# JPM portfolio
# 3) allocation- sector ticker 6.81
# 4) allocation- portfolio 93.19
#####################################################################
# Minimum Volatility Portfolio Allocation
# 5) Annualised Return: 0.36
# 6) Annualised Volatility: 0.28
# JPM portfolio
# 7) allocation- sector ticker 28.11
# 8) allocation- portfolio 71.89
#####################################################################
# Individual Stock Returns and Volatility
# 9) annualised return- sector ticker;
# 10) annualised volatility- sector ticker;
# 11) annualised return- portfolio;
# 12) annualised volatility- portfolio;
# 13) ticker_symbol
#####################################################################
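# Optional readability helper (hypothetical names, not used by the rest of the
# notebook): zipping the positional layout documented above with labels turns each
# ret_list produced by display_ef_with_selected into a self-describing dict.
_ret_list_fields = ['msr_return', 'msr_volatility', 'msr_alloc_ticker', 'msr_alloc_portfolio',
                    'minvol_return', 'minvol_volatility', 'minvol_alloc_ticker', 'minvol_alloc_portfolio',
                    'ticker_ann_return', 'ticker_ann_volatility',
                    'portfolio_ann_return', 'portfolio_ann_volatility', 'ticker_symbol']
def _label_ret_list(ret_list):
    return dict(zip(_ret_list_fields, ret_list))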
# 2y
# portfolio_tickers = ['SPY','AMZN','AAPL']
start_date = '2019-01-01'
end_date = '2020-12-31'
panel_data = data.DataReader(portfolio_tickers, 'yahoo', start_date, end_date)
closes_2y = panel_data[['Adj Close']]
closes_2y = closes_2y.loc[start_date: end_date]
weighted_closes_2y = pd.DataFrame(
(weights * closes_2y["Adj Close"]).sum(axis=1))
weighted_closes_2y = weighted_closes_2y.reset_index()
weighted_closes_2y = weighted_closes_2y.rename(
columns={"Date": "date", 0: "adj_close"})
weighted_closes_2y["ticker"] = "portfolio"
sector_res = []
for t in inverse_sector_tickers:
panel_data = pd.DataFrame()
t_data = data.DataReader(t, 'yahoo', start_date, end_date)
t_data = t_data.loc[start_date: end_date]
t_data = t_data[['Adj Close']]
t_data = t_data.reset_index()
t_data["ticker"] = t
t_data = t_data.rename(
columns={"Date": "date", t_data.columns[1]: "adj_close"})
panel_data = panel_data.append(t_data)
panel_data = panel_data.append(weighted_closes_2y)
df = panel_data.set_index('date')
df.head()
table = df.pivot(columns='ticker')
table.columns = [col[1] for col in table.columns]
table.head()
returns = table.pct_change()
mean_returns = returns.mean()
cov_matrix = returns.cov()
risk_free_rate = 0.0178
sector_res.append(display_ef_with_selected(
mean_returns, cov_matrix, risk_free_rate))
# print(sector_res)
###########################################################
# Maximum Sharpe Ratio Portfolio Allocation
###########################################################
# Plot the weighted returns and individual returns on the same plot for comparison
fig, ax = plt.subplots(figsize=(16, 9))
ax.plot(return_series_adj_2y, label='current portfolio')
for sector in sector_res:
# portfolio returns
weights = [sector[2], sector[3]]
sector_closes_2y_return_series_adj = (
sector_closes_2y['Adj Close'].pct_change() + 1).cumprod() - 1
return_series_adj_2y_new_sector = pd.DataFrame(sector_closes_2y_return_series_adj[sector[12]]).join(
pd.DataFrame(return_series_adj_2y)).rename(columns={0: 'portfolio'})
weighted_return_series_adj = weights * (return_series_adj_2y_new_sector)
# Sum the weighted returns for portfolio
df = weighted_return_series_adj.sum(axis=1)/100
ax.plot(df, label="+ " + sector[12])
ax.set_title("Return series (for Maximum Sharpe Ratio Portfolio Allocation)")
ax.legend()
ax.legend(labelspacing=0.8)
############################################################
# Minimum Volatility Portfolio Allocation
###########################################################
fig, ax = plt.subplots(figsize=(16, 9))
ax.plot(return_series_adj_2y, label='current portfolio')
for sector in sector_res:
# portfolio returns
weights = [sector[6], sector[7]]
sector_closes_2y_return_series_adj = (
sector_closes_2y['Adj Close'].pct_change() + 1).cumprod() - 1
return_series_adj_2y_new_sector = pd.DataFrame(sector_closes_2y_return_series_adj[sector[12]]).join(
| pd.DataFrame(return_series_adj_2y) | pandas.DataFrame |
import pandas as pd
df = | pd.read_csv("data/1P2015AEDL.csv", header=0, sep=",", low_memory=False) | pandas.read_csv |
def upsampled(names,homepath):
'''
Parameters
----------
`names` (list):
List of the cancer names found from `featSelect`
function.
`homepath` (str):
Path where you want to save all the generated files
and folders.
Return:
-------
None
Outputs:
--------
Upsample the train data into 3:1 ratio and save it in
"~/train_data"
'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from collections import Counter
import os
import warnings
warnings.filterwarnings("ignore")
# Directory
directory = "train_data"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
# Directory
directory = "train_data/cancer"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
# Directory
directory = "train_data/normal"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
#len(names)
print("Upsampling on train data is running")
for index in range(len(names)):
Cancer = pd.read_csv(homepath+"/pre_upsample_train_data/cancer/"+
names[index]+".txt.bz2",header=None, delimiter = '\t')
Normal = pd.read_csv(homepath+"/pre_upsample_train_data/normal/"+
names[index]+".txt.bz2",header=None, delimiter = '\t')
can_sample = len(Cancer.columns)
norm_sample = len(Normal.columns)
Cancer = Cancer.T
Normal = Normal.T
if( norm_sample <= round(can_sample/3) ):
## adding target in the last col
Cancer[str(len(Cancer.columns))] = 1
Normal[str(len(Normal.columns))] = 0
#print(len(Cancer.columns))
frame = [Cancer,Normal]
Data = pd.concat(frame,axis=0)
#print(Data)
x = Data.iloc[:,:len(Cancer.columns)-1]
y = Data.iloc[:,len(Cancer.columns)-1]
# print(x)
# print(y)
# summarize class distribution
counter = Counter(y)
#print(counter)
# transform the dataset
oversample = SMOTE(k_neighbors=1, sampling_strategy=0.3333)
X, y = oversample.fit_resample(x, y)
# summarize the new class distribution
counter = Counter(y)
#print(counter)
X = | pd.DataFrame(data=X) | pandas.DataFrame |
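# Standalone sketch (separate from the pipeline above, all names hypothetical): what the
# 3:1 (cancer:normal) SMOTE upsampling used in upsampled() looks like on toy data.
import numpy as np
from collections import Counter
from imblearn.over_sampling import SMOTE
_demo_X = np.random.rand(60, 5)
_demo_y = np.array([1] * 54 + [0] * 6)     # 54 "cancer" rows vs. 6 "normal" rows
_demo_Xr, _demo_yr = SMOTE(k_neighbors=1, sampling_strategy=0.3333).fit_resample(_demo_X, _demo_y)
print(Counter(_demo_y), '->', Counter(_demo_yr))   # minority grows to ~1/3 of the majority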
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Creating GUI with tkinter
import json
import tkinter
from tkinter import *
from tensorflow.keras.models import load_model, model_from_json
import io
import os
import pandas as pd
import numpy as np
symptoms_url = "https://raw.githubusercontent.com/KellieChong/MacHacks2021/main/symptoms_diseases_data/sym_t.csv"
diagnosis_url = "https://raw.githubusercontent.com/KellieChong/MacHacks2021/main/symptoms_diseases_data/dia_t.csv"
symptomsdf = pd.read_csv(symptoms_url)
diagnosisdf = pd.read_csv(diagnosis_url)
# symptomsdf = symptomsdf.tonumpy()
# diagnosisdf = diagnosisdf.tonumpy()
#not really sure if we need to load these
f = open("/Users/kelliechong/documents/MacHacks/model.json",)
json_model = json.loads(f.read())
model = load_model('/Users/kelliechong/documents/MacHacks/diagnosis_model_updated.h5')
model.load_weights('/Users/kelliechong/documents/MacHacks/diagnosis_model_updated.h5')
symptomsdf = pd.DataFrame(symptomsdf).to_numpy()
diagnosisdf = | pd.DataFrame(diagnosisdf) | pandas.DataFrame |
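# Illustrative alternative (assumption: model.json stores the architecture as a JSON
# string): the imported-but-unused model_from_json could rebuild the network from that
# file and then pull the weights from the .h5 checkpoint, instead of calling load_model
# and load_weights on the same .h5 file as above.
with open("/Users/kelliechong/documents/MacHacks/model.json") as _arch_file:
    _demo_model = model_from_json(_arch_file.read())
_demo_model.load_weights('/Users/kelliechong/documents/MacHacks/diagnosis_model_updated.h5')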
## calculating correlationcoefficient ##
# using sitiuation is Transfer learning.
# Can be used to fill missing values.
import numpy as np
import pandas as pd
from minepy import MINE
from pyHSICLasso import HSICLasso
from sklearn.utils import check_array
class CorrelationCoefficient():
def mic_matrix(self, data, n_sample=False, frac_sample=False):
'''Get mic correlation coefficient matrix
Calculated coefficients are saved as instances.
data: (numpy or pandas) A data frame that contains all explanatory and objective variables
n_sample : (int) How much random sampling to do. False if not.
If a numerical value is entered, sampling is performed using that number of rows.
frac_sample: [0 ~ 1] (float) Sampled as a percentage of the number of rows. Not used at the same time as n_sample.
'''
data = pd.DataFrame(data).copy()
data = data.dropna() # Delete missing values and think
# Sampling when n_sample contains a numerical value
if not n_sample:
if not frac_sample:
# n_sample=False, frac_sample=False
pass
else:
# n_sample=False, frac_sample=int
data = data.sample(frac=frac_sample, replace=True)
else:
if not frac_sample:
# n_sample=int, frac_sample=False
data = data.sample(n=n_sample, replace=True)
else:
# n_sample=int, frac_sample=int
raise ValueError('Please enter a value for `frac` OR `n`, not both')
        data = check_array(data, accept_sparse="csc", dtype=float) # convert to numpy.ndarray
n_col = data.shape[1]
mic_array = []
mic_append = mic_array.append
for i in range(n_col):
temp_mic = [] # list to store one line of mic
temp_mic_append = temp_mic.append
for j in range(n_col):
# Calculate only one of them because it is symmetric
if i>=j:
temp_mic_append(0.0)
else:
mine=MINE()
mine.compute_score(data[:, i], data[:, j])
temp_mic_append(mine.mic())
mic_append(temp_mic)
mic_ = np.array(mic_array)
# Create a correlation coefficient matrix by copying the symmetry of the upper triangular matrix → transpose → unit matrix.
self.mic = mic_ + mic_.T + np.eye(N=n_col, dtype=float)
return self.mic
def mine_matrix(self, data ,n_sample=False, frac_sample=False):
'''Obtains all coefficient values related to mine as a correlation coefficient matrix.
Calculated coefficients are saved as instances.
data: (numpy or pandas) A data frame that contains all explanatory and objective variables
n_sample : (int) How much random sampling to do. False if not.
If a numerical value is entered, sampling is performed using that number of rows.
frac_sample: [0 ~ 1] (float) Sampled as a percentage of the number of rows. Not used at the same time as n_sample.
'''
data = | pd.DataFrame(data) | pandas.DataFrame |
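# Standalone sketch of the MIC computation that mic_matrix() wraps, applied to two toy
# variables with a strong non-linear relationship (all _demo_* names are illustrative).
import numpy as np
from minepy import MINE
_demo_x = np.linspace(0, 10, 200)
_demo_y = np.sin(_demo_x) + np.random.normal(scale=0.1, size=_demo_x.size)
_demo_mine = MINE()
_demo_mine.compute_score(_demo_x, _demo_y)
print('MIC:', _demo_mine.mic())    # close to 1 despite the relationship being non-linear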
import logging
import numpy as np
import pandas as pd
from ilastikrag.accumulators import BaseEdgeAccumulator
logger = logging.getLogger(__name__)
class EdgeRegionEdgeAccumulator(BaseEdgeAccumulator):
"""
Accumulator for computing region axes and region radii over edge coordinates.
(The :py:class:`~ilastikrag.accumulators.standard.StandardEdgeAccumulator`
class does not provide region features.)
We don't use vigra's RegionFeatureAccumulators because we only have
access to the sparse lists of edge pixels (along each axis).
Instead, we manually compute the region axes/radii directly from the
edge coordinate columns.
Supported feature names:
- edgeregion_edge_area (radii_0 * radii_1)
- edgeregion_edge_volume (radii_0 * radii_1 * radii_2)
..
- edgeregion_edge_regionradii (all of the below)
- edgeregion_edge_regionradii_0
- edgeregion_edge_regionradii_1
- edgeregion_edge_regionradii_2
..
- edgeregion_edge_regionaxes (all of the below)
- edgeregion_edge_regionaxes_0x
- edgeregion_edge_regionaxes_0y
- edgeregion_edge_regionaxes_0z
- edgeregion_edge_regionaxes_1x
- edgeregion_edge_regionaxes_1y
- edgeregion_edge_regionaxes_1z
- edgeregion_edge_regionaxes_2x
- edgeregion_edge_regionaxes_2y
- edgeregion_edge_regionaxes_2z
"""
ACCUMULATOR_ID = 'edgeregion'
def __init__(self, rag, feature_names):
self.cleanup() # Initialize members
label_img = rag.label_img
self._dense_axiskeys = list(label_img.axistags.keys())
if rag.flat_superpixels:
self._dense_axiskeys = ['y', 'x']
feature_names = list(feature_names)
# 'edgeregion_edge_regionradii' is shorthand for "all edge region radii"
if 'edgeregion_edge_regionradii' in feature_names:
feature_names.remove('edgeregion_edge_regionradii')
for component_index in range(label_img.ndim):
feature_names.append( 'edgeregion_edge_regionradii_{}'.format( component_index ) )
# 'edgeregion_edge_regionaxes' is shorthand for "all edge region axes"
if 'edgeregion_edge_regionaxes' in feature_names:
feature_names.remove('edgeregion_edge_regionaxes')
for component_index in range(label_img.ndim):
for axisname in ['xyz'[k] for k in range(label_img.ndim)]:
feature_names.append( 'edgeregion_edge_regionaxes_{}{}'.format( component_index, axisname ) )
self._feature_names = feature_names
self._rag = rag
def cleanup(self):
self._final_df = None
def ingest_edges(self, rag, edge_values):
# This class computes only unweighted region
# features, so edge_values is not used below.
# Concatenate edges from all axes into one big DataFrame
tables = [table[['sp1', 'sp2'] + self._dense_axiskeys] for table in rag.dense_edge_tables.values()]
coords_df = | pd.concat(tables, axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[3]:
from pandas import DataFrame
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import metrics
#Import the xls into a dataframe using pandas
file = r'Concrete_Data.xls'
df = pd.read_excel(file)
df.head() #Prints the first few rows
# In[3]:
df.describe()
# In[4]:
#Let Y contain the output variable compressive strength
#Let X contain the data regarding the features
Y = df['Concrete compressive strength(MPa, megapascals) ']
X = df.drop('Concrete compressive strength(MPa, megapascals) ', axis = 1)
#Perform train test split (80% train / 20% test)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,random_state=1)
# In[5]:
'''
Hyperparameter tuning using Grid search CV
'''
#Set the possible values for lambda
alphas = [i for i in range(1,1500)]
#Store the values of cross_val
stored = []
#Evaluate the baseline model first (alpha = 0)
regressor = Ridge(alpha = 0)
cross_scores = cross_val_score(regressor,X_train,y_train,cv = 10,scoring = 'neg_mean_squared_error')
stored.append(cross_scores.mean())
best_value = cross_scores.mean()
best_alpha = 0
#Loop for the remaining possible values of lambda
for i in range(1,len(alphas)):
#Create a model with the given lambda
regressor = Ridge(alpha = alphas[i])
#Store the value of cross-validated score
cross_scores = cross_val_score(regressor,X_train,y_train,cv = 10,scoring = 'neg_mean_squared_error')
stored.append(cross_scores.mean())
    #Update the values if a better (higher) score was found
    #(neg_mean_squared_error returns negative MSE, so larger means lower error)
    if cross_scores.mean() > best_value :
best_value = cross_scores.mean()
best_alpha = alphas[i]
print('Best alpha %f Best Value %f'%(best_alpha,best_value))
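#Illustrative alternative (not in the original notebook): scikit-learn's RidgeCV runs the
#same 10-fold search over the alphas list in a single call, and its chosen alpha_ should
#roughly agree with best_alpha from the manual loop above.
from sklearn.linear_model import RidgeCV
ridge_cv = RidgeCV(alphas=alphas, scoring='neg_mean_squared_error', cv=10).fit(X_train, y_train)
print('RidgeCV chosen alpha:', ridge_cv.alpha_)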
# In[5]:
plt.plot(alphas, stored)
plt.xlabel('Alpha Values')
plt.ylabel('Cross-validated score (negative MSE)')
# In[6]:
#Create Ridge regression model using chosen alpha
regressor = Ridge(alpha = best_alpha)
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
# In[10]:
plt.title("Predicted Y vs. Actual Y")
plt.xlabel("Actual Y", fontsize = 9)
plt.ylabel("Predicted Y", fontsize = 9)
plt.scatter(y_test, y_pred, color='black')
# In[19]:
coeff_df = | pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import utils
from datetime import datetime
import pickle
import os
testset_fraction=0.2
dataset_one_path = os.path.join(os.path.dirname(__file__),"..", "data", "dataset_one.csv")
dataset_two_path = os.path.join(os.path.dirname(__file__),"..", "data", "dataset_two.xlsx")
preprocessed_data_path = os.path.join(os.path.dirname(__file__),"..", "data", "data_preprocessed.pkl")
# load both datasets
data1 = | pd.read_csv(dataset_one_path, sep=";", decimal=",", encoding="iso-8859-1", delimiter=";", parse_dates={"date and time": ["Date", "Time"]}) | pandas.read_csv |
# -*- coding: utf8 -*-
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
if __name__ == '__main__':
df1 = | pd.read_csv('FASMAtest.csv', delimiter='\t') | pandas.read_csv |
from __future__ import division
import numpy as np
import pandas as pd
from aneris import utils
from aneris import pd_read
from aneris.methods import harmonize_factors, constant_offset, reduce_offset, \
constant_ratio, reduce_ratio, linear_interpolate, model_zero, hist_zero, \
coeff_of_var, default_methods
def _log(msg, *args, **kwargs):
utils.logger().info(msg, *args, **kwargs)
def _warn(msg, *args, **kwargs):
utils.logger().warning(msg, *args, **kwargs)
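# Minimal illustration (not part of aneris) of the late-binding gotcha that the comment
# at the top of Harmonizer._methods below refers to: lambdas created in a loop all close
# over the loop *variable*, so they see its final value rather than the value at creation
# time; binding the value as a default argument is the usual workaround.
_late_bound = [lambda x: x + y for y in range(3)]
assert [f(0) for f in _late_bound] == [2, 2, 2]       # every lambda sees the final y
_early_bound = [lambda x, y=y: x + y for y in range(3)]
assert [f(0) for f in _early_bound] == [0, 1, 2]      # values captured at definition time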
class Harmonizer(object):
"""A class used to harmonize model data to historical data in the
standard calculation format
"""
    # WARNING: it is not possible to build the offset methods programmatically
    # because they use lambdas. You can't do `for y in years: lambda x: f(x,
    # kwarg=str(y))` because y is evaluated when the lambda is executed, not in
    # this block.
_methods = {
'model_zero': model_zero,
'hist_zero': hist_zero,
'constant_ratio': constant_ratio,
'constant_offset': constant_offset,
'reduce_offset_2150_cov':
lambda df, offsets: reduce_offset(df, offsets, final_year='2150'),
'reduce_ratio_2150_cov':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2150'),
'reduce_offset_2020':
lambda df, offsets: reduce_offset(df, offsets, final_year='2020'),
'reduce_offset_2030':
lambda df, offsets: reduce_offset(df, offsets, final_year='2030'),
'reduce_offset_2040':
lambda df, offsets: reduce_offset(df, offsets, final_year='2040'),
'reduce_offset_2050':
lambda df, offsets: reduce_offset(df, offsets, final_year='2050'),
'reduce_offset_2060':
lambda df, offsets: reduce_offset(df, offsets, final_year='2060'),
'reduce_offset_2070':
lambda df, offsets: reduce_offset(df, offsets, final_year='2070'),
'reduce_offset_2080':
lambda df, offsets: reduce_offset(df, offsets, final_year='2080'),
'reduce_offset_2090':
lambda df, offsets: reduce_offset(df, offsets, final_year='2090'),
'reduce_offset_2100':
lambda df, offsets: reduce_offset(df, offsets, final_year='2100'),
'reduce_offset_2150':
lambda df, offsets: reduce_offset(df, offsets, final_year='2150'),
'reduce_ratio_2020':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2020'),
'reduce_ratio_2030':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2030'),
'reduce_ratio_2040':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2040'),
'reduce_ratio_2050':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2050'),
'reduce_ratio_2060':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2060'),
'reduce_ratio_2070':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2070'),
'reduce_ratio_2080':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2080'),
'reduce_ratio_2090':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2090'),
'reduce_ratio_2100':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2100'),
'reduce_ratio_2150':
lambda df, ratios: reduce_ratio(df, ratios, final_year='2150'),
'linear_interpolate_2020':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2020'),
'linear_interpolate_2030':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2030'),
'linear_interpolate_2040':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2040'),
'linear_interpolate_2050':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2050'),
'linear_interpolate_2060':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2060'),
'linear_interpolate_2070':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2070'),
'linear_interpolate_2080':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2080'),
'linear_interpolate_2090':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2090'),
'linear_interpolate_2100':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2100'),
'linear_interpolate_2150':
lambda df, offsets: linear_interpolate(df, offsets, final_year='2150'),
}
def __init__(self, data, history, config={}, verify_indicies=True):
"""Parameters
----------
data : pd.DataFrame
model data in standard calculation format
history : pd.DataFrame
history data in standard calculation format
config : dict, optional
configuration dictionary (see http://mattgidden.com/aneris/config.html for options)
verify_indicies : bool, optional
check indicies of data and history, provide warning message if
different
"""
if not isinstance(data.index, pd.MultiIndex):
raise ValueError('Data must use utils.df_idx')
if not isinstance(history.index, pd.MultiIndex):
raise ValueError('History must use utils.df_idx')
if verify_indicies and not data.index.equals(history.index):
idx = history.index.difference(data.index)
msg = 'More history than model reports, adding 0 values {}'
_warn(msg.format(idx.to_series().head()))
df = pd.DataFrame(0, columns=data.columns, index=idx)
data = pd.concat([data, df]).sort_index().loc[history.index]
assert data.index.equals(history.index)
key = 'harmonize_year'
# TODO type
self.base_year = str(config[key]) if key in config else '2015'
numcols = utils.numcols(data)
cols = [x for x in numcols if int(x) >= int(self.base_year)]
self.data = data[cols]
self.model = pd.Series(index=self.data.index,
name=self.base_year).to_frame()
self.history = history
self.methods_used = None
self.offsets, self.ratios = harmonize_factors(
self.data, self.history, self.base_year)
# get default methods to use in decision tree
key = 'default_luc_method'
self.luc_method = config[key] if key in config else None
key = 'default_offset_method'
self.offset_method = config[key] if key in config else None
key = 'default_ratio_method'
self.ratio_method = config[key] if key in config else None
def metadata(self):
"""Return pd.DataFrame of method choice metadata"""
methods = self.methods_used
if isinstance(methods, pd.Series): # only defaults used
methods = methods.to_frame()
methods['default'] = methods['method']
methods['override'] = ''
meta = pd.concat([
methods['method'],
methods['default'],
methods['override'],
self.offsets,
self.ratios,
self.history[self.base_year],
self.history.apply(coeff_of_var, axis=1),
self.data[self.base_year],
self.model[self.base_year],
], axis=1)
meta.columns = [
'method',
'default',
'override',
'offset',
'ratio',
'history',
'cov',
'unharmonized',
'harmonized',
]
return meta
def _default_methods(self):
methods, diagnostics = default_methods(
self.history, self.data, self.base_year,
self.luc_method, self.offset_method, self.ratio_method
)
return methods
def _harmonize(self, method, idx, check_len):
# get data
model = self.data.loc[idx]
hist = self.history.loc[idx]
offsets = self.offsets.loc[idx]
ratios = self.ratios.loc[idx]
# get delta
delta = ratios if 'ratio' in method else offsets
# checks
assert(not model.isnull().values.any())
assert(not hist.isnull().values.any())
assert(not delta.isnull().values.any())
if check_len:
assert((len(model) < len(self.data)) &
(len(hist) < len(self.history)))
# harmonize
model = Harmonizer._methods[method](model, delta)
y = str(self.base_year)
if model.isnull().values.any():
msg = '{} method produced NaNs: {}, {}'
where = model.isnull().any(axis=1)
raise ValueError(msg.format(method,
model.loc[where, y],
delta.loc[where]))
# construct the full df of history and future
return model
def methods(self, overrides=None):
"""Return pd.DataFrame of methods to use for harmonization given
pd.DataFrame of overrides
"""
# get method listing
methods = self._default_methods()
if overrides is not None:
midx = self.model.index
oidx = overrides.index
# remove duplicate values
dup = oidx.duplicated(keep='last')
if dup.any():
msg = 'Removing duplicated override entries found: {}\n'
_warn(msg.format(overrides.loc[dup]))
overrides = overrides.loc[~dup]
# get subset of overrides which are in model
outidx = oidx.difference(midx)
if outidx.size > 0:
msg = 'Removing override methods not in processed model output:\n{}'
_warn(msg.format(overrides.loc[outidx]))
inidx = oidx.intersection(midx)
overrides = overrides.loc[inidx]
# overwrite defaults with overrides
final_methods = overrides.combine_first(methods).to_frame()
final_methods['default'] = methods
final_methods['override'] = overrides
methods = final_methods
return methods
def harmonize(self, overrides=None):
"""Return pd.DataFrame of harmonized trajectories given pd.DataFrame
overrides
"""
# get special configurations
methods = self.methods(overrides=overrides)
# save for future inspection
self.methods_used = methods
if isinstance(methods, pd.DataFrame):
methods = methods['method'] # drop default and override info
if (methods == 'unicorn').any():
msg = """Values found where model has positive and negative values
and is zero in base year. Unsure how to proceed:\n{}\n{}"""
cols = ['history', 'unharmonized']
df1 = self.metadata().loc[methods == 'unicorn', cols]
df2 = self.data.loc[methods == 'unicorn']
raise ValueError(msg.format(df1.reset_index(), df2.reset_index()))
dfs = []
y = str(self.base_year)
for method in methods.unique():
_log('Harmonizing with {}'.format(method))
# get subset indicies
idx = methods[methods == method].index
check_len = len(methods.unique()) > 1
# harmonize
df = self._harmonize(method, idx, check_len)
if method not in ['model_zero', 'hist_zero']:
close = (df[y] - self.history.loc[df.index, y]).abs() < 1e-5
if not close.all():
report = df[~close][y].reset_index()
msg = """Harmonization failed with method {} harmonized \
values != historical values. This is likely due to an \
override in the following variables:\n\n{}
"""
raise ValueError(msg.format(method, report))
dfs.append(df)
df = pd.concat(dfs).sort_index()
self.model = df
return df
class _TrajectoryPreprocessor(object):
def __init__(self, hist, model, overrides, regions, prefix, suffix):
self.hist = hist
self.model = model
self.overrides = overrides
self.prefix = prefix
self.suffix = suffix
self.regions = regions
def _downselect_scen(self, scenario):
isscen = lambda df: df.Scenario == scenario
self.model = self.model[isscen(self.model)]
self.overrides = self.overrides[isscen(self.overrides)]
def _downselect_var(self):
# separate data
select = '|'.join([self.prefix, self.suffix])
_log('Downselecting {} variables'.format(select))
hasprefix = lambda df: df.Variable.str.startswith(self.prefix)
hassuffix = lambda df: df.Variable.str.endswith(self.suffix)
subset = lambda df: df[hasprefix(df) & hassuffix(df)]
self.model = subset(self.model)
self.hist = subset(self.hist)
self.overrides = subset(self.overrides)
if len(self.model) == 0:
msg = 'No Variables found for harmonization. Searched for {}.'
raise ValueError(msg.format(select))
assert(len(self.hist) > 0)
def _to_std(self):
_log('Translating to standard format')
xlator = utils.FormatTranslator()
self.model = (
xlator.to_std(df=self.model.copy(), set_metadata=True)
.set_index(utils.df_idx)
.sort_index()
)
self.hist = (
xlator.to_std(df=self.hist.copy(), set_metadata=False)
.set_index(utils.df_idx)
.sort_index()
)
# override with special cases if more are found in history
self.hist = self.hist[~self.hist.index.duplicated(keep='last')]
# hackery required because unit needed for df_idx
if self.overrides.empty:
self.overrides = None
else:
self.overrides['Unit'] = 'kt'
self.overrides = (
xlator.to_std(df=self.overrides.copy(), set_metadata=False)
.set_index(utils.df_idx)
.sort_index()
)
self.overrides.columns = self.overrides.columns.str.lower()
self.overrides = self.overrides['method']
def _agg_hist(self):
# aggregate and clean hist
_log('Aggregating historical values to native regions')
# must set verify to false for now because some isos aren't included!
self.hist = utils.agg_regions(
self.hist, verify=False, mapping=self.regions,
rfrom='ISO Code', rto='Native Region Code'
)
def _fill_model_trajectories(self):
# add zeros to model values if not covered
idx = self.hist.index
notin = ~idx.isin(self.model.index)
if notin.any():
msg = 'Not all of self.history is covered by self.model: \n{}'
_df = self.hist.loc[notin].reset_index()[utils.df_idx]
_warn(msg.format(_df.head()))
zeros = | pd.DataFrame(0, index=idx, columns=self.model.columns) | pandas.DataFrame |
"""
utils4text.py is the script file storing many useful functions for processing the comment dataframes from the subreddits.
That is, it is mainly used for text EDA.
Made by <NAME>.
"""
import numpy as np
import pandas as pd
import multiprocess as mp
import re
import nltk
import contractions
import string
from emoji import UNICODE_EMOJI
from itertools import repeat
from collections import Counter
from nltk import pos_tag, word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords, wordnet
from joblib import Parallel, delayed
from profanity_check import predict_prob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def build_convs(df):
"""
Use parallel computing. Consider only one post at each time.
Reconstruct the dataframe to a more conversation-like dataframe.
Arg:
df: A given dataframe scraped from a certain subreddit.
Return:
df_convs: A more conversation-like dataframe with the columns such as
conversation ID, subreddit, post title, author, dialog turn, and text.
"""
# initialize conversation dataframe
df_convs = pd.DataFrame(columns = ['subreddit', 'post title', 'author', 'dialog turn', 'text'])
# consider each post
df_link_id = df.reset_index().drop('index', axis = 1)
row_list = []
convs_turn = 0
# add post from df_link_id
post_row = df_link_id.loc[0, :]
convs_turn += 1
row_list.append({'subreddit': post_row['subreddit'], 'post title': post_row['title'],
'author': post_row['post_author'], 'dialog turn': convs_turn, 'text': post_row['post_content']})
# iterate over each comment from df_link_id
for i, row in df_link_id.iterrows():
convs_turn += 1
row_list.append({'subreddit': row['subreddit'], 'post title': row['title'],
'author': row['comment_author'], 'dialog turn': convs_turn, 'text': row['comment_content']})
df_convs = df_convs.append(pd.DataFrame(row_list))
# change data types
df_convs['dialog turn'] = df_convs['dialog turn'].astype('int32')
return df_convs
def apply_parallel(grouped_df, func):
"""
    Parallelize the 'build_convs' function by grouping each post with its comments,
    and then concatenate all of the results into a complete dataframe.
Arg:
grouped_df: A dataframe on which groupby function is applied.
Return:
pd.concat(retLst): A complete dataframe with the conversation sets between posts and comments.
"""
retLst = Parallel(n_jobs = mp.cpu_count())(delayed(func)(group) for id, group in grouped_df)
return pd.concat(retLst)
def build_concise_convs_df(df_convs, njobs = mp.cpu_count()):
"""
Using the functions, build_convs and apply_parallel, a dataframe with conversation sets
can be easily built. Also the id for each conversation is added.
Arg:
df_convs: The original dataframe consisting of posts and comments parsed from the text files.
Return:
df_convs_concise: The concise version of a dataframe with conversation sets.
"""
df_convs_concise = apply_parallel(df_convs.groupby(df_convs.link_id), build_convs)
df_convs_concise['conversation id'] = (df_convs_concise.groupby(['post title']).cumcount() == 0).astype(int)
df_convs_concise['conversation id'] = df_convs_concise['conversation id'].cumsum()
df_convs_concise = df_convs_concise[['conversation id', 'subreddit', 'post title', 'author', 'dialog turn', 'text']]
df_convs_concise = df_convs_concise.reset_index().drop('index', axis = 1)
return df_convs_concise
def remove_marks(text):
"""
Remove those unnecessary marks inside texts.
Arg:
text: A string that could be either posts or comments.
Return:
new_text: A string which is a clean sentence.
"""
# remove HTML tags
new_text = re.sub('<.*?>', '', text)
# remove URL
new_text = re.sub('http\S+', '', new_text)
# replace number with <NUM> token
new_text = re.sub('\d+', ' NUM ', new_text)
return new_text
def get_wordnet_pos(tag):
"""
    Transform a part-of-speech tag to its corresponding WordNet format.
    Arg:
        tag: A part-of-speech tag from the pos_tag function.
Return:
The associated wordnet format given by a tag.
"""
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
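# Small usage sketch (illustrative; assumes the NLTK tagger and wordnet data have been
# downloaded): pos_tag yields Penn Treebank tags such as 'VBG' or 'JJ', and
# get_wordnet_pos collapses them onto the four categories WordNetLemmatizer expects,
# falling back to NOUN for anything it does not recognise.
assert get_wordnet_pos('JJ') == wordnet.ADJ
assert get_wordnet_pos('VBD') == wordnet.VERB
assert get_wordnet_pos('XYZ') == wordnet.NOUN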
def token_lemmatize(token, lemmatizer):
"""
Lemmatize a token to convert a token back to its root form.
When dealing with punctuation marks or emojis, simply return them as usual.
Arg:
token: A word in the string type.
lemmatizer: The object from wordnet lemmatizer.
Return:
token in its root form.
"""
if token == 'NUM':
# keep NUM token as usual
return token
elif token in string.punctuation:
# keep punctuation marks as usual
return token
elif token in UNICODE_EMOJI:
# keep emojis
return token
elif token.isalpha():
# consider English words
token, tag = pos_tag([token])[0][0], pos_tag([token])[0][1]
return lemmatizer.lemmatize(token, get_wordnet_pos(tag))
# else:
# # transform those nonwords as the token NOWORD
# token = 'NONWORD'
# return token
def text_lemmatize(text, lemmatizer):
"""
Apply lemmatization on the raw texts to convert the words in texts back to their root
forms. Before lemmatization, remove unnecessary marks and stopwords to keep only the
meaningful words.
Arg:
text: A string text.
lemmatizer: An object of WordNetLemmatizer.
Return:
lem_words: A list of lemmatized words.
"""
# remove unnecessary marks and tokenize
tokens = word_tokenize(remove_marks(text))
# remove stopwords
filtered_tokens = [word for word in tokens if word not in stopwords.words('english')]
# lemmatize the tokenized texts
lem_words = []
lem_words += list(map(token_lemmatize, filtered_tokens, repeat(lemmatizer)))
return lem_words
def compute_tokens(subreddit_convs_concise):
"""
Given the text data from a subreddit, lemmatize and compute the word tokens using the defined function, text_lemmatize.
Before that, remove the newline tag and expanding the English contraction.
    The progress_bar is set to false because of Google Colab's memory limitation.
    If that is not a problem on your local machine, simply set it to true to monitor the processing status.
Arg:
subreddit_convs_concise: A conversation dataframe from a subreddit.
Return:
subreddit_tokens: A series with each row containing a list of word tokens from either post or comment.
"""
# copy the text column from original dataframe
subreddit_text = subreddit_convs_concise['text'].copy()
# expanding contraction
subreddit_text = subreddit_text.swifter.progress_bar(False).apply(lambda text: text.replace('\n', ' '))\
.swifter.progress_bar(False).apply(lambda text: ' '.join([contractions.fix(word) for word in text.split()]))
# lemmatize
lemmatizer = WordNetLemmatizer()
subreddit_tokens = subreddit_text.swifter.progress_bar(False).apply(lambda text: text_lemmatize(text, lemmatizer))
return subreddit_tokens
def compute_turn_distribution(df):
"""
Given a conversation dataframe from a subreddit (note that the dataframe is in the concise format indicated by Supervisor),
find out the dialog turn distribution.
Arg:
df: A conversation dataframe from a subreddit.
Return:
turn_dist: A series about dialog turn distribution.
"""
turn_dist = df.groupby('conversation id').size().value_counts().sort_index()
turn_dist = pd.DataFrame(turn_dist).reset_index().rename(columns = {'index': 'turns', 0: 'count'})
return turn_dist
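# Toy sketch (illustrative only): three conversations with 2, 2 and 3 dialog turns
# collapse into a turns/count frequency table.
_demo_convs = pd.DataFrame({'conversation id': [1, 1, 2, 2, 3, 3, 3],
                            'text': list('abcdefg')})
print(compute_turn_distribution(_demo_convs))
# expected shape of the output:
#    turns  count
# 0      2      2
# 1      3      1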
def extract_turn_10_more(df):
"""
Given a concise conversation dataframe, extract those with 10 or more dialog turns.
Arg:
df: A conversation dataframe from a subreddit.
Return:
turn_10_more: A dataframe containing only those conversations with 10 or more turns.
"""
turn_dist = df.groupby('conversation id').size()
turn_dist_10_more_index = turn_dist[turn_dist >= 10].index
turn_10_more = df[df['conversation id'].isin(list(turn_dist_10_more_index))]
return turn_10_more
def remove_newline(df):
"""
For each text in either post or comment, remove the newline tag.
Arg:
df: A given conversation dataframe from a certain subreddit.
Return:
df: A cleaner conversation dataframe without the newline tags.
"""
df['text'] = df['text'].swifter.progress_bar(False).apply(lambda text: text.replace('\n', ' '))
df['text'] = df['text'].swifter.progress_bar(False).apply(lambda text: text.replace("\\", ''))
return df
def remove_toxicity(df):
"""
    Used with parallel computing; each call handles one post at a time.
    In each post, detect toxicity and remove the toxic turn together with all following dialog turns.
Arg:
df: A given conversation dataframe from a certain subreddit.
Return:
df_clean: A cleaner version of the conversation dataframe with no toxic words.
"""
# initialize clean conversation dataframe
df_clean = pd.DataFrame(columns = ['conversation id', 'subreddit', 'post title', 'author', 'dialog turn', 'text'])
# consider each post
df_post = df.reset_index().drop('index', axis = 1)
clean_row_list = []
# iterate over each comment from df_link_id
for i, row in df_post.iterrows():
if predict_prob([row['text']])[0] > 0.95 and row['dialog turn'] > 1:
break
else:
clean_row_list.append({'conversation id': row['conversation id'], 'subreddit': row['subreddit'],
'post title': row['post title'], 'author': row['author'],
'dialog turn': row['dialog turn'], 'text': row['text']})
df_clean = df_clean.append(pd.DataFrame(clean_row_list))
return df_clean
def extract_toxicity(df):
"""
    Used with parallel computing; each call handles one post at a time.
    In each post, extract the toxic texts.
Arg:
df: A given conversation dataframe from a certain subreddit.
Return:
df_toxic: A conversation dataframe with exclusively toxic words.
"""
# initialize clean conversation dataframe
df_toxic = pd.DataFrame(columns = ['conversation id', 'subreddit', 'post title', 'author', 'dialog turn', 'text'])
# consider each post
df_post = df.reset_index().drop('index', axis = 1)
toxic_row_list = []
# iterate over each comment from df_link_id
for i, row in df_post.iterrows():
if predict_prob([row['text']])[0] > 0.95 and row['dialog turn'] > 1:
# record the toxic text
toxic_row_list.append({'conversation id': row['conversation id'], 'subreddit': row['subreddit'],
'post title': row['post title'], 'author': row['author'],
'dialog turn': row['dialog turn'], 'text': row['text']})
df_toxic = df_toxic.append( | pd.DataFrame(toxic_row_list) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
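# epoch 1370745748 s corresponds to 2013-06-09 02:42:28 UTC, the base of the expected Timestamps below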
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
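# a DatetimeIndex built from an int64 array reuses its memory by default, so in-place edits to arr
# are visible through the index; copy=True breaks that link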
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
| tm.assert_isinstance(result[0], Timestamp) | pandas.util.testing.assert_isinstance |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = | Series(data, dtype=float_numpy_dtype) | pandas.Series |
from kfp.components import InputPath, OutputPath
def get_ml_dataset(
features_dataset_path : InputPath('DataFrame'),
target_dataset_path : InputPath('DataFrame'),
tech_indi_dataset_path : InputPath('DataFrame'),
ml_dataset_path : OutputPath('DataFrame')
):
import pandas as pd
df_feats = | pd.read_pickle(features_dataset_path) | pandas.read_pickle |
#!/usr/bin/env python3
from functools import wraps
import os.path
import glog as log
import numpy as np
import pandas as pd
import datasets
CACHE_DIR = "./cache"
TOY_TABLES = {
"A": pd.DataFrame({"x": [1, 2, 3]}),
"B": pd.DataFrame({
"x": [1, 2, 2, 2, 4],
"y": [10, 20, 20, 30, 30],
"z": [100, 100, 100, 100, 200],
}),
"C": pd.DataFrame({"y": [10, 20, 20, 40]}),
"D": | pd.DataFrame({"z": [100, 100, 200, 300]}) | pandas.DataFrame |
from typing import Type, Callable, Tuple, Union
import numpy as np
import pandas as pd
import pytest
from py4j.java_gateway import JVMView
from keanu import set_deterministic_state
from keanu.context import KeanuContext
from keanu.vartypes import tensor_arg_types, primitive_types, numpy_types, pandas_types
from keanu.vertex import Gaussian, Const, UniformInt, Bernoulli, IntegerProxy, Double
from keanu.vertex.base import Vertex
@pytest.fixture
def jvm_view():
from py4j.java_gateway import java_import
jvm_view = KeanuContext().jvm_view()
java_import(jvm_view, "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GaussianVertex")
return jvm_view
def assert_vertex_value_equals_scalar(vertex: Vertex, expected_type: Type, scalar: primitive_types) -> None:
vertex_value = vertex.get_value()
assert vertex_value == scalar
assert type(vertex_value) == numpy_types
assert vertex_value.shape == ()
assert vertex_value.dtype == expected_type
def assert_vertex_value_equals_ndarray(vertex: Vertex, expected_type: Type, ndarray: numpy_types) -> None:
vertex_value = vertex.get_value()
expected_value = ndarray.astype(expected_type)
assert np.array_equal(vertex_value, expected_value)
assert np.issubdtype(vertex_value.dtype, expected_type)
def assert_vertex_value_equals_pandas(vertex: Vertex, expected_type: Type, pandas: pandas_types) -> None:
get_value = vertex.get_value()
expected_value = pandas.values.astype(expected_type).reshape(get_value.shape)
assert np.array_equal(get_value, expected_value)
assert np.issubdtype(get_value.dtype, expected_type)
def test_can_pass_scalar_to_vertex() -> None:
gaussian = Gaussian(0., 1.)
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_ndarray_to_vertex() -> None:
gaussian = Gaussian(np.array([0.1, 0.4]), np.array([0.4, 0.5]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_pandas_dataframe_to_vertex() -> None:
gaussian = Gaussian(pd.DataFrame(data=[0.1, 0.4]), pd.DataFrame(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2, 1)
def test_can_pass_pandas_series_to_vertex() -> None:
gaussian = Gaussian(pd.Series(data=[0.1, 0.4]), pd.Series(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_vertex_to_vertex(jvm_view: JVMView) -> None:
mu = Gaussian(0., 1.)
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", mu, Const(1.))
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_array_to_vertex(jvm_view: JVMView) -> None:
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", [3, 3], Const(0.), Const(1.))
sample = gaussian.sample()
assert sample.shape == (3, 3)
def test_cannot_pass_generic_to_vertex(jvm_view: JVMView) -> None:
class GenericExampleClass:
pass
with pytest.raises(ValueError, match=r"Can't parse generic argument. Was given {}".format(GenericExampleClass)):
Vertex( # type: ignore # this is expected to fail mypy
jvm_view.GaussianVertex, "gaussian", GenericExampleClass(), GenericExampleClass())
def test_int_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1, 2], [3, 4]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.int64 or value.dtype == np.int32
assert (value == ndarray).all()
def test_float_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1., 2.], [3., 4.]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert (value == ndarray).all()
def test_boolean_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[True, True], [False, True]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.bool_
assert (value == ndarray).all()
def test_scalar_vertex_value_is_a_numpy_array() -> None:
scalar = 1.
vertex = Const(scalar)
value = vertex.get_value()
assert type(value) == numpy_types
assert value.shape == ()
assert value.dtype == float
assert value == scalar
def test_vertex_sample_is_a_numpy_array() -> None:
mu = np.array([[1., 2.], [3., 4.]])
sigma = np.array([[.1, .2], [.3, .4]])
vertex = Gaussian(mu, sigma)
value = vertex.sample()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert value.shape == (2, 2)
def test_get_connected_graph() -> None:
gaussian = Gaussian(0., 1.)
connected_graph = set(gaussian.iter_connected_graph())
assert len(connected_graph) == 3
def test_id_str_of_downstream_vertex_is_higher_than_upstream() -> None:
hyper_params = Gaussian(0., 1.)
gaussian = Gaussian(0., hyper_params)
hyper_params_id = hyper_params.get_id()
gaussian_id = gaussian.get_id()
assert type(hyper_params_id) == tuple
assert type(gaussian_id) == tuple
assert hyper_params_id < gaussian_id
def test_construct_vertex_with_java_vertex() -> None:
java_vertex = Gaussian(0., 1.).unwrap()
python_vertex = Vertex._from_java_vertex(java_vertex)
assert tuple(java_vertex.getId().getValue()) == python_vertex.get_id()
def test_java_collections_to_generator() -> None:
gaussian = Gaussian(0., 1.)
java_collections = gaussian.unwrap().getConnectedGraph()
python_list = list(Vertex._to_generator(java_collections))
java_vertex_ids = [Vertex._get_python_id(java_vertex) for java_vertex in java_collections]
assert java_collections.size() == len(python_list)
assert all(type(element) == Double and element.get_id() in java_vertex_ids for element in python_list)
def test_get_vertex_id() -> None:
gaussian = Gaussian(0., 1.)
java_id = gaussian.unwrap().getId().getValue()
python_id = gaussian.get_id()
assert all(value in python_id for value in java_id)
def test_ids_are_reset() -> None:
gaussian = Gaussian(0., 1.)
set_deterministic_state()
gaussian2 = Gaussian(0., 1.)
assert gaussian.get_id() == gaussian2.get_id()
@pytest.mark.parametrize("vertex, expected_type", [(Gaussian(0., 1.), np.floating), (UniformInt(0, 10), np.integer),
(Bernoulli(0.5), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[4]]), assert_vertex_value_equals_pandas),
( | pd.DataFrame(data=[[5.]]) | pandas.DataFrame |
"""
See here https://stackoverflow.com/questions/52371329/fast-spearman-correlation-between-two-pandas-dataframes
Calculate the correlation between two matrices, row by row.
"""
from numba import njit
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from concurrent.futures import ProcessPoolExecutor, as_completed
import anndata
import scanpy as sc
import xarray as xr
from sklearn.impute import SimpleImputer
@njit
def _mean(a):
n = len(a)
b = np.empty(n)
for i in range(n):
b[i] = a[i].mean()
return b
@njit
def _std(a):
n = len(a)
b = np.empty(n)
for i in range(n):
b[i] = a[i].std()
return b
@njit
def _corr(a, b, row, col):
"""
Pearson correlation between rows of a and b; inputs must contain no NaN values.
"""
_, k = a.shape
mu_a = _mean(a)
mu_b = _mean(b)
sig_a = _std(a)
sig_b = _std(b)
out = np.zeros(shape=row.shape, dtype=np.float32)
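# Pearson correlation between row i of a and row j of b for every (i, j) pair listed in the COO coordinates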
for idx in range(out.size):
i = row[idx]
j = col[idx]
_sig_a = sig_a[i]
_sig_b = sig_b[j]
if _sig_a == 0 or _sig_b == 0:
# if any variable std == 0
out[idx] = np.nan
else:
out[idx] = (a[i] - mu_a[i]) @ (b[j] -
mu_b[j]) / k / _sig_a / _sig_b
return out
def _corr_preprocess(da, sample_mch, sample_mcg, cpu=1):
imputer = SimpleImputer(copy=False)
df = da.to_pandas()
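# fill missing values with the column mean; copy=False is relied on to modify df in place (checked by the assert below)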
imputer.fit_transform(df)
assert df.isna().values.sum() == 0
adata = anndata.AnnData(X=df.values,
obs= | pd.DataFrame([], index=df.index) | pandas.DataFrame |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 5.0.3
# in conjunction with Tcl version 8.6
# Feb 08, 2021 09:54:12 PM +03 platform: Windows NT
# -*- coding: utf-8 -*-
from logging import disable
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
import chromedriver_autoinstaller
import time
import pandas as pd
from datetime import timedelta
from datetime import datetime
# import tkinter as tk
import tkinter as tk
from tkinter import messagebox, DISABLED, NORMAL, simpledialog
from tkinter.filedialog import askopenfilename
import sys
import os
#Driver setup
path = "driver/config.csv"
if not os.path.exists(path):
data={
"username":[],
"password":[],
"late_time":[]
}
df = pd.DataFrame(data=data)
df.to_csv("driver/config.csv", encoding="utf-8-sig", index=False)
import madrasti_fetcher_support
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = tk.Tk()
top = Toplevel1 (root)
madrasti_fetcher_support.init(root, top)
root.mainloop()
w = None
def create_Toplevel1(rt, *args, **kwargs):
'''Starting point when module is imported by another module.
Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
global w, w_win, root
#rt = root
root = rt
w = tk.Toplevel (root)
top = Toplevel1 (w)
madrasti_fetcher_support.init(w, top, *args, **kwargs)
return (w, top)
def destroy_Toplevel1():
global w
w.destroy()
w = None
class Toplevel1:
def donate(self):
window = tk.Toplevel()
window.geometry("290x294+688+270")
window.minsize(148, 1)
window.maxsize(1684, 1025)
window.resizable(1, 1)
window.title("BuY me A COFFEE!")
window.configure(background="#d9d9d9")
window.iconbitmap("driver/icon-rules-book-96.ico")
self.Btndonate = tk.Button(window)
self.Btndonate.place(relx=-0.034, rely=-0.035, height=303, width=306)
self.Btndonate.configure(activebackground="#ececec")
self.Btndonate.configure(activeforeground="#000000")
self.Btndonate.configure(background="#d9d9d9")
self.Btndonate.configure(disabledforeground="#a3a3a3")
self.Btndonate.configure(foreground="#000000")
self.Btndonate.configure(highlightbackground="#d9d9d9")
self.Btndonate.configure(highlightcolor="black")
global _img22
_img22 = tk.PhotoImage(file="driver/qr.png")
self.Btndonate.configure(image=_img22)
self.Btndonate.configure(pady="0")
self.Btndonate.configure(relief="flat")
self.Btndonate.configure(command=window.destroy)
def startapp(self):
global driver, late_time
# messagebox.showerror("xx","errrrrrrrrrrrrr")
self.Btnstart["state"] = DISABLED
self.Btnkillchrome["state"] = NORMAL
self.Btngheyab["state"] = NORMAL
self.Btnaddgeyab["state"] = NORMAL
self.Btnhw["state"] = NORMAL
self.Btnactive["state"] = NORMAL
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "normal"
options = Options()
options.add_argument("--log-level=3")
chromedriver_autoinstaller.install(True) # install driver
driver = webdriver.Chrome(options=options, desired_capabilities=caps)
# driver = webdriver.Chrome(options=options, desired_capabilities=caps, executable_path='driver/chromedriver.exe')
driver.implicitly_wait(30)
base_url = "https://www.google.com/"
verificationErrors = []
accept_next_alert = True
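# config.csv holds a single row written by settings(): username, password and the lateness threshold in minutes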
df_settings = pd.read_csv("driver/config.csv")
username = str(df_settings.iloc[0,0])
password = str(df_settings.iloc[0,1])
late_time = df_settings.iloc[0,2]
if username == " " and password == " ":
driver.get("https://schools.madrasati.sa/")
else:
driver.get("https://schools.madrasati.sa/")
time.sleep(4)
# Login
driver.find_element_by_xpath("//*[@id='Beneficiaries']/div/div[1]/div[2]/a/div/div").click()
driver.find_element_by_id("i0116").send_keys(username)
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
time.sleep(1)
driver.find_element_by_id("i0118").send_keys(password)
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
time.sleep(1)
driver.find_element_by_id("idSIButton9").click()
def settings(self):
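# the Arabic prompts ask for the Madrasati account e-mail, the password, and the lateness threshold in minutes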
_username_get = simpledialog.askstring("اعدادات","ادخل البريد الالكتروني لحسابك في منصة مدرستي:")
_username = [_username_get]
if _username_get == None:
pass
else:
_password_get = simpledialog.askstring("<PASSWORD>","ادخل كلمة المرور", show="*")
_password = [_password_get]
if _password_get == None:
pass
else:
_late_time_get = simpledialog.askstring("اعدادات","يرصد للطالب (تأخر) اذا مر على زمن الحصة الدراسية (ضع الرقم بالدقائق)")
if _late_time_get == "" or (_late_time_get == None) or len(_late_time_get) > 2:
_late_time_get = "15"
_late_time = [_late_time_get]
if (_username_get == "") or (_password_get == ""):
_username = " "
_password = " "
if (_username_get == None) or (_password_get == None):
pass
else:
data={
"username":_username,
"password":_password,
"late_time":_late_time
}
df_settings = pd.DataFrame(data)
df_settings.to_csv("driver/config.csv", encoding = "UTF-8-sig", index = False)
messagebox.showinfo("رسالة","تم حفظ الاعدادات بنجاح.")
def reformat_time(self):
# Clean DF for reformat Time
df = pd.DataFrame()
file_path = askopenfilename(title="choose a file", initialdir="Downloads", filetype=[("Csv file","*.csv")])
df = pd.read_csv(f"{file_path}", sep="\t" ,encoding="utf-16le", engine="python")
df.reset_index(inplace=True)
df.columns = ["Full Name","User Action","Timestamp"]
# print(df)
# df = pd.read_clipboard()
clean_df = | pd.DataFrame(columns=["Full Name", "User Action", "Timestamp"]) | pandas.DataFrame |
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import pandas as pd
from models import RnnVersion3
import gc
from keras.models import Model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
from tqdm import tqdm_notebook
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv',names=['uId', 'appId'])
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv') # resampled usage_app table
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv') # app id vocabulary for the usage table
appId = pd.read_csv('../../data/processed_data/appId.csv') # app id vocabulary for the activation table
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')
import ast
from tqdm import tqdm
usage_train = []
for idx in tqdm(usage_list.appId):
usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train
user_app_actived.drop('appId',axis=1,inplace=True)
usage_list.drop('appId',axis=1,inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')
result = []
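# union each user's activated-app list with their usage-app list; fall back to the activated list when usage data is missing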
for index,row in tqdm(user_app_actived.iterrows()):
try:
result.append(np.sort(list(set(row['app_list_x']) | set(row['app_list_y']))))
except:
result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x','app_list_y'],axis=1,inplace =True)
del usage_list
gc.collect()
x_train = pd.read_csv('../../data/original_data/age_train.csv',names=['uId','age_group'],dtype={'uId':np.int32, 'age_group':np.int8})
x_test = pd.read_csv('../../data/original_data/age_test.csv',names=['uId'],dtype={'uId':np.int32})
x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
y_train = x_train.age_group - 1
x_train = x_train.drop('age_group',axis=1)
del user_app_actived
gc.collect()
usage_appId = | pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv') | pandas.read_csv |
from sail.sobol_lib import i4_sobol_generate
import numpy as np
import pandas as pd
def sobol2indx(sobSet, sobPoints, d, edges):
# sobSet is DataFrame
# sobPoints is list with indices
# print("sobSet")
# print(sobSet)
# print("sobPoints")
# print(sobPoints)
# print("edges")
# print(edges)
sampleCoords = sobSet.iloc[sobPoints, :] # TODO: Error : Positional Indexes out of bounds
# print("sampleCoords")
# print(sampleCoords)
nans = np.empty([len(sobPoints),d.nDims])
nans[:] = np.nan
binIndx = | pd.DataFrame(data=nans) | pandas.DataFrame |
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats, integrate
# load msncodes
msncodes = | pd.read_csv("data/csv/original/msncodes.csv") | pandas.read_csv |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy
from xgboost import XGBClassifier
from sklearn.cross_validation import train_test_split
#from sklearn.metrics import roc_auc_score
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
np.random.seed(42)
df_train, df_test = pd.read_csv('train.csv'), pd.read_csv('test.csv')
feature_cols = df_train.columns[1:-1]
X_train = df_train[feature_cols]
y_train = df_train['TARGET']
X_test = df_test[feature_cols]
id_test = df_test['ID']
sd = X_train.std()
empties = sd[sd==0].index
X_train, X_test = X_train.drop(empties,1), X_test.drop(empties,1)
'''
v_cols = None
for col in X_train.columns:
if not v_cols:
v_cols = [col]
else:
valid = True
for valid_col in v_cols:
if all(X_train[col]==X_train[valid_col]):
valid=False
break
if valid:
v_cols.append(col)
X_train, X_test = X_train[v_cols], X_test[v_cols]
'''
dependencies = []
feature_cols = X_train.columns
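# QR decomposition flags linearly dependent feature columns: a near-zero diagonal entry of R means
# that column is a linear combination of earlier ones, so it is dropped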
Q, R = np.linalg.qr(np.matrix(X_train))
indep_locs = np.where(abs(R.diagonal())>1e-7)[1]
for i, col in enumerate(feature_cols):
if i not in indep_locs:
dependencies.append(col)
X_train, X_test = X_train.drop(dependencies,1), X_test.drop(dependencies,1)
'''
clf = ExtraTreesClassifier()
clf.fit(train, labels)
model = SelectFromModel(clf, prefit=True)
return (pd.DataFrame(model.transform(train)),
pd.DataFrame(model.transform(test)))
'''
booster = XGBClassifier(
n_estimators = 409,
learning_rate = 0.0202048,
max_depth = 5,
subsample = 0.6815,
colsample_bytree = 0.701
)
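# hold out a stratified 25% validation split so boosting progress can be monitored on AUC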
X_fit, X_val, y_fit, y_val = train_test_split(X_train, y_train,
test_size=0.25, stratify=y_train)
booster.fit(X_fit, y_fit, eval_metric="auc", eval_set=[(X_val, y_val)])
# predicting
y_pred = booster.predict_proba(X_test)[:,1]
submission = | pd.DataFrame({'TARGET':y_pred}, index=id_test) | pandas.DataFrame |
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
import pandas as pd
# from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
roc_curve,
roc_auc_score,
precision_recall_curve,
auc,
average_precision_score,
)
def make_dummies(y):
y_onehot = | pd.get_dummies(y, prefix="bin") | pandas.get_dummies |
import os
import unittest
import pandas as pd
import argopandas.index as dfi
from argopandas.mirror import FileMirror
class TestDataFrameIndex(unittest.TestCase):
def setUp(self) -> None:
this_file = os.path.dirname(__file__)
mirror_dir = "argo-test-mirror"
self.mirror = FileMirror(os.path.join(this_file, mirror_dir))
def test_subset(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_prof.nc'}])
df = dfi.DataFrameIndex(df)
self.assertIsInstance(df[[]], dfi.DataFrameIndex)
self.assertIsInstance(df.iloc[[], :], dfi.DataFrameIndex)
def test_info(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_prof.nc'}])
df = dfi.DataFrameIndex(df, _mirror=self.mirror)
self.assertIn('DATA_TYPE', df.info.keys())
def test_prof(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/profiles/D2900313_002.nc'}])
df = dfi.ProfIndex(df, _mirror=self.mirror)
self.assertIn('PRES', df.levels.keys())
self.assertIn('PRES', df.levels_('PRES').keys())
self.assertIn('PLATFORM_NUMBER', df.prof.keys())
self.assertIn('PLATFORM_NUMBER', df.prof_('PLATFORM_NUMBER').keys())
self.assertIn('PARAMETER', df.calib.keys())
self.assertIn('STATION_PARAMETERS', df.param.keys())
self.assertIn('HISTORY_DATE', df.history.keys())
def test_traj(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_Rtraj.nc'}])
df = dfi.TrajIndex(df, _mirror=self.mirror)
self.assertIn('LATITUDE', df.measurement.keys())
self.assertIn('LATITUDE', df.measurement_('LATITUDE').keys())
self.assertIn('JULD_DESCENT_START', df.cycle.keys())
self.assertIn('JULD_DESCENT_START', df.cycle_('JULD_DESCENT_START').keys())
self.assertIn('TRAJECTORY_PARAMETERS', df.param.keys())
self.assertIn('HISTORY_DATE', df.history.keys())
def test_tech(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_tech.nc'}])
df = dfi.TechIndex(df, _mirror=self.mirror)
self.assertIn('CYCLE_NUMBER', df.tech_param.keys())
def test_meta(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_meta.nc'}])
df = dfi.MetaIndex(df, _mirror=self.mirror)
self.assertIn('CONFIG_PARAMETER_VALUE', df.config_param.keys())
self.assertIn('CONFIG_PARAMETER_NAME', df.config_param.keys())
self.assertIn('CONFIG_MISSION_NUMBER', df.missions.keys())
self.assertIn('TRANS_SYSTEM', df.trans_system.keys())
self.assertIn('POSITIONING_SYSTEM', df.positioning_system.keys())
self.assertIn('LAUNCH_CONFIG_PARAMETER_NAME', df.launch_config_param.keys())
self.assertIn('SENSOR', df.sensor.keys())
self.assertIn('PARAMETER', df.param.keys())
def test_zero_length(self):
df = pd.DataFrame({'file': []})
df = dfi.DataFrameIndex(df)
self.assertEqual({k: list(v) for k, v in df.info.items()}, {'file': []})
class TestDataFrameIndexHelpers(unittest.TestCase):
def setUp(self) -> None:
# recs = prof_all.iloc[[0, 1000, 100000]].to_records()
# [{k: r[k] for k in prof_all.columns} for r in recs]
records = [
{
'file': 'aoml/13857/profiles/R13857_001.nc',
'date': pd.Timestamp('1997-07-29 20:03:00+0000', tz='UTC'),
'latitude': 0.267,
'longitude': -16.032,
'ocean': 'A',
'profiler_type': 845,
'institution': 'AO',
'date_update': pd.Timestamp('2018-10-11 18:05:20+0000', tz='UTC')
},
{
'file': 'aoml/15854/profiles/R15854_030.nc',
'date': pd.Timestamp('1998-07-01 02:22:54+0000', tz='UTC'),
'latitude': -5.997,
'longitude': -9.028,
'ocean': 'A',
'profiler_type': 845,
'institution': 'AO',
'date_update': pd.Timestamp('2018-10-11 18:11:16+0000', tz='UTC')
},
{
'file': 'aoml/1901499/profiles/D1901499_139.nc',
'date': pd.Timestamp('2015-03-09 07:39:02+0000', tz='UTC'),
'latitude': 7.959,
'longitude': -52.677,
'ocean': 'A',
'profiler_type': 851,
'institution': 'AO',
'date_update': | pd.Timestamp('2018-07-17 10:28:15+0000', tz='UTC') | pandas.Timestamp |
import logging
import math
import numpy as np
import pandas as pd
from numpy import savetxt
from common.constants import Constants
from sklearn.metrics.pairwise import cosine_similarity
class Trainner:
def __init__(self):
self.logger = logging.getLogger(__name__)
def convert_rating_to_user_item(self, file_name):
try:
path = Constants.TRAINING_SUB_DATASETS_DIRECTORY
path_file_in = path + file_name
ratings = pd.read_csv(path_file_in, delimiter=',')
file_user_hotel = "user_hotel_" + file_name
users = ratings.loc[0:, 'user_id'].unique()
hotels = ratings.loc[0:, 'hotel_id'].unique()
matrix_rating = pd.DataFrame(index=users, columns=hotels, dtype='float64')
total_row = len(ratings)
for i in range(0, total_row):
try:
userId = ratings.loc[i, 'user_id']
hotelId = ratings.loc[i, 'hotel_id']
score = ratings.loc[i, 'rate_star']
matrix_rating.loc[userId, hotelId] = score
self.logger.info(str(i + 1) + "/" + str(total_row))
except Exception as e2:
self.logger.error(e2)
for i in range(0, len(users)):
for j in range(0, len(hotels)):
userId = users[i]
hotelId = hotels[j]
score = matrix_rating.loc[userId, hotelId]
if math.isnan(score):
matrix_rating.loc[userId, hotelId] = 0
path_normal_expor = Constants.USER_ITEM_DATASETS_NORMAL_DIRECTORY + file_user_hotel
matrix_rating.to_csv(path_normal_expor, sep=',')
# matrix_rating_svd.insert(0, " ", np.array(users))
logging.info("Parse to csv done !")
# tinh svd
self.logger.info("Start calculate sdv")
userIds = ratings.loc[0:, 'user_id'].unique().tolist()
hotelIds = ratings.loc[0:, 'hotel_id'].unique().tolist()
self.calculate_sdv_matrix_rating(matrix_rating, hotelIds, userIds, file_user_hotel)
except Exception as e1:
self.logger.exception(e1)
def calculate_sdv_matrix_rating(self, matrix_rating, items, users, path_file_user_hotel):
u, s, vh = np.linalg.svd(matrix_rating, full_matrices=False)
smat = np.diag(s)
matrix_A_xap_xi = u.dot(smat).dot(vh)
matrix_A_xap_xi = pd.DataFrame.from_records(matrix_A_xap_xi, index=users, columns=items)
path_out = Constants.USER_ITEM_DATASETS_DIRECTORY + path_file_user_hotel
matrix_A_xap_xi.to_csv(path_out, sep=',')
def calculate_similar_item_item(self, file_name):
path = Constants.USER_ITEM_DATASETS_DIRECTORY
file_name_user_hotel = path + file_name
user_item_rating = pd.read_csv(file_name_user_hotel, index_col=0)
items = user_item_rating.columns.tolist()
item_base = | pd.DataFrame(index=items, columns=items) | pandas.DataFrame |
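# --- Added sketch (not part of the original class): the excerpt cuts calculate_similar_item_item
# --- short, so the cosine-similarity fill below is an assumption about how the item-item table
# --- started above could be completed with the sklearn import at the top of this module.
def _fill_item_similarity(user_item_rating, items):
    # rows of the rating matrix are users and columns are items,
    # so transpose before comparing items to each other
    sim = cosine_similarity(user_item_rating.T.values)
    return pd.DataFrame(sim, index=items, columns=items)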
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-30
"""
import gc
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
import six, pdb
import pandas as pd
from pandas.io.json import json_normalize
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorBasicDerivation(object):
"""
基础衍生类因子
"""
def __init__(self):
__str__ = 'factor_basic_derivation'
self.name = '基础衍生'
self.factor_type1 = '基础衍生'
self.factor_type2 = '基础衍生'
self.description = '基础衍生类因子'
@staticmethod
def EBIT(tp_derivation, factor_derivation, dependencies=['total_profit', 'interest_expense', 'interest_income', 'financial_expense']):
"""
:name: 息税前利润(MRQ)
:desc: [EBIT_反推法]息税前利润(MRQ) = 利润总额 + 利息支出 - 利息收入
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
func = lambda x: (x[0] + x[1] - x[2]) if x[1] is not None and x[2] is not None else (x[0] + x[3] if x[3] is not None else None)
management['EBIT'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBIT']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EBITDA(tp_derivation, factor_derivation, dependencies=['total_profit', 'income_tax'],
dependency=['EBIT']):
"""
:name: 息前税后利润(MRQ)
:desc: 息前税后利润(MRQ)=EBIT(反推法)*(if 所得税&利润总额都>0,则1-所得税率,否则为1),所得税税率 = 所得税/ 利润总额
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: None if x[0] is None or x[1] is None or x[2] is None or x[1] == 0 else (x[0] * (1 - x[2] / x[1]) if x[1] > 0 and x[2] > 0 else x[0])
management['EBITDA'] = management[dependency].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBITDA']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def DepAndAmo(tp_derivation, factor_derivation, dependencies=['fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization']):
"""
:name: 折旧和摊销(MRQ)
:desc: 固定资产折旧 + 无形资产摊销 + 长期待摊费用摊销
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['DepAndAmo'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['DepAndAmo']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFF(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'cash_equivalents',
'total_current_liability',
'shortterm_loan',
'shortterm_bonds_payable',
'non_current_liability_in_one_year',
'total_current_assets_pre',
'cash_equivalents_pre',
'total_current_liability_pre',
'shortterm_loan_pre',
'shortterm_bonds_payable_pre',
'non_current_liability_in_one_year_pre',
'fix_intan_other_asset_acqui_cash',
],
dependency=['EBITDA', 'DepAndAmo']):
"""
:name: 企业自由现金流量(MRQ)
:desc: 息前税后利润+折旧与摊销-营运资本增加-资本支出 = 息前税后利润+ 折旧与摊销-营运资本增加-构建固定无形和长期资产支付的现金, 营运资本 = 流动资产-流动负债, 营运资金=(流动资产-货币资金)-(流动负债-短期借款-应付短期债券-一年内到期的长期借款-一年内到期的应付债券)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
        # working capital = (current assets - cash) - (current liabilities - short-term loans
        # - short-term bonds payable - non-current liabilities due within one year);
        # FCFF = after-tax EBIT + D&A - increase in working capital - capital expenditure
        # (cash paid to acquire fixed, intangible and other long-term assets)
        func = lambda x: (x[0] + x[1]
                          - (((x[2] - x[3]) - (x[4] - x[5] - x[6] - x[7]))
                             - ((x[8] - x[9]) - (x[10] - x[11] - x[12] - x[13])))
                          - x[14]) if all(v is not None for v in x) else None
management['FCFF'] = management[dependency].apply(func, axis=1)
management = management[['FCFF']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFE(tp_derivation, factor_derivation, dependencies=['borrowing_repayment',
'cash_from_borrowing',
'cash_from_bonds_issue'],
dependency=['FCFF']):
"""
:name: 股东自由现金流量(MRQ)
:desc: 企业自由现金流量-偿还债务所支付的现金+取得借款收到的现金+发行债券所收到的现金(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] + x[2] + x[3] if x[0] is not None and x[1] is not None and \
x[2] is not None and x[3] is not None else None
management['FCFE'] = management[dependency].apply(func, axis=1)
management = management[['FCFE']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NonRecGainLoss(tp_derivation, factor_derivation, dependencies=['np_parent_company_owners', 'np_cut']):
"""
:name: 非经常性损益(MRQ)
:desc: 归属母公司净利润(MRQ) - 扣非净利润(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NonRecGainLoss'] = management[dependencies].apply(func, axis=1)
management = management[['NonRecGainLoss']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetOptInc(tp_derivation, factor_derivation, sw_industry,
dependencies_er=['total_operating_revenue', 'total_operating_cost'],
dependencies_yh=['commission_income', 'net_profit', 'other_business_profits', 'operating_cost'],
dependencies_zq=['commission_income', 'net_profit', 'other_operating_revenue', 'operating_cost'],
dependencies_bx=['operating_revenue', 'operating_cost', 'fair_value_variable_income',
'investment_income', 'exchange_income']):
"""
:name: 经营活动净收益(MRQ)
:desc: 新准则(一般企业):营业总收入-营业总成本"
:unit: 元
:view_dimension: 10000
"""
industry2_set = ['430100', '370100', '410400', '450500', '640500', '510100', '620500', '610200', '330200',
'280400', '620400', '450200', '270500', '610300', '280300', '360300', '410100', '370400',
'280200', '730200', '710200', '720200', '640400', '270300', '110400', '220100', '240300',
'270400', '710100', '420100', '420500', '420400', '370600', '720100', '640200', '220400',
'330100', '630200', '610100', '370300', '410300', '220300', '640100', '490300', '450300',
'220200', '370200', '460200', '420200', '460100', '360100', '620300', '110500', '650300',
'420600', '460300', '720300', '270200', '630400', '410200', '280100', '210200', '420700',
'650200', '340300', '220600', '110300', '350100', '620100', '210300', '240200', '340400',
'240500', '360200', '270100', '230100', '370500', '110100', '460400', '110700', '110200',
'630300', '450400', '220500', '730100', '640300', '630100', '240400', '420800', '650100',
'350200', '620200', '210400', '420300', '110800', '360400', '650400', '110600', '460500',
'430200', '210100', '240100', '250100', '310300', '320200', '310400', '310200', '320100',
'260500', '250200', '450100', '470200', '260200', '260400', '260100', '440200', '470400',
'310100', '260300', '220700', '470300', '470100', '340100', '340200', '230200']
dependencies = list(set(dependencies_er + dependencies_yh + dependencies_bx + dependencies_zq))
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
management = pd.merge(management, sw_industry, how='outer', on='security_code').set_index('security_code')
if len(management) <= 0:
return None
management_tm = pd.DataFrame()
func = lambda x: x[0] + x[1] + x[2] - x[3] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None else None
# 银行 ['440100', '480100']
management_yh = management[management['industry_code2'].isin(['440100', '480100'])]
management_yh['NetOptInc'] = management_yh[dependencies_yh].apply(func, axis=1)
management_tm = management_tm.append(management_yh)
# 证券['440300', '490100']
management_zq = management[management['industry_code2'].isin(['440300', '490100'])]
management_zq['NetOptInc'] = management_zq[dependencies_zq].apply(func, axis=1)
management_tm = management_tm.append(management_zq)
func1 = lambda x: x[0] - x[1] - x[2] - x[3] - x[4] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None else None
# 保险['440400', '490200']
management_bx = management[management['industry_code2'].isin(['440400', '490200'])]
management_bx['NetOptInc'] = management_bx[dependencies_bx].apply(func1, axis=1)
management_tm = management_tm.append(management_bx)
func2 = lambda x: None if x[0] is None else (x[0] if x[1] is None else x[0] - x[1])
management_er = management[management['industry_code2'].isin(industry2_set)]
management_er['NetOptInc'] = management_er[dependencies_er].apply(func2, axis=1)
management_tm = management_tm.append(management_er)
dependencies = dependencies + ['industry_code2']
management_tm = management_tm[['NetOptInc']]
factor_derivation = pd.merge(factor_derivation, management_tm, how='outer', on="security_code")
return factor_derivation
@staticmethod
def WorkingCap(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'total_current_liability']):
"""
:name: 运营资本(MRQ)
:desc: 流动资产(MRQ)-流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['WorkingCap'] = management[dependencies].apply(func, axis=1)
management = management[['WorkingCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TangibleAssets(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets']):
"""
:name: 有形资产(MRQ)
:desc: 股东权益(不含少数股东权益)- (无形资产 + 开发支出 + 商誉 + 长期待摊费用 + 递延所得税资产)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - (x[1] + x[2] + x[3] + x[4] + x[5]) if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None and \
x[5] is not None else None
management['TangibleAssets'] = management[dependencies].apply(func, axis=1)
management = management[['TangibleAssets']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def RetainedEarnings(tp_derivation, factor_derivation, dependencies=['surplus_reserve_fund',
'retained_profit']):
"""
:name: 留存收益(MRQ)
:desc: 盈余公积MRQ + 未分配利润MRQ
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['RetainedEarnings'] = management[dependencies].apply(func, axis=1)
management = management[['RetainedEarnings']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeCurLb(tp_derivation, factor_derivation, dependencies=['bill_receivable',
'accounts_payable',
'advance_peceipts',
'salaries_payable',
'taxs_payable',
'accrued_expenses',
'other_payable',
'long_term_deferred_income',
'other_current_liability',
]):
"""
:name: 无息流动负债(MRQ)
:desc: 无息流动负债 = 应收票据+应付帐款+预收款项+应付职工薪酬+应交税费+其他应付款+预提费用+递延收益+其他流动负债
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8] if x[0] is not None or \
x[1] is not None or \
x[2] is not None or \
x[3] is not None or \
x[4] is not None or \
x[5] is not None or \
x[6] is not None or \
x[7] is not None or \
x[8] is not None else None
management['InterestFreeCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeNonCurLb(tp_derivation, factor_derivation, dependencies=['total_non_current_liability',
'longterm_loan',
'bonds_payable']):
"""
:name: 无息非流动负债(MRQ)
:desc: 非流动负债合计 - 长期借款 - 应付债券
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['InterestFreeNonCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeNonCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestBearingLiabilities(tp_derivation, factor_derivation, dependencies=['total_liability'],
dependency=['InterestFreeCurLb', 'InterestFreeNonCurLb']):
"""
:name: 带息负债(MRQ)
:desc: 负债合计-无息流动负债-无息非流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependencies + dependency
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and \
x[1] is not None and \
x[2] is not None else None
management['InterestBearingLiabilities'] = management[dependency].apply(func, axis=1)
management = management[['InterestBearingLiabilities']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetDebt(tp_derivation, factor_derivation, dependencies=['cash_equivalents'],
dependency=['InterestBearingLiabilities']):
"""
:name: 净债务(MRQ)
:desc: 净债务 = 带息债务(MRQ) - 货币资金(MRQ)。 其中,带息负债 = 短期借款 + 一年内到期的长期负债 + 长期借款 + 应付债券 + 应付利息
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetDebt'] = management[dependency].apply(func, axis=1)
management = management[['NetDebt']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EquityPC(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners']):
"""
:name: 归属于母公司的股东权益(MRQ)
:desc: 归属于母公司的股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
management = management.rename(columns={'equities_parent_company_owners': 'EquityPC'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalInvestedCap(tp_derivation, factor_derivation, dependencies=['total_owner_equities' ],
dependency=['InterestBearingLiabilities']):
"""
:name: 全部投入资本(MRQ)
:desc: 股东权益+(负债合计-无息流动负债-无息长期负债)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
dependency = dependency + dependencies
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['TotalInvestedCap'] = management[dependency].apply(func, axis=1)
management = management[['TotalInvestedCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalAssets(tp_derivation, factor_derivation, dependencies=['total_assets']):
"""
:name: 资产总计(MRQ)
:desc: 资产总计(MRQ) balance
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_assets': 'TotalAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalFixedAssets(tp_derivation, factor_derivation, dependencies=['total_fixed_assets_liquidation']):
"""
:name: 固定资产合计(MRQ)
:desc: 固定资产合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_fixed_assets_liquidation': 'TotalFixedAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalLib(tp_derivation, factor_derivation, dependencies=['total_liability']):
"""
:name: 负债合计(MRQ)
:desc: 负债合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_liability': 'TotalLib'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def ShEquity(tp_derivation, factor_derivation, dependencies=['total_owner_equities']):
"""
:name: 股东权益(MRQ)
:desc: 股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_owner_equities': 'ShEquity'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def CashAndCashEqu(tp_derivation, factor_derivation, dependencies=['cash_and_equivalents_at_end']):
"""
:name: 期末现金及现金等价物(MRQ)
:desc: 期末现金及现金等价物(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'cash_and_equivalents_at_end': 'CashAndCashEqu'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue']):
"""
:name: 营业总收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_revenue': 'SalesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalOptCostTTM(tp_derivation, factor_derivation, dependencies=['total_operating_cost']):
"""
:name: 营业总成本(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总成本”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_cost': 'TotalOptCostTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptIncTTM(tp_derivation, factor_derivation, dependencies=['operating_revenue']):
"""
:name: 营业收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_revenue': 'OptIncTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def GrossMarginTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 毛利(TTM) 营业毛利润
:desc: 根据截止指定日已披露的最新报告期“毛利”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: (x[0] - x[1]) / x[1] if x[1] != 0 and x[1] is not None else None
management['GrossMarginTTM'] = management[dependencies].apply(func, axis=1)
management = management[['GrossMarginTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesExpensesTTM(tp_derivation, factor_derivation, dependencies=['sale_expense']):
"""
:name: 销售费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“销售费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
        management = management.rename(columns={'sale_expense': 'SalesExpensesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AdmFeeTTM(tp_derivation, factor_derivation, dependencies=['administration_expense']):
"""
:name: 管理费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“管理费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'administration_expense': 'AdmFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FinFeeTTM(tp_derivation, factor_derivation, dependencies=['financial_expense']):
"""
:name: 财务费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“财务费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'financial_expense': 'FinFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def PerFeeTTM(tp_derivation, factor_derivation, dependencies=['sale_expense',
'administration_expense',
'financial_expense',
]):
"""
:name: 期间费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“期间费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['PerFeeTTM'] = management[dependencies].apply(func, axis=1)
management = management[['PerFeeTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestExpTTM(tp_derivation, factor_derivation, dependencies=['interest_expense']):
"""
:name: 利息支出(TTM)
:desc: 根据截止指定日已披露的最新报告期“利息支出”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'interest_expense': 'InterestExpTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def MinorInterestTTM(tp_derivation, factor_derivation, dependencies=['minority_profit']):
"""
:name: 少数股东损益(TTM)
:desc: 根据截止指定日已披露的最新报告期“少数股东损益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'minority_profit': 'MinorInterestTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AssetImpLossTTM(tp_derivation, factor_derivation, dependencies=['asset_impairment_loss']):
"""
:name: 资产减值损失(TTM)
:desc: 根据截止指定日已披露的最新报告期“资产减值损失”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'asset_impairment_loss': 'AssetImpLossTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromOptActTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 经营活动净收益(TTM)
:desc: 根据截止指定日已披露的最新报告期“经营活动净收益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetIncFromOptActTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromOptActTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromValueChgTTM(tp_derivation, factor_derivation, dependencies=['fair_value_variable_income',
'investment_income',
'exchange_income',
]):
"""
:name: 价值变动净收益(TTM)
:desc: 公允价值变动净收益+投资净收益+汇兑净收益
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['NetIncFromValueChgTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromValueChgTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptProfitTTM(tp_derivation, factor_derivation, dependencies=['operating_profit']):
"""
:name: 营业利润(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业利润”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_profit': 'OptProfitTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetNonOptIncAndExpTTM(tp_derivation, factor_derivation, dependencies=['non_operating_revenue',
'non_operating_expense', ]):
"""
:name: 营业外收支净额(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业外收支净额”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1]
management['NetNonOptIncAndExpTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetNonOptIncAndExpTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EBITTTM(tp_derivation, factor_derivation, dependencies=['total_profit',
'interest_expense']):
"""
:name: 息税前利润(TTM)
:desc: [EBIT_反推]息税前利润 = 利润总额+利息支出
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1]
management['EBITTTM'] = management[dependencies].apply(func, axis=1)
management = management[['EBITTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def IncTaxTTM(tp_derivation, factor_derivation, dependencies=['income_tax']):
"""
:name: 所得税(TTM)
:desc:根据截止指定日已披露的最新报告期“所得税”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'income_tax': 'IncTaxTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalProfTTM(tp_derivation, factor_derivation, dependencies=['total_profit']):
"""
:name: 利润总额(TTM)
:desc: 根据截止指定日已披露的最新报告期“利润总额”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_profit': 'TotalProfTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncTTM(tp_derivation, factor_derivation, dependencies=['net_profit']):
"""
:name: 净利润(TTM)
:desc: 根据截止指定日已披露的最新报告期“净利润(含少数股东权益)”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'net_profit': 'NetIncTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetProfToPSTTM(tp_derivation, factor_derivation, dependencies=['np_parent_company_owners']):
"""
:name: 归属母公司股东的净利润(TTM)
:desc: 根据截止指定日已披露的最新报告期“归属母公司股东的净利润”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。注:交易日匹配财报数据披露日,业绩快报数据不参与计算
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'np_parent_company_owners': 'NetProfToPSTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetProfAfterNonRecGainsAndLossTTM(tp_derivation, factor_derivation, dependencies=['np_cut']):
"""
:name: 扣除非经常性损益后的净利润(TTM)
:desc: 根据截止指定日已披露的最新报告期“扣除非经常性损益后的净利润”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'np_cut': 'NetProfAfterNonRecGainsAndLossTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EBITFORPTTM(tp_derivation, factor_derivation, dependencies=['operating_revenue',
'operating_tax_surcharges',
'operating_cost',
'sale_expense',
'administration_expense',
'rd_expenses',
'service_commission_fee',
'asset_impairment_loss',
'other_earnings',
]):
"""
缺坏账损失, 存货跌价损失
:name: EBIT(TTM)
:desc: (营业收入-营业税金及附加)-(营业成本+利息支出+手续费及佣金支出+销售费用+管理费用+研发费用+坏账损失+存货跌价损失) +其他收益
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] - (x[2] + x[3] + x[4] + x[5] + x[6] + x[7]) + x[8] \
if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None and \
x[5] is not None and \
x[6] is not None and \
x[7] is not None and \
x[8] is not None else None
management['EBITFORPTTM'] = management[dependencies].apply(func, axis=1)
management = management[['EBITFORPTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EBITDATTM(tp_derivation, factor_derivation, dependencies=['total_profit',
'fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization'
]):
"""
:name: EBITDA(TTM)
:desc: [EBITDA(TTM)_正向]息税前利润 + 当期计提折旧与摊销
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
        func = lambda x: x[0] + x[1] + x[2] + x[3] \
            if x[0] is not None and x[1] is not None and x[2] is not None and x[3] is not None \
            else None
management['EBITDATTM'] = management[dependencies].apply(func, axis=1)
management = management[['EBITDATTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def CashRecForSGAndPSTTM(tp_derivation, factor_derivation, dependencies=['goods_sale_and_service_render_cash']):
"""
:name: 销售商品提供劳务收到的现金(TTM)
:desc: 根据截止指定日已披露的最新报告期“销售商品提供劳务收到的现金”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'goods_sale_and_service_render_cash': 'CashRecForSGAndPSTTM'})
factor_derivation = | pd.merge(factor_derivation, management, how='outer', on="security_code") | pandas.merge |
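# --- Added usage sketch (not from the original module): how the static factor methods above are
# --- meant to chain, each call merging one more column into factor_derivation on the
# --- 'security_code' index. The toy numbers are invented; merging on an index level name
# --- requires a reasonably recent pandas.
if __name__ == '__main__':
    tp = pd.DataFrame({'total_profit': [1.0e8], 'interest_expense': [2.0e6],
                       'interest_income': [5.0e5], 'financial_expense': [1.5e6],
                       'income_tax': [2.5e7]},
                      index=pd.Index(['000001.XSHE'], name='security_code'))
    factors = pd.DataFrame(index=tp.index)
    factors = FactorBasicDerivation.EBIT(tp, factors)
    factors = FactorBasicDerivation.EBITDA(tp, factors)
    print(factors)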
import argparse
import numpy as np
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import os
import scrublet as scr
import scipy.io
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Arial'
plt.rc('font', size=14)
plt.rcParams['pdf.fonttype'] = 42
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', type=str,help="The directory of input matrix")
parser.add_argument('-o','--output', type=str,help="The directory of output",default='./')
args = parser.parse_args()
input_dir = args.input
os.system('gunzip '+input_dir+"/*" )
counts_matrix = scipy.io.mmread(input_dir + '/matrix.mtx').T.tocsc()
genes = np.array(scr.load_genes(input_dir + '/genes.tsv', delimiter='\t', column=1))
print('Counts matrix shape: {} rows, {} columns'.format(counts_matrix.shape[0], counts_matrix.shape[1]))
print('Number of genes in gene list: {}'.format(len(genes)))
#os.system('gzip '+input_dir+"/*" )
scrub = scr.Scrublet(counts_matrix, expected_doublet_rate=0.06)
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=2,min_cells=3,min_gene_variability_pctl=85,n_prin_comps=30)
print('Running UMAP...')
scrub.set_embedding('UMAP', scr.get_umap(scrub.manifold_obs_, 10, min_dist=0.3))
print('Done.')
scrub.plot_embedding('UMAP', order_points=True);
plt.savefig(args.output+"/umap_doublet_plot.png")
plt.cla()
len_array = np.array(range(0,len(predicted_doublets)))
result = list(len_array[list(predicted_doublets)])
print("There are "+str(len(result))+" doublets.")
adata = sc.read_10x_mtx(
input_dir, # the directory with the `.mtx` file
var_names='gene_symbols' # use gene symbols for the variable names (variables-axis index)
)
print(adata)
barcodes=adata.obs.index.tolist()
genes=adata.var.index.tolist()
mat=adata.X.todense()
filtered= | pd.DataFrame(data=mat,columns=genes,index=barcodes) | pandas.DataFrame |
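# --- Added sketch (not part of the original script): dropping the predicted doublets before
# --- writing the expression table, assuming scrublet and scanpy saw the same cells in the same
# --- order (both read from the same 10x directory above). The output filename is an assumption.
keep_mask = ~np.asarray(predicted_doublets, dtype=bool)
adata_filtered = adata[keep_mask].copy()
filtered_no_doublets = filtered.loc[np.array(barcodes)[keep_mask]]
filtered_no_doublets.to_csv(os.path.join(args.output, 'filtered_matrix_no_doublets.csv'))
print('kept {} of {} cells'.format(int(keep_mask.sum()), len(keep_mask)))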
## Functions to support SPEI drought index analysis
## Code: EHU | SPEI data: SC
## 12 Sept 2019
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from datetime import date
import collections
## Constants associated with this analysis
yrs = np.linspace(1900, 2101, num=2412)
model_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GISS-E2-R', 'INMCM4', 'MIROC-ESM', 'NorESM1-M'] # all models used in comparison
scenarios = ['Rcp4p5', 'Rcp8p5'] # climate scenarios
basin_names = ['INDUS','TARIM','BRAHMAPUTRA','ARAL SEA','COPPER','GANGES','YUKON','ALSEK','SUSITNA','BALKHASH','STIKINE','SANTA CRUZ',
'FRASER','BAKER','YANGTZE','SALWEEN','COLUMBIA','ISSYK-KUL','AMAZON','COLORADO','TAKU','MACKENZIE','NASS','THJORSA','JOEKULSA A F.',
'KUSKOKWIM','RHONE','SKEENA','OB','OELFUSA','MEKONG','DANUBE','NELSON RIVER','PO','KAMCHATKA','RHINE','GLOMA','HUANG HE','INDIGIRKA',
'LULE','RAPEL','SANTA','SKAGIT','KUBAN','TITICACA','NUSHAGAK','BIOBIO','IRRAWADDY','NEGRO','MAJES','CLUTHA','DAULE-VINCES',
'KALIXAELVEN','MAGDALENA','DRAMSELV','COLVILLE']
def plot_basin_runmean(basin_id, permodel_dict,
which='diff', window_yrs=30, cmap_name='viridis',
show_labels=True, show_plot=True, save_plot=False,
output_tag=None, ax=None, shade_axis=True):
"""Make a plot of running mean difference in SPEI for a given basin, comparing across models.
Arguments:
basin_id: integer, index of basin in the standard list "basin_names"
permodel_dict: dictionary storing SPEI per model, with the structure dict[modelname]['diff'/'WRunoff'/'NRunoff'][basinname] = basin difference in SPEI for this model
which: string identifying 'WRunoff' (with glacial runoff), 'NRunoff' (no runoff), or 'diff' (their difference)
window_yrs: number of years to consider in running average. Default 30
cmap_name: name of matplotlib colormap from which to select line colors. Default 'viridis'
show_plot: Boolean, whether to show the resulting plot. Default True
save_plot: Boolean, whether to save the plot in the working directory. Default False
output_tag: anything special to note in output filename, e.g. global settings applied. Default None will label 'default'
ax: Axes instance on which to plot. Default None will set up a new instance
shade_axis: Boolean, whether to shade regions for which the running window includes years before the glacier model switch-on
"""
window_size = 12 * window_yrs # size of window given monthly data
basin_runavg_bymodel = [np.convolve(permodel_dict[m][which][basin_id], np.ones((window_size,))/window_size, mode='valid') for m in model_names] #compute running means
colors = cm.get_cmap(cmap_name)(np.linspace(0, 1, num=len(model_names)))
styles = ('-',':')
if ax is None:
fig, ax = plt.subplots() # create Axes instance if needed
if shade_axis:
cutoff_year = 1980 + window_yrs/2
ax.axvspan(1900, cutoff_year, color='Grey', alpha=0.3)
for k,m in enumerate(model_names):
        ax.plot(yrs[(window_size//2):-(window_size//2 - 1)], basin_runavg_bymodel[k], label=m, color=colors[k], ls=styles[np.mod(k, len(styles))], linewidth=2.0)
ax.tick_params(axis='both', labelsize=14)
ax.set(xlim=(1900,2100), xticks=[1900,1950, 2000, 2050, 2100])
if show_labels:
ax.set_xlabel('Years', fontsize=16)
ax.set_ylabel('Mean SPEI {}'.format(which), fontsize=16)
ax.set_title('{} year running mean, {} case, {} basin'.format(window_yrs, which, basin_names[basin_id]), fontsize=18)
ax.legend(loc='best')
plt.tight_layout()
if save_plot:
if output_tag is None:
output_tag='default'
plt.savefig(fname='{}yr_runmean-{}-{}_basin-{}-{}.png'.format(window_yrs, which, basin_names[basin_id], output_tag, date.today()))
if show_plot:
plt.show()
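# --- Added usage sketch (not from the original module): calling plot_basin_runmean with a
# --- synthetic SPEI dictionary so the expected dict[model][case][basin_id] structure is
# --- explicit. Real inputs come from the SPEI data noted in the header comment.
def _demo_runmean():
    rng = np.random.default_rng(1)
    fake = {m: {case: [rng.normal(0, 0.5, size=len(yrs))]  # one synthetic series -> basin_id=0
                for case in ('diff', 'WRunoff', 'NRunoff')}
            for m in model_names}
    plot_basin_runmean(basin_id=0, permodel_dict=fake, which='diff', window_yrs=30,
                       save_plot=False, show_plot=True)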
def plot_runmean_comparison(basin_id, permodel_dict, window_yrs=30, cmaps=('Blues', 'Wistia'), show_labels=True, show_plot=True, save_plot=False, output_tag=None, ax=None):
"""Make a plot comparing running-average model projections of SPEI with and without glacial runoff.
Arguments:
basin_id: integer, index of basin in the standard list "basin_names"
permodel_dict: dictionary storing SPEI per model, with the structure dict[modelname]['diff'/'WRunoff'/'NRunoff'][basinname] = basin difference in SPEI for this model
window_yrs: number of years to consider in running average. Default 30
cmaps: tuple (str, str) of matplotlib colormap names from which to select line colors for each case. Default ('Blues', 'Greys')
show_plot: Boolean, whether to show the resulting plot. Default True
save_plot: Boolean, whether to save the plot in the working directory. Default False
output_tag: anything special to note, e.g. global settings applied. Default None will label 'default'
ax: Axes instance on which to plot. Default None will set up a new instance
"""
window_size = 12 * window_yrs # size of window given monthly data
basin_runavg_w = [np.convolve(permodel_dict[m]['WRunoff'][basin_id], np.ones((window_size,))/window_size, mode='valid') for m in model_names] #compute running means
basin_runavg_n = [np.convolve(permodel_dict[m]['NRunoff'][basin_id], np.ones((window_size,))/window_size, mode='valid') for m in model_names] #compute running means
colors_w = cm.get_cmap(cmaps[0])(np.linspace(0.2, 1, num=len(model_names)))
colors_n = cm.get_cmap(cmaps[1])(np.linspace(0.2, 1, num=len(model_names)))
if ax is None:
fig, ax = plt.subplots()
ax.axhline(y=0, color='Gainsboro', linewidth=2.0)
for k,m in enumerate(model_names):
        ax.plot(yrs[(window_size//2):-(window_size//2 - 1)], basin_runavg_w[k], label=m, color=colors_w[k], linewidth=2.0)
        ax.plot(yrs[(window_size//2):-(window_size//2 - 1)], basin_runavg_n[k], ls='-.', color=colors_n[k], linewidth=2.0)
ax.tick_params(axis='both', labelsize=14)
ax.set(xlim=(1900,2100), xticks=[1900,1950, 2000, 2050, 2100])
if show_labels:
ax.set_xlabel('Years', fontsize=16)
ax.set_ylabel('SPEI', fontsize=16)
ax.set_title('{} year running average trajectories, {} basin'.format(window_yrs, basin_names[basin_id]), fontsize=18)
ax.legend(loc='best')
plt.tight_layout()
if save_plot:
if output_tag is None:
output_tag='default'
plt.savefig(fname='{}yr_runmean_comp-{}_basin-{}-{}.png'.format(window_yrs, basin_names[basin_id], output_tag, date.today()))
if show_plot:
plt.show()
def plot_basin_runvar(basin_id, permodel_dict, which='diff', window_yrs=30, cmaps='viridis', show_labels=True, show_plot=True, save_plot=False, output_tag=None, ax=None, shade_axis=True):
"""Make a plot comparing running-average model projections of SPEI with and without glacial runoff.
Arguments:
basin_id: integer, index of basin in the standard list "basin_names"
permodel_dict: dictionary storing SPEI per model, with the structure dict[modelname]['diff'/'WRunoff'/'NRunoff'][basinname] = basin difference in SPEI for this model
window_yrs: number of years to consider in running average. Default 30
cmaps: tuple (str, str) of matplotlib colormap names from which to select line colors for each case. Default ('Blues', 'Greys')
show_plot: Boolean, whether to show the resulting plot. Default True
save_plot: Boolean, whether to save the plot in the working directory. Default False
output_tag: anything special to note, e.g. global settings applied. Default None will label 'default'
ax: Axes instance on which to plot. Default None will set up a new instance
shade_axis: Boolean, whether to shade regions for which the running window includes years before the glacier model switch-on
"""
basin_dict = {m: {'NRunoff': [], 'WRunoff': [], 'diff': []} for m in model_names}
varwindow = 12*window_yrs # number of months to window in rolling variance
for m in model_names:
nr = pd.Series(permodel_dict[m]['NRunoff'][basin_id])
wr = | pd.Series(permodel_dict[m]['WRunoff'][basin_id]) | pandas.Series |
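# --- Added sketch (not from the original file): the excerpt cuts off inside plot_basin_runvar,
# --- so the rolling-variance step below is an assumption about how it continues: a centred
# --- rolling window of varwindow months applied to one SPEI series with pandas.
def _rolling_variance(series, varwindow):
    return pd.Series(series).rolling(window=varwindow, center=True).var()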
"""Pypiplot."""
from d3heatmap import d3heatmap as d3
import pypistats
import requests
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import os
from calplot import calplot, yearplot
import tempfile
# %%
class Pypiplot:
"""Class pypiplot."""
def __init__(self, username, category=['with_mirrors', 'without_mirrors'], sep=';', savepath=None, verbose=3):
"""Initialize pypiplot.
Parameters
----------
username : String
Github user account name.
category : list, optional
Downloads is counted for one or both of these categories ['with_mirrors', 'without_mirrors'].
sep : str, (Default: ';')
Seperator to store data in csv file.
savepath : String, (Default: None)
Storage of the csv files containing download statistics.
verbose : int, (Default: 3)
Verbosity message.
Returns
-------
None.
"""
self.username = username
self.repo_link = 'https://api.github.com/users/' + username + '/repos'
self.sep = sep
self.category = category
self.curpath = os.path.dirname(os.path.abspath(__file__))
self.tempdir = os.path.abspath(tempfile.gettempdir())
# self.curpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
if savepath is None:
self.savepath = os.path.join(self.curpath, 'pypi_data')
if not os.path.exists(self.savepath): os.makedirs(self.savepath)
else:
self.savepath = savepath
self.verbose=verbose
def update(self, repo=None):
"""Update repo download file(s).
Description
-----------
Update the local stored file with daily downloads for the specified repos.
Parameters
----------
repo : list of Strings, (Default: None)
None : Take all available pypi repos for the username.
Returns
-------
None.
"""
if (repo is not None) and ('str' in str(type(repo))):
repo = [repo]
# Extract all repos
repos = self._get_repo_names_from_git()
if (repo is not None):
repos = repo
if not np.any(np.isin(repos, repo)):
raise ValueError('[pypiplot] >Error: repos [%s] does not exists or is private.' %(repo))
if self.verbose>=3: print('[pypiplot] >Start updating..')
for repo in repos:
try:
if self.verbose>=3: print('[pypiplot] >[%s]' %(repo))
status = True
df = pypistats.overall(repo, total=True, format="pandas")
df.dropna(inplace=True)
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df = df.sort_values("date")
df.reset_index(drop=True, inplace=True)
del df['percent']
# Merge with any on disk
pathname = os.path.join(self.savepath, repo + '.csv')
if os.path.isfile(pathname):
# Read repo from disk
df_disk = read_repo_counts_from_disk(pathname, self.sep)
# Merge with latest counts
df, status = add_new_counts_to_repo(df, df_disk, repo, verbose=self.verbose)
# Write to disk
if status:
if self.verbose>=3: print('[pypiplot] >Write to disk: [%s]' %(pathname))
df.to_csv(pathname, index=False, sep=self.sep)
except:
if self.verbose>=1: print('[pypiplot] >Skip [%s] because could not retrieve statistics from Pypi.' %(repo))
def stats(self, repo=None):
"""Compute statistics for the specified repo(s).
Description
-----------
Compute and summarize statistics for the libraries.
Parameters
----------
repo : list of Strings, (Default: None)
None : Take all available pypi repos for the username.
Returns
-------
dict()
* data : Download statistics for the repo(s).
* heatmap : DataFrame containing (summarized) data statistics.
* repos : Number of repos.
* n_libraries : Number of libraries processed.
"""
# Retrieve all repos for the username
status, repos, filenames, pathnames = self._get_repos()
if (repo is not None) and ('str' in str(type(repo))):
repo = [repo]
# Check whether specific repo exists.
if repo is not None:
Iloc = np.isin(repos, repo)
if not np.any(Iloc): raise ValueError('[pypiplot] >Error: repos [%s] does not exists or is private. Tip: Run the .update() first.' %(repo))
# repos = [repo]
repos = repos[Iloc]
filenames = filenames[Iloc]
pathnames = pathnames[Iloc]
if not status:
if self.verbose>=3: print('[pypiplot] >No repos could be retrieved from git nor disk <return>')
return None
out = pd.DataFrame()
for repo, pathname in zip(repos, pathnames):
df = read_repo_counts_from_disk(pathname, self.sep)
# Take desired categories
Iloc = np.isin(df['category'], self.category)
df = df.loc[Iloc, :]
# Group by date
df = df.groupby("date").sum().sort_values("date")
df.reset_index(drop=False, inplace=True)
dftmp = df.groupby("date").sum()
dftmp.rename(columns={'downloads': repo}, inplace=True)
out = pd.concat([out, dftmp], axis=0)
out.fillna(value=0, inplace=True)
out.reset_index(drop=False, inplace=True)
out = out.groupby("date").sum()
# Make heatmap
heatmap = _compute_history_heatmap(out, verbose=self.verbose)
self.results = {}
self.results['data'] = out
self.results['heatmap'] = heatmap
self.results['n_libraries'] = out.shape[1]
self.results['repos'] = repos
return self.results
def _get_repo_names_from_git(self):
# Extract repos for user
if self.verbose>=3: print('[pypiplot] >Extracting repo names for [%s]..' %(self.username))
r = requests.get(self.repo_link)
data = r.json()
# Extract the repo names
repos = []
for rep in data:
# full_names.insert(0, rep['full_name'])
repos.insert(0, rep['name'])
if self.verbose>=3: print('[pypiplot] >[%.0d] repos found for [%s]' %(len(repos), self.username))
# Return
return np.array(repos)
def _get_repos(self):
status = True
# Retrieve all downloads from disk
repos, filenames, pathnames = get_files_on_disk(self.savepath, verbose=self.verbose)
# Update and retrieve if needed
if len(repos)==0:
if self.verbose>=3: print('[pypiplot] >No files found on disk. Lets update first!')
# Update all repos
self.update()
# Retrieve all downloads from disk
repos, filenames, pathnames = get_files_on_disk(self.savepath, verbose=0)
if len(repos)==0:
status = False
# Return
return status, repos, filenames, pathnames
def plot_cal(self, method='mean', vmin=None, vmax=None, cmap='YlGn', norm=False):
X = self.results['data'].copy()
if vmin is not None:
X[X<=vmin]=vmin
if vmax is not None:
X[X>=vmax]=vmax
if norm:
print('[pypiplot]> Normalizing..')
X = (X-X.mean(axis=0)) / X.std(axis=0)
print('[pypiplot]> Method: [%s]' %(method))
if method=='median':
events=X.median(axis=1)
elif method=='mean':
events=X.mean(axis=1)
else:
events=X.sum(axis=1)
# Make the calender
plt.figure()
calplot(events, cmap=cmap, colorbar=False, figsize=None, suptitle=None)
def plot_year(self, title=None, description=None, path='d3heatmap.html', vmin=10, vmax=None, cmap='interpolateGreens', visible=True, overwrite=False):
"""Plot heatmap across all repos.
Description
-----------
Plot heatmap of all the repos combined with weeks vs day-name
Parameters
----------
title : String, (Default: None)
Title of the heatmap.
description : String, (Default: None)
Description of the heatmap.
path : String, (Default: 'd3heatmap.html'.)
Full pathname or filename to store the file. If None is used, the system tempdir is used.
        vmin : int, (Default: 10)
            Minimum color: Used for colorscheme.
            None: Take the minimum value in the matrix.
        vmax : int, (Default: None)
            Maximum color: Used for colorscheme.
            None: Take the maximum value in the matrix.
        cmap : String, (default: 'interpolateGreens').
            The colormap scheme. This can be found at: https://github.com/d3/d3-scale-chromatic.
visible : Bool, (default: True).
Open the browser.
Returns
-------
None.
"""
if description is None:
if self.results['n_libraries']>1:
description = '%.0d Total Pypi downloads across %.0d libraries' %(self.results['heatmap'].sum().sum(), self.results['n_libraries'])
else:
description = '%.0d Total Pypi downloads for %s' %(self.results['heatmap'].sum().sum(), self.results['repos'][0])
if title is None:
title = ''
# Make heatmap with d3js.
        d3.matrix(self.results['heatmap'], fontsize=9, title=title, description=description, path=path, width=700, height=200, cmap=cmap, vmin=vmin, vmax=vmax, stroke='black', showfig=visible, overwrite=overwrite)
def plot(self, title=None, method='mean', legend=True, figsize=(25, 15)):
plt.figure()
if method=='median':
self.results['data'].rolling(window=30).median().plot(figsize=figsize, legend=legend)
elif method=='sum':
self.results['data'].rolling(window=30).sum().plot(figsize=figsize, legend=legend)
else:
self.results['data'].rolling(window=30).mean().plot(figsize=figsize, legend=legend)
plt.xlabel('Date')
plt.ylabel('Average number of downloads in a rolling window of 30 days')
plt.grid(True)
plt.title(title)
def plot_heatmap(self, title=None, description=None, path='d3_heatmap_repos.html', vmin=10, vmax=None, width=700, height=None, cmap='interpolateGreens'):
"""Plot heatmap across all repos.
Description
-----------
Plot heatmap of all the repos combined with weeks vs day-name
Parameters
----------
title : String, (Default: None)
Title of the heatmap.
description : String, (Default: None)
Description of the heatmap.
path : String, (Default: 'd3_heatmap_repos.html'.)
Full pathname or filename to store the file. If None is used, the system tempdir is used.
vmin : int, (Default: 10)
Minimum color: Used for colorscheme.
None: Take the minimum value in the matrix.
vmax : int, (Default: None)
Maximum color: Used for colorscheme.
None: Take the maximum value in the matrix.
cmap : String, (default: 'interpolateGreens').
The colormap scheme. This can be found at: https://github.com/d3/d3-scale-chromatic
'interpolateOranges'
width : int, (default: 700).
Width of the window.
height : int, (default: None).
None: Determine based on number of repos.
Returns
-------
None.
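Examples
--------
Hypothetical usage sketch; assumes an instance ``p`` of this class with
``p.results`` already populated (instance name and precondition are assumptions)::
    p.plot_heatmap(path='d3_heatmap_repos.html', vmin=10, cmap='interpolateGreens')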
"""
heatmap = pd.DataFrame()
cols = self.results['data'].columns.values
for col in cols:
heatmap[col] = _compute_history_heatmap(pd.DataFrame(self.results['data'][col])).sum(axis=0)
if title is None:
title = ''
if description is None:
if self.results['n_libraries']>1:
description = '%.0d Pypi downloads last year across %.0d libraries' %(self.results['heatmap'].sum().sum(), self.results['n_libraries'])
else:
description = '%.0d Pypi downloads last year for %s' %(self.results['heatmap'].sum().sum(), self.results['repos'][0])
if height is None:
height = np.maximum(np.minimum(40 * heatmap.shape[1], 550), 200)
# Make heatmap with d3js.
d3.matrix(heatmap.T, fontsize=9, title=title, description=description, path=path, width=width, height=height, cmap=cmap, vmin=vmin, vmax=vmax, stroke='black', overwrite=True)
# %%
def _compute_history_heatmap(df, duration=360, nr_days=7, verbose=3):
df = df.sum(axis=1).copy()
datetimeformat='%Y-%m-%d'
if verbose>=3: print('[pypiplot] >Computing heatmap across the last %.0d days.' %(duration))
# Pad with zeros from 'duration' days ago (at most ~365) up to the first date in the data
extend_days = datetime.now() - timedelta(duration)
dates_start = pd.to_datetime(pd.date_range(start=extend_days, end=df.index[0]).strftime(datetimeformat), format=datetimeformat)
df_start = pd.DataFrame(np.zeros((len(dates_start), 1)), dtype=int, index=dates_start)
# Fill the gap between the last date in the data and now
dates_end = pd.to_datetime(pd.date_range(start=df.index[-1] + timedelta(1), end=datetime.now()).strftime(datetimeformat), format=datetimeformat)
df_end = pd.DataFrame(np.zeros((len(dates_end), 1)), dtype=int, index=dates_end)
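# Illustration of the padding (dates are made up): if the data only covers
# 2021-03-01..2021-03-10 and duration=360, df_start holds zero-rows for the
# ~350 days before the first observation and df_end holds zero-rows from
# 2021-03-11 up to today, so the concatenated frame spans the full window.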
# dataframe containing 365 days of data
df_365 = | pd.concat([df_start, df, df_end], axis=0) | pandas.concat |
import re
import json
import requests
from bs4 import BeautifulSoup
import time
import datetime
import pandas as pd
import numpy as np
# Fans page ==================================================================
# Crawl_PagePosts
def Crawl_PagePosts(pageurl, until_date='2019-01-01'):
page_id = pagecrawler.get_pageid(pageurl)
timeline_cursor = ''
content_df = [] # post
feedback_df = [] # reactions
max_date = datetime.datetime.now()
break_times = 0
rs = requests.session()
# request data and break the loop once posts older than until_date are reached
while max_date >= datetime.datetime.strptime(until_date, '%Y-%m-%d'):
try:
url = 'https://www.facebook.com/pages_reaction_units/more/'
params = {'page_id': page_id,
'cursor': str({"timeline_cursor":timeline_cursor,
"timeline_section_cursor":'{}',
"has_next_page":'true'}),
# 'surface': 'www_pages_home',
'surface': 'www_pages_posts',
'unit_count': 20,
'__a': '1'}
resp = rs.get(url, params=params)
data = json.loads(re.sub(r'for \(;;\);','',resp.text))
# contents: poster's name, poster's ID, post ID, time, content
ndf = pageparser.parse_content(data=data)
content_df.append(ndf)
# reactions
ndf1 = pageparser.get_reaction(data=data)
feedback_df.append(ndf1)
# update request params
max_date = ndf['TIME'].max()
print('TimeStamp: {}.'.format(ndf['TIME'].max()))
timeline_cursor = re.findall(r'timeline_cursor%22%3A%22(.*?)%22%2C%22timeline_section_cursor', data['domops'][0][3]['__html'])[0]
# break times to zero
break_times = 0
except:
break_times += 1
print('break_times:', break_times)
time.sleep(3)
time.sleep(2)
if break_times > 5:
break
# join content and reactions
content_df = pd.concat(content_df, ignore_index=True)
feedback_df = pd.concat(feedback_df, ignore_index=True)
df = pd.merge(left=content_df, right=feedback_df, how='left', on=['PAGEID', 'POSTID'])
df = df.loc[:,['NAME', 'TIME', 'CONTENT', 'PAGEID', 'POSTID', 'display_comments_count', 'total_comments_count', 'reaction_count', 'share_count', 'LIKE', 'LOVE', 'HAHA', 'SUPPORT', 'WOW', 'ANGER', 'SORRY']]
df = df.rename(columns={'display_comments_count':'DISPLAYCOMMENTS', 'total_comments_count':'TOTAL_COMMENTS', 'reaction_count':'REACTIONS','share_count':'SHARES'})
df['UPDATETIME'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print('There are {} posts in the DataFrame.'.format(str(df.shape[0])))
return df
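# Hypothetical usage sketch for Crawl_PagePosts (the page URL below is an
# assumption, not taken from this file):
# posts_df = Crawl_PagePosts('https://www.facebook.com/SomePublicPage', until_date='2020-01-01')
# posts_df.head()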
# Group page ==================================================================
## parse_group_content
def parse_group_content(resp):
soup = BeautifulSoup(resp.text, 'lxml')
df = []
for ele in soup.findAll('article'):
try:
df.append([
re.findall('"actor_id":([0-9]{1,})' ,str(ele))[0], # actorid
re.findall('"top_level_post_id":"(.*?)"' ,str(ele))[0], # postid
ele.find('strong').text, # actorname
json.loads(re.findall(r'"post_context":({.*?})', str(ele))[0])['publish_time'], # TIME
json.loads(re.findall(r'"post_context":({.*?})', str(ele))[0])['story_name'], # story_name
ele.select_one('div.story_body_container > div').text, # content
' '.join([i.text for i in ele.findAll('span', {'class':'_28wy'})]) # reactions
])
except:
pass
df = pd.DataFrame(data=df, columns = ['ACTORID','POSTID', 'NAME', 'TIME','STORYNAME', 'CONTENT', 'REACTIONS'])
try:
df['GROUPID'] = re.findall('\?id=([0-9]{1,})"',resp.text)[0]
except:
df['GROUPID'] = re.findall('https://m.facebook.com/groups/([0-9]{1,})\?',resp.text)[0]
df['TIME'] = df['TIME'].apply(lambda x: datetime.datetime.fromtimestamp(int(x)))
df['LIKES'] = df['REACTIONS'].apply(lambda x: re.findall('([0-9]{1,}) Like', x)[0] if 'Like' in x else '0')
df['COMMENTS'] = df['REACTIONS'].apply(lambda x: re.findall('([0-9]{1,}) Comment', x)[0] if 'Comment' in x else '0')
df['SHARES'] = df['REACTIONS'].apply(lambda x: re.findall('([0-9]{1,}) Share', x)[0] if 'Share' in x else '0')
df = df.loc[:,['ACTORID', 'NAME', 'GROUPID', 'POSTID', 'TIME', 'STORYNAME', 'CONTENT', 'LIKES', 'COMMENTS', 'SHARES']]
df['UPDATETIME'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return df
## get_bac
def get_bac(resp):
# string = urllib.parse.unquote(resp.text)
# try:
# bac = re.findall('bac=(.*?)%3D',resp.text)[0]
# except:
# bac = re.findall('bac=(.*?)\&multi',resp.text)[0]
bac = re.findall('bac=([0-9A-Za-z]{10,})',resp.text)[0]
return bac
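# Note: 'bac' is the pagination cursor embedded in the returned HTML; it is fed
# back into the next request's query string so the crawler can page through
# older posts (the commented-out variants below are earlier attempts at
# extracting the same token).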
# def get_bac(resp):
# try:
# bac = re.findall('bac=(.*?)%3D',resp.text)[0]
# print('type1')
# except:
# try:
# bac = re.findall('bac=(.*?)&',resp.text)[0]
# print('type2')
# except:
# bac = re.findall('bac%3D(.*?)%26', resp.text)[0]
# print('type3')
# return bac
## Crawl_GroupPosts
def Crawl_GroupPosts(groupurl, until_date='2021-05-01'):
groupurl = re.sub('www','m', groupurl)
headers = {
'referer': 'https://m.facebook.com/',
'cookie': 'locale=en_US',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'
}
df = []
bac = ''
max_date = datetime.datetime.now()
break_times = 0
# request data and break the loop once posts older than until_date are reached
while max_date >= datetime.datetime.strptime(until_date, '%Y-%m-%d'):
# request params
params = {
'bac': bac,
'multi_permalinks': '',
'refid': '18'
}
resp = requests.get(groupurl, headers=headers, params=params)
try:
ndf = parse_group_content(resp)
df.append(ndf)
# update request params
bac = get_bac(resp)
# print(bac)
# some posts may be pinned at the top, so we can't take the max date directly
max_date = ndf['TIME'].sort_values(ascending=False,ignore_index=True)[3]
print('TimeStamp: {}.'.format(max_date))
break_times = 0 # break times to zero
except:
break_times += 1
print('break_times:', break_times)
time.sleep(2)
if break_times > 5:
return resp
# return print('ERROR: Please send the following URL to the author. \n', resp.url)
# concat data we collect
df = | pd.concat(df, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Test data
"""
# Imports
import pandas as pd
from edbo.feature_utils import build_experiment_index
# Build data sets from indices
def aryl_amination(aryl_halide='ohe', additive='ohe', base='ohe', ligand='ohe', subset=1):
"""
Load aryl amination data with different features.
"""
# SMILES index
index = pd.read_csv('data/aryl_amination/experiment_index.csv')
# Choose subset:
ar123 = ['FC(F)(F)c1ccc(Cl)cc1','FC(F)(F)c1ccc(Br)cc1','FC(F)(F)c1ccc(I)cc1']
ar456 = ['COc1ccc(Cl)cc1','COc1ccc(Br)cc1','COc1ccc(I)cc1']
ar789 = ['CCc1ccc(Cl)cc1','CCc1ccc(Br)cc1','CCc1ccc(I)cc1']
ar101112 = ['Clc1ccccn1','Brc1ccccn1','Ic1ccccn1']
ar131415 = ['Clc1cccnc1','Brc1cccnc1','Ic1cccnc1']
def get_subset(ar):
a = index[index['Aryl_halide_SMILES'] == ar[0]]
b = index[index['Aryl_halide_SMILES'] == ar[1]]
c = index[index['Aryl_halide_SMILES'] == ar[2]]
return pd.concat([a,b,c])
if subset == 1:
index = get_subset(ar123)
elif subset == 2:
index = get_subset(ar456)
elif subset == 3:
index = get_subset(ar789)
elif subset == 4:
index = get_subset(ar101112)
elif subset == 5:
index = get_subset(ar131415)
# Aryl halide features
if aryl_halide == 'dft':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_dft.csv')
elif aryl_halide == 'mordred':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_mordred.csv')
elif aryl_halide == 'ohe':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_ohe.csv')
# Additive features
if additive == 'dft':
add_features = pd.read_csv('data/aryl_amination/additive_dft.csv')
elif additive == 'mordred':
add_features = pd.read_csv('data/aryl_amination/additive_mordred.csv')
elif additive == 'ohe':
add_features = pd.read_csv('data/aryl_amination/additive_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/aryl_amination/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/aryl_amination/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/aryl_amination/base_ohe.csv')
# Ligand features
if ligand == 'Pd(0)-dft':
ligand_features = pd.read_csv('data/aryl_amination/ligand-Pd(0)_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/aryl_amination/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/aryl_amination/ligand_ohe.csv')
# Build the descriptor set
index_list = [index['Aryl_halide_SMILES'],
index['Additive_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES']]
lookup_table_list = [aryl_features,
add_features,
base_features,
ligand_features]
lookup_list = ['aryl_halide_SMILES',
'additive_SMILES',
'base_SMILES',
'ligand_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield'].values
return experiment_index
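# Hypothetical usage sketch (the chosen featurizations are assumptions):
# data = aryl_amination(aryl_halide='dft', additive='mordred', base='ohe', ligand='ohe', subset=1)
# data.head()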
def suzuki(electrophile='ohe', nucleophile='ohe', base='ohe', ligand='ohe', solvent='ohe'):
"""
Load Suzuki data with different features.
"""
# SMILES index
index = pd.read_csv('data/suzuki/experiment_index.csv')
# Electrophile features
if electrophile == 'dft':
elec_features = pd.read_csv('data/suzuki/electrophile_dft.csv')
elif electrophile == 'mordred':
elec_features = pd.read_csv('data/suzuki/electrophile_mordred.csv')
elif electrophile == 'ohe':
elec_features = pd.read_csv('data/suzuki/electrophile_ohe.csv')
# Nucleophile features
if nucleophile == 'dft':
nuc_features = pd.read_csv('data/suzuki/nucleophile_dft.csv')
elif nucleophile == 'mordred':
nuc_features = pd.read_csv('data/suzuki/nucleophile_mordred.csv')
elif nucleophile == 'ohe':
nuc_features = pd.read_csv('data/suzuki/nucleophile_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/suzuki/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/suzuki/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/suzuki/base_ohe.csv')
# Ligand features
if ligand == 'random-dft':
ligand_features = pd.read_csv('data/suzuki/ligand-random_dft.csv')
elif ligand == 'boltzmann-dft':
ligand_features = pd.read_csv('data/suzuki/ligand-boltzmann_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/suzuki/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/suzuki/ligand_ohe.csv')
# Solvent features
if solvent == 'dft':
solvent_features = pd.read_csv('data/suzuki/solvent_dft.csv')
elif solvent == 'mordred':
solvent_features = pd.read_csv('data/suzuki/solvent_mordred.csv')
elif solvent == 'ohe':
solvent_features = pd.read_csv('data/suzuki/solvent_ohe.csv')
# Build the descriptor set
index_list = [index['Electrophile_SMILES'],
index['Nucleophile_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES'],
index['Solvent_SMILES']]
lookup_table_list = [elec_features,
nuc_features,
base_features,
ligand_features,
solvent_features]
lookup_list = ['electrophile_SMILES',
'nucleophile_SMILES',
'base_SMILES',
'ligand_SMILES',
'solvent_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield']
return experiment_index
def direct_arylation(base='ohe', ligand='ohe', solvent='ohe'):
"""
Load direct arylation data with different features.
"""
# SMILES index
index = pd.read_csv('data/direct_arylation/experiment_index.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/direct_arylation/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/direct_arylation/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/direct_arylation/base_ohe.csv')
# Ligand features
if ligand == 'random-dft':
ligand_features = pd.read_csv('data/direct_arylation/ligand-random_dft.csv')
elif ligand == 'boltzmann-dft':
ligand_features = | pd.read_csv('data/direct_arylation/ligand-boltzmann_dft.csv') | pandas.read_csv |
from typing import Dict
import numpy as np
import pandas as pd
from houseregression_model import __version__ as _version
from houseregression_model.config.core import config
from houseregression_model.processing.utility_functions import load_pipeline
from houseregression_model.processing.validation import validate_inputs
pipeline_file = f"{config.app_config.pipeline_save_file}{_version}.pkl"
pipe = load_pipeline(file_name=pipeline_file)
def make_predictions(*, input_data: pd.DataFrame) -> Dict:
data = | pd.DataFrame(input_data) | pandas.DataFrame |
import pandas as pd
df = pd.read_csv('/mnt/data3/scott/1950-2018_actual_tornadoes.csv')
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from scipy.special import logit
from argparse import ArgumentParser
from pandas.api.types import CategoricalDtype
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# StatsModel methods
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.miscmodels.ordinal_model import OrderedModel
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
# TQDM for progress tracking
from tqdm import tqdm
# Function to load and compile test prediction files
def collect_preds(pred_file_info,progress_bar = True, progress_bar_desc = ''):
output_df = []
if progress_bar:
iterator = tqdm(range(pred_file_info.shape[0]),desc=progress_bar_desc)
else:
iterator = range(pred_file_info.shape[0])
for i in iterator:
curr_pred = pd.read_csv(pred_file_info.file[i])
curr_pred['repeat'] = pred_file_info.repeat[i]
curr_pred['fold'] = pred_file_info.fold[i]
output_df.append(curr_pred)
return pd.concat(output_df,ignore_index=True)
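# Hypothetical usage sketch for collect_preds; pred_file_info is assumed to be a
# DataFrame with 'file', 'repeat' and 'fold' columns pointing at prediction CSVs
# (the file name below is an assumption):
# pred_info = pd.DataFrame({'file': ['preds_repeat1_fold1.csv'], 'repeat': [1], 'fold': [1]})
# compiled_preds = collect_preds(pred_info, progress_bar_desc='Collecting predictions')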
# Function to load and compile test performance metrics for DeepIMPACT models
def collect_metrics(metric_file_info,progress_bar = True, progress_bar_desc = ''):
output_df = []
if progress_bar:
iterator = tqdm(metric_file_info.file,desc=progress_bar_desc)
else:
iterator = metric_file_info.file
return pd.concat([ | pd.read_csv(f) | pandas.read_csv |
# module for processing adjacency matrices in various ways
import pandas as pd
import numpy as np
import csv
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import pymaid
from tqdm import tqdm
from joblib import Parallel, delayed
class Adjacency_matrix():
def __init__(self, adj, input_counts, mat_type):
self.skids = list(adj.index)
self.pairs = Promat.get_pairs()
self.input_counts = input_counts
self.mat_type = mat_type # 'ad', 'aa', 'dd', 'da', 'summed'
self.adj = pd.DataFrame(adj, index = self.skids, columns = self.skids)
self.adj_fract = self.fraction_input_matrix()
self.adj_inter = self.interlaced_matrix()
self.adj_pairwise = self.average_pairwise_matrix()
def fraction_input_matrix(self):
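# Each column of self.adj holds the synaptic input onto one downstream neuron,
# so every column is divided by that neuron's total axon and/or dendrite input
# (chosen according to mat_type) to give fractional input.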
adj_fract = self.adj.copy()
for column in adj_fract.columns:
if((self.mat_type=='aa') | (self.mat_type=='da')):
axon_input = self.input_counts.loc[column].axon_input
if(axon_input == 0):
adj_fract.loc[:, column] = 0
if(axon_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/axon_input
if((self.mat_type=='ad') | (self.mat_type=='dd')):
dendrite_input = self.input_counts.loc[column].dendrite_input
if(dendrite_input == 0):
adj_fract.loc[:, column] = 0
if(dendrite_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/dendrite_input
if((self.mat_type=='summed') | (self.mat_type not in ['aa', 'da', 'ad', 'dd']) ):
all_input = self.input_counts.loc[column].dendrite_input + self.input_counts.loc[column].axon_input
if(all_input == 0):
adj_fract.loc[:, column] = 0
if(all_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/all_input
return(adj_fract)
def interlaced_matrix(self, fract=True):
if(fract):
adj_mat = self.adj_fract.copy()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(adj_mat, self.pairs)
if(fract==False):
adj_mat = self.adj.copy()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(adj_mat, self.pairs)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_pairs)):
brain_pair_order.append(brain_pairs.iloc[i].leftid)
brain_pair_order.append(brain_pairs.iloc[i].rightid)
order = brain_pair_order + list(brain_nonpaired.nonpaired)
interlaced_mat = adj_mat.loc[order, order]
index_df = pd.DataFrame([['pairs', Promat.get_paired_skids(skid, self.pairs)[0], skid] for skid in brain_pair_order] + [['nonpaired', skid, skid] for skid in list(brain_nonpaired.nonpaired)],
columns = ['pair_status', 'pair_id', 'skid'])
index = pd.MultiIndex.from_frame(index_df)
interlaced_mat.index = index
interlaced_mat.columns = index
return(interlaced_mat)
def average_pairwise_matrix(self):
adj = self.adj_inter.copy()
adj = adj.groupby('pair_id', axis = 'index').sum().groupby('pair_id', axis='columns').sum()
order = [x[1] for x in self.adj_inter.index]
# remove duplicates (in pair_ids)
order_unique = []
for x in order:
if (order_unique.count(x) == 0):
order_unique.append(x)
# order as before
adj = adj.loc[order_unique, order_unique]
# regenerate multiindex
index = [x[0:2] for x in self.adj_inter.index] # remove skid ids from index
# remove duplicates (in pair_ids)
index_unique = []
for x in index:
if (index_unique.count(x) == 0):
index_unique.append(x)
# add back appropriate multiindex
index_df = pd.DataFrame(index_unique, columns = ['pair_status', 'pair_id'])
index_df = pd.MultiIndex.from_frame(index_df)
adj.index = index_df
adj.columns = index_df
# convert to average (from sum) for paired neurons
adj.loc['pairs'] = adj.loc['pairs'].values/2
adj.loc['nonpaired', 'pairs'] = adj.loc['nonpaired', 'pairs'].values/2
return(adj)
def downstream(self, source, threshold, exclude=[], by_group=False, exclude_unpaired = False):
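# Returns downstream partners of 'source' based on the pair-averaged matrix.
# With by_group=True only the list of downstream skids is returned; otherwise a
# tuple of (source skids contributing suprathreshold edges, downstream skids,
# pair-level edge list).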
adj = self.adj_pairwise
source_pair_id = np.unique([x[1] for x in self.adj_inter.loc[(slice(None), slice(None), source), :].index])
if(by_group):
bin_mat = adj.loc[(slice(None), source_pair_id), :].sum(axis=0) > threshold
bin_column = np.where(bin_mat)[0]
ds_neurons = bin_mat.index[bin_column]
ds_neurons_skids = []
for pair in ds_neurons:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
ds_neurons_skids.append(pair[1])
ds_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude) & (exclude_unpaired==False)):
ds_neurons_skids.append(pair[1])
return(ds_neurons_skids)
if(by_group==False):
bin_mat = adj.loc[(slice(None), source_pair_id), :] > threshold
bin_column = np.where(bin_mat.sum(axis = 0) > 0)[0]
ds_neurons = bin_mat.columns[bin_column]
bin_row = np.where(bin_mat.sum(axis = 1) > 0)[0]
us_neurons = bin_mat.index[bin_row]
ds_neurons_skids = []
for pair in ds_neurons:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
ds_neurons_skids.append(pair[1])
ds_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude) & (exclude_unpaired==False)):
ds_neurons_skids.append(pair[1])
source_skids = []
for pair in us_neurons:
if(pair[0] == 'pairs'):
source_skids.append(pair[1])
source_skids.append(Promat.identify_pair(pair[1], self.pairs))
if(pair[0] == 'nonpaired'):
source_skids.append(pair[1])
edges = []
for pair in us_neurons:
if(pair[0] == 'pairs'):
specific_ds = adj.loc[('pairs', pair[1]), bin_mat.loc[('pairs', pair[1]), :]].index
if(exclude_unpaired):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[0]=='pairs') & (x[1] not in exclude)]
if(exclude_unpaired==False):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[1] not in exclude)]
for edge in specific_ds_edges:
edges.append(edge)
if(pair[0] == 'nonpaired'):
specific_ds = adj.loc[('nonpaired', pair[1]), bin_mat.loc[('nonpaired', pair[1]), :]].index
if(exclude_unpaired):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[0]=='pairs') & (x[1] not in exclude)]
if(exclude_unpaired==False):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[1] not in exclude)]
for edge in specific_ds_edges:
edges.append(edge)
return(source_skids, ds_neurons_skids, edges)
def upstream(self, source, threshold, exclude = []):
adj = self.adj_pairwise
source_pair_id = np.unique([x[1] for x in self.adj_inter.loc[(slice(None), slice(None), source), :].index])
bin_mat = adj.loc[:, (slice(None), source_pair_id)] > threshold
bin_row = np.where(bin_mat.sum(axis = 1) > 0)[0]
us_neuron_pair_ids = bin_mat.index[bin_row]
us_neurons_skids = []
for pair in us_neuron_pair_ids:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
us_neurons_skids.append(pair[1])
us_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude)):
us_neurons_skids.append(pair[1])
us_neuron_pair_ids = Promat.extract_pairs_from_list(us_neurons_skids, self.pairs)
us_neuron_pair_ids = list(us_neuron_pair_ids[0].leftid) + list(us_neuron_pair_ids[2].nonpaired)
edges = []
for pair in us_neuron_pair_ids:
specific_ds = bin_mat.loc[(slice(None), pair), bin_mat.loc[(slice(None), pair), :].values[0]].columns
specific_ds_edges = [[pair, x[1]] for x in specific_ds]
for edge in specific_ds_edges:
edges.append(edge)
return(us_neurons_skids, edges)
def downstream_multihop(self, source, threshold, min_members=0, hops=10, exclude=[], strict=False, allow_source_ds=False):
if(allow_source_ds==False):
_, ds, edges = self.downstream(source, threshold, exclude=(source + exclude))
if(allow_source_ds):
_, ds, edges = self.downstream(source, threshold, exclude=(exclude))
left = Promat.get_hemis('left')
right = Promat.get_hemis('right')
_, ds = self.edge_threshold(edges, threshold, direction='downstream', strict=strict, left=left, right=right)
if(allow_source_ds==False):
before = source + ds
if(allow_source_ds):
before = ds
layers = []
layers.append(ds)
for i in range(0,(hops-1)):
source = ds
_, ds, edges = self.downstream(source, threshold, exclude=before)
_, ds = self.edge_threshold(edges, threshold, direction='downstream', strict=strict, left=left, right=right)
if((len(ds)!=0) & (len(ds)>=min_members)):
layers.append(ds)
before = before + ds
return(layers)
def upstream_multihop(self, source, threshold, min_members=10, hops=10, exclude=[], strict=False, allow_source_us=False):
if(allow_source_us==False):
us, edges = self.upstream(source, threshold, exclude=(source + exclude))
if(allow_source_us):
us, edges = self.upstream(source, threshold, exclude=(exclude))
_, us = self.edge_threshold(edges, threshold, direction='upstream', strict=strict)
if(allow_source_us==False):
before = source + us
if(allow_source_us):
before = us
layers = []
layers.append(us)
for i in range(0,(hops-1)):
source = us
us, edges = self.upstream(source, threshold, exclude = before)
_, us = self.edge_threshold(edges, threshold, direction='upstream', strict=strict)
if((len(us)!=0) & (len(us)>=min_members)):
layers.append(us)
before = before + us
return(layers)
# checking additional threshold criteria after identifying neurons over summed threshold
# left and right are only necessary when nonpaired neurons are included
def edge_threshold(self, edges, threshold, direction, strict=False, include_nonpaired=True, left=[], right=[]):
adj = self.adj_inter.copy()
all_edges = []
for edge in edges:
print(edge)
specific_edges = adj.loc[(slice(None), edge[0]), (slice(None), edge[1])]
us_pair_status = adj.loc[(slice(None), slice(None), edge[0]), :].index[0][0]
ds_pair_status = adj.loc[(slice(None), slice(None), edge[1]), :].index[0][0]
# note that edge weights in 'left', 'right' columns refer to %input onto the dendrite of the left or right hemisphere downstream neuron
# the 'type' column indicates whether the edge is contralateral/ipsilateral (this can allow one to determine whether the signal originated on the left or right side if that's important)
# note: the split_paired_edges() method takes the output of threshold_edge_list() and splits these paired edges so that it becomes more explicit which hemisphere the upstream neuron belongs to
# check for paired connections
if((us_pair_status == 'pairs') & (ds_pair_status == 'pairs')):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0,0], specific_edges.iloc[1,1], False, 'ipsilateral', 'paired','paired'],
[edge[0], edge[1], specific_edges.iloc[1,0], specific_edges.iloc[0,1], False, 'contralateral', 'paired','paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(strict==True):
# is each edge weight over threshold?
for index in specific_edges.index:
if((specific_edges.loc[index].left>threshold) & (specific_edges.loc[index].right>threshold)):
specific_edges.loc[index, 'overthres'] = True
if(strict==False):
# is average edge weight over threshold
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)/2) > threshold):
specific_edges.loc[index, 'overthres'] = True
# are both edges present?
for index in specific_edges.index:
if((specific_edges.loc[index].left==0) | (specific_edges.loc[index].right==0)):
specific_edges.loc[index, 'overthres'] = False
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges to downstream nonpaired neurons
if((us_pair_status == 'pairs') & (ds_pair_status == 'nonpaired') & (include_nonpaired==True)):
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0].values[0], 0, False, 'ipsilateral', 'paired', 'nonpaired'],
[edge[0], edge[1], specific_edges.iloc[1].values[0], 0, False, 'contralateral', 'paired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, specific_edges.iloc[0].values[0], False, 'contralateral', 'paired', 'nonpaired'],
[edge[0], edge[1], 0, specific_edges.iloc[1].values[0], False, 'ipsilateral', 'paired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# don't check both because one will be missing
# strict==True/False doesn't apply for the same reason
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)>threshold)):
specific_edges.loc[index, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges from upstream nonpaired neurons
if((us_pair_status == 'nonpaired') & (ds_pair_status == 'pairs') & (include_nonpaired==True)):
if(edge[0] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0, 0], 0, False, 'ipsilateral', 'nonpaired', 'paired'],
[edge[0], edge[1], 0, specific_edges.iloc[0, 1], False, 'contralateral', 'nonpaired', 'paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[0] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0, 0], 0, False, 'contralateral', 'nonpaired', 'paired'],
[edge[0], edge[1], 0, specific_edges.iloc[0, 1], False, 'ipsilateral', 'nonpaired', 'paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# don't check both because one will be missing
# strict==True/False doesn't apply for the same reason
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)>threshold)):
specific_edges.loc[index, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges between two nonpaired neurons
if((us_pair_status == 'nonpaired') & (ds_pair_status == 'nonpaired') & (include_nonpaired==True)):
edge_weight = specific_edges.values[0][0]
if(edge[0] in left):
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, edge_weight, False, 'contralateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], edge_weight, 0, False, 'ipsilateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[0] in right):
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], edge_weight, 0, False, 'contralateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, edge_weight, False, 'ipsilateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# only one edge so strict==True/False doesn't apply
if(edge_weight>threshold):
specific_edges.loc[:, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges = pd.DataFrame(all_edges, columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(direction=='downstream'):
partner_skids = np.unique(all_edges[all_edges.overthres==True].downstream_pair_id) # identify downstream pairs
if(direction=='upstream'):
partner_skids = np.unique(all_edges[all_edges.overthres==True].upstream_pair_id) # identify upstream pairs
partner_skids = [x[2] for x in adj.loc[(slice(None), partner_skids), :].index] # convert from pair_id to skids
return(all_edges, partner_skids)
# select edges from results of edge_threshold that are over threshold; include non_paired edges as specified by user
def select_edges(self, pair_id, threshold, edges_only=False, include_nonpaired=[], exclude_nonpaired=[], left=[], right=[]):
_, ds, ds_edges = self.downstream(pair_id, threshold)
ds_edges, _ = self.edge_threshold(ds_edges, threshold, 'downstream', include_nonpaired=include_nonpaired, left=left, right=right)
overthres_ds_edges = ds_edges[ds_edges.overthres==True]
overthres_ds_edges.reset_index(inplace=True)
overthres_ds_edges.drop(labels=['index', 'overthres'], axis=1, inplace=True)
if(edges_only==False):
return(overthres_ds_edges, np.unique(overthres_ds_edges.downstream_pair_id))
if(edges_only):
return(overthres_ds_edges)
# generate edge list for whole matrix with some threshold
def threshold_edge_list(self, all_sources, matrix_nonpaired, threshold, left, right):
all_edges = Parallel(n_jobs=-1)(delayed(self.select_edges)(pair, threshold, edges_only=True, include_nonpaired=matrix_nonpaired, left=left, right=right) for pair in tqdm(all_sources))
all_edges_combined = [x for x in all_edges if type(x)==pd.DataFrame]
all_edges_combined = pd.concat(all_edges_combined, axis=0)
all_edges_combined.reset_index(inplace=True, drop=True)
return(all_edges_combined)
# convert paired edge list with pair-wise threshold back to normal edge list, input from threshold_edge_list()
# note that neurons with bilateral dendrites aren't treated in any special way, so they may be indicated as contralateral edges even if that's inaccurate/complicated
def split_paired_edges(self, all_edges_combined, left, right, flip_weirdos=True):
pairs = self.pairs
# note that edge_weights are from the perspective of the downstream neuron, i.e. %input onto their dendrite
all_edges_combined_split = []
for i in range(len(all_edges_combined.index)):
row = all_edges_combined.iloc[i]
if((row.upstream_status=='paired') & (row.downstream_status=='paired')):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), Promat.identify_pair(row.downstream_pair_id, pairs), 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
# note that pair_ids are really skeleton IDs for nonpaired neurons; this allows one to compare to left/right annotations
# this comparison is required because the location of nonpaired -> pair edges depends on whether the nonpaired is left or right
if((row.upstream_status=='nonpaired') & (row.downstream_status=='paired')):
if(row.upstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.upstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
# use the downstream_pair_id because this is really just skeleton ID for nonpaired neurons
# therefore one can compare to left/right annotations to determine which hemisphere it belongs to
if((row.upstream_status=='paired') & (row.downstream_status=='nonpaired')):
if(row.downstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.downstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
# use the downstream_pair_id because this is really just skeleton ID for nonpaired neurons
# therefore one can compare to left/right annotations to determine which hemisphere it belongs to
if((row.upstream_status=='nonpaired') & (row.downstream_status=='nonpaired')):
if(row.downstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.downstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split = pd.DataFrame(all_edges_combined_split, columns = ['upstream_skid', 'downstream_skid', 'upstream_side', 'downstream_side', 'edge_weight', 'type', 'upstream_status', 'downstream_status'])
return(all_edges_combined_split)
# generate edge list for whole matrix
def edge_list(self, exclude_loops=False):
edges = []
for i in range(len(self.adj.index)):
for j in range(len(self.adj.columns)):
if(exclude_loops):
if((self.adj.iloc[i, j]>0) & (i!=j)):
edges.append([self.adj.index[i], self.adj.columns[j]])
if(exclude_loops==False):
if(self.adj.iloc[i, j]>0):
edges.append([self.adj.index[i], self.adj.columns[j]])
edges = pd.DataFrame(edges, columns = ['upstream_pair_id', 'downstream_pair_id'])
return(edges)
# generate a binary connectivity matrix that displays number of hops between neuron types
def hop_matrix(self, layer_id_skids, source_leftid, destination_leftid, include_start=False):
mat = pd.DataFrame(np.zeros(shape = (len(source_leftid), len(destination_leftid))),
index = source_leftid,
columns = destination_leftid)
for index in mat.index:
data = layer_id_skids.loc[index, :]
for i, hop in enumerate(data):
for column in mat.columns:
if(column in hop):
if(include_start==True): # if the source of the hop signal is the first layer
mat.loc[index, column] = i
if(include_start==False): # if the first layer is the first layer downstream of source
mat.loc[index, column] = i+1
max_value = mat.values.max()
mat_plotting = mat.copy()
for index in mat_plotting.index:
for column in mat_plotting.columns:
if(mat_plotting.loc[index, column]>0):
mat_plotting.loc[index, column] = 1 - (mat_plotting.loc[index, column] - max_value)
return(mat, mat_plotting)
class Promat():
# default method to import pair list and process it to deal with duplicated neurons
@staticmethod
def get_pairs(pairs_path='data/pairs/pairs-2021-04-06.csv', flip_weirdos=True):
print(f'Path to pairs list is: {pairs_path}')
pairs = pd.read_csv(pairs_path, header = 0) # import pairs, manually determined with help from <NAME> and <NAME>'s scripts
pairs = pairs.loc[:, ['leftid', 'rightid']] # only include useful columns
# duplicated right-side neurons to throw out for simplicity
duplicated = pymaid.get_skids_by_annotation('mw duplicated neurons to delete')
duplicated_index = np.where(sum([pairs.rightid==x for x in duplicated])==1)[0]
pairs = pairs.drop(duplicated_index)
# change left/right ids of contra-contra neurons so they behave properly in downstream analysis
# these neurons have somas on one brain hemisphere and dendrites/axons on the other
# and so they functionally all completely contralateral and can therefore be considered ipsilateral neurons
if(flip_weirdos):
# identify contra-contra neurons
contra_contra = np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite'))
contra_contra_pairs = Promat.extract_pairs_from_list(contra_contra, pairs)[0]
if(len(contra_contra_pairs)>0):
# flip left/right neurons in contra-contra neurons
for index in contra_contra_pairs.index:
cc_left = contra_contra_pairs.loc[index, 'leftid']
cc_right = contra_contra_pairs.loc[index, 'rightid']
pairs.loc[pairs[pairs.leftid==cc_left].index, 'rightid'] = cc_left
pairs.loc[pairs[pairs.leftid==cc_left].index, 'leftid'] = cc_right
return(pairs)
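# Hypothetical usage sketch (requires a configured pymaid/CATMAID connection):
# pairs = Promat.get_pairs()
# pairs.head() # columns: leftid, rightid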
# returns all skids in left or right side of the brain, depending on whether side = 'left' or 'right'
def get_hemis(side=None, flip_weirdos=True):
left = pymaid.get_skids_by_annotation('mw left')
right = pymaid.get_skids_by_annotation('mw right')
if(flip_weirdos):
# identifying contra-contra neurons so they can be flipped to opposite side of brain
neurons_to_flip = np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite'))
neurons_to_flip_left = [skid for skid in neurons_to_flip if skid in left]
neurons_to_flip_right = [skid for skid in neurons_to_flip if skid in right]
# removing neurons_to_flip and adding to the other side
left = list(np.setdiff1d(left, neurons_to_flip_left)) + neurons_to_flip_right
right = list(np.setdiff1d(right, neurons_to_flip_right)) + neurons_to_flip_left
if(side=='left'): return(left)
if(side=='right'): return(right)
if(side==None): return([left, right])
# converts any df with df.index = list of skids to a multiindex with ['pair_status', 'pair_id', 'skid']
# 'pair_status': pairs / nonpaired
# 'pair_id': left skid of a pair or simply the skid of a nonpaired neuron
@staticmethod
def convert_df_to_pairwise(df, pairs=None):
if(pairs is None):
pairs = Promat.get_pairs()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(df.index, pairList = pairs)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_pairs)):
brain_pair_order.append(brain_pairs.iloc[i].leftid)
brain_pair_order.append(brain_pairs.iloc[i].rightid)
order = brain_pair_order + list(brain_nonpaired.nonpaired)
interlaced = df.loc[order, :]
index_df = pd.DataFrame([['pairs', Promat.get_paired_skids(skid, pairs)[0], skid] for skid in brain_pair_order] + [['nonpaired', skid, skid] for skid in list(brain_nonpaired.nonpaired)],
columns = ['pair_status', 'pair_id', 'skid'])
index = pd.MultiIndex.from_frame(index_df)
interlaced.index = index
return(interlaced)
# trim out neurons not currently in the brain matrix
@staticmethod
def trim_missing(skidList, brainMatrix):
trimmedList = []
for i in skidList:
if(i in brainMatrix.index):
trimmedList.append(i)
else:
print("*WARNING* skid: %i is not in whole brain matrix" % (i))
return(trimmedList)
# identify skeleton ID of hemilateral neuron pair, based on CSV pair list
@staticmethod
def identify_pair(skid, pairList):
pair_skid = []
if(skid in pairList["leftid"].values):
pair_skid = pairList["rightid"][pairList["leftid"]==skid].iloc[0]
if(skid in pairList["rightid"].values):
pair_skid = pairList["leftid"][pairList["rightid"]==skid].iloc[0]
if((skid not in pairList['rightid'].values) & (skid not in pairList['leftid'].values)):
print(f'skid {skid} is not in paired list')
pair_skid = skid
return(pair_skid)
# returns paired skids in array [left, right]; can input either left or right skid of a pair to identify
@staticmethod
def get_paired_skids(skid, pairList):
if(type(skid)!=list):
if(skid in pairList["leftid"].values):
pair_right = pairList["rightid"][pairList["leftid"]==skid].iloc[0]
pair_left = skid
if(skid in pairList["rightid"].values):
pair_left = pairList["leftid"][pairList["rightid"]==skid].iloc[0]
pair_right = skid
if((skid in pairList["leftid"].values) == False and (skid in pairList["rightid"].values) == False):
print(f"skid {skid} is not in paired list")
return([skid])
return([pair_left, pair_right])
if(type(skid)==list):
data = [Promat.get_paired_skids(x, pairList) for x in skid]
df = pd.DataFrame(data, columns = ['leftid', 'rightid'])
return(df)
# converts array of skids into left-right pairs in separate columns
# puts unpaired and nonpaired neurons in different lists
@staticmethod
def extract_pairs_from_list(skids, pairList):
pairs = pd.DataFrame([], columns = ['leftid', 'rightid'])
unpaired = pd.DataFrame([], columns = ['unpaired'])
nonpaired = pd.DataFrame([], columns = ['nonpaired'])
for i in skids:
if((int(i) not in pairList.leftid.values) & (int(i) not in pairList.rightid.values)):
nonpaired = nonpaired.append({'nonpaired': int(i)}, ignore_index=True)
continue
if((int(i) in pairList["leftid"].values) & (Promat.get_paired_skids(int(i), pairList)[1] in skids)):
pair = Promat.get_paired_skids(int(i), pairList)
pairs = pairs.append({'leftid': pair[0], 'rightid': pair[1]}, ignore_index=True)
if(((int(i) in pairList["leftid"].values) & (Promat.get_paired_skids(int(i), pairList)[1] not in skids)|
(int(i) in pairList["rightid"].values) & (Promat.get_paired_skids(int(i), pairList)[0] not in skids))):
unpaired = unpaired.append({'unpaired': int(i)}, ignore_index=True)
pairs = pd.DataFrame(pairs)
unpaired = pd.DataFrame(unpaired)
nonpaired = pd.DataFrame(nonpaired)
return(pairs, unpaired, nonpaired)
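# Hypothetical usage sketch: split an arbitrary skid list into left/right pairs,
# neurons whose partner is missing from the list, and neurons with no known
# partner ('skids' below is an assumed list of skeleton IDs):
# pairs_df, unpaired_df, nonpaired_df = Promat.extract_pairs_from_list(skids, pairList=pairs)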
# loads neurons pairs from selected pymaid annotation
@staticmethod
def load_pairs_from_annotation(annot, pairList, return_type='pairs', skids=None, use_skids=False):
if(use_skids==False):
skids = pymaid.get_skids_by_annotation(annot)
pairs = Promat.extract_pairs_from_list(skids, pairList)
if(return_type=='pairs'):
return(pairs[0])
if(return_type=='unpaired'):
return(pairs[1])
if(return_type=='nonpaired'):
return(pairs[2])
if(return_type=='all_pair_ids'):
pairs_pair_id = list(pairs[0].leftid)
nonpaired_pair_id = list(pairs[2].nonpaired)
combined = pairs_pair_id + nonpaired_pair_id
return(combined)
# include nonpaired neurons and ['leftid', 'rightid'] columns; duplicated leftid/rightid for nonpaired neurons
if(return_type=='all_pair_ids_bothsides'):
pairs_pair_id = list(pairs[0].leftid)
nonpaired_pair_id = list(pairs[2].nonpaired)
combined_left = pairs_pair_id + nonpaired_pair_id
pairs_id_right = list(pairs[0].rightid)
combined_right = pairs_id_right + nonpaired_pair_id
combined = pd.DataFrame(zip(combined_left, combined_right), columns=['leftid', 'rightid'])
return(combined)
# loads neurons pairs from selected pymaid annotation
@staticmethod
def get_pairs_from_list(skids, pairList, return_type='pairs'):
pairs = Promat.extract_pairs_from_list(skids, pairList)
if(return_type=='pairs'):
return(pairs[0])
if(return_type=='unpaired'):
return(pairs[1])
if(return_type=='nonpaired'):
return(pairs[2])
if(return_type=='all_pair_ids'):
pairs_pair_id = list(pairs[0].leftid)
nonpaired_pair_id = list(pairs[2].nonpaired)
combined = pairs_pair_id + nonpaired_pair_id
return(combined)
# generates interlaced left-right pair adjacency matrix with nonpaired neurons at bottom and right
@staticmethod
def interlaced_matrix(adj_df, pairs):
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(adj_df.index, pairs)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_pairs)):
brain_pair_order.append(brain_pairs.iloc[i].leftid)
brain_pair_order.append(brain_pairs.iloc[i].rightid)
interlaced_mat = adj_df.loc[brain_pair_order + list(brain_nonpaired.nonpaired), brain_pair_order + list(brain_nonpaired.nonpaired)]
index_df = pd.DataFrame([['pairs', skid] for skid in brain_pair_order] + [['nonpaired', skid] for skid in list(brain_nonpaired.nonpaired)],
columns = ['pair_status', 'skid'])
index = pd.MultiIndex.from_frame(index_df)
interlaced_mat.index = index
interlaced_mat.columns = index
return(interlaced_mat)
# converts matrix to fraction_input matrix by dividing every column by dendritic input
@staticmethod
def fraction_input_matrix(adj_df, mg, axon=False):
for column in adj_df.columns:
if(axon):
axon_input = mg.meta.loc[column].axon_input
adj_df.loc[:, column] = adj_df.loc[:, column]/axon_input
if(axon==False):
dendrite_input = mg.meta.loc[column].dendrite_input
adj_df.loc[:, column] = adj_df.loc[:, column]/dendrite_input
return(adj_df)
# converts a interlaced left-right pair adjacency matrix into a binary connection matrix based on some threshold
@staticmethod
def binary_matrix(adj, threshold, total_threshold):
oddCols = np.arange(0, len(adj.columns), 2)
oddRows = np.arange(0, len(adj.index), 2)
# column names are the skid of left neuron from pair
binMat = np.zeros(shape=(len(oddRows),len(oddCols)))
binMat = pd.DataFrame(binMat, columns = adj.columns[oddCols], index = adj.index[oddRows])
for i in oddRows:
for j in oddCols:
sum_all = adj.iat[i, j] + adj.iat[i+1, j+1] + adj.iat[i+1, j] + adj.iat[i, j+1]
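# a pair-to-pair connection is scored if both ipsilateral edges (i,j) and
# (i+1,j+1), or both contralateral edges (i+1,j) and (i,j+1), pass 'threshold'
# and the four edges together pass 'total_threshold'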
if(adj.iat[i, j] >= threshold and adj.iat[i+1, j+1] >= threshold and sum_all >= total_threshold):
binMat.iat[int(i/2), int(j/2)] = 1
if(adj.iat[i+1, j] >= threshold and adj.iat[i, j+1] >= threshold and sum_all >= total_threshold):
binMat.iat[int(i/2), int(j/2)] = 1
return(binMat)
# summing input from a group of upstream neurons
# generating DataFrame with sorted leftid, rightid, summed-input left, summed-input right
# SOMETHING IS WRONG***
# It works as a local function within a particular .py file, but not when called through process_matrix.py
@staticmethod
def summed_input(group_skids, matrix, pairList):
submatrix = matrix.loc[group_skids, :]
submatrix = submatrix.sum(axis = 0)
cols = ['leftid', 'rightid', 'leftid_input', 'rightid_input']
summed_paired = []
for i in range(0, len(pairList['leftid'])):
if(pairList['leftid'][i] in submatrix.index):
left_identifier = pairList['leftid'][i]
left_sum = submatrix.loc[left_identifier]
right_identifier = Promat.identify_pair(pairList['leftid'][i], pairList)
right_sum = submatrix.loc[right_identifier]
summed_paired.append([left_identifier, right_identifier, left_sum, right_sum])
summed_paired = pd.DataFrame(summed_paired, columns= cols)
return(summed_paired)
# identifies downstream neurons based on summed threshold (summed left/right input) and low_threshold (required edge weight on weak side)
@staticmethod
def identify_downstream(sum_df, summed_threshold, low_threshold):
downstream = []
for i in range(0, len(sum_df['leftid'])):
if((sum_df['leftid_input'].iloc[i] + sum_df['rightid_input'].iloc[i])>=summed_threshold):
if(sum_df['leftid_input'].iloc[i]>sum_df['rightid_input'].iloc[i] and sum_df['rightid_input'].iloc[i]>=low_threshold):
downstream.append(sum_df.iloc[i])
if(sum_df['rightid_input'].iloc[i]>sum_df['leftid_input'].iloc[i] and sum_df['leftid_input'].iloc[i]>=low_threshold):
downstream.append(sum_df.iloc[i])
return(pd.DataFrame(downstream))
# compares neuron similarity based on inputs, outputs, or both
# outputs a matrix where each row/column is a pair of neurons
# NOT currently working
@staticmethod
def similarity_matrix(matrix_path, type):
matrix = pd.read_csv(matrix_path, header=0, index_col=0, quotechar='"', skipinitialspace=True)
oddCols = np.arange(0, len(matrix.columns), 2)
oddRows = np.arange(0, len(matrix.index), 2)
# column names are the skid of left neuron from pair
sim_matrix = np.zeros(shape=(len(oddRows),len(oddCols)))
sim_matrix = pd.DataFrame(sim_matrix, columns = matrix.columns[oddCols], index = matrix.index[oddRows])
return(sim_matrix)
@staticmethod
def writeCSV(data, path):
with open(path, mode='w') as file:
writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in data:
writer.writerow(row)
print("Write complete")
return()
@staticmethod
def pull_adj(type_adj, subgraph):
adj = pd.read_csv(f'data/adj/all-neurons_{type_adj}.csv', index_col = 0).rename(columns=int)
if(subgraph=='brain'):
brain = pymaid.get_skids_by_annotation('mw brain paper clustered neurons')
adj = adj.loc[np.intersect1d(adj.index, brain), np.intersect1d(adj.index, brain)]
if(subgraph=='brain and accessory'):
brain = pymaid.get_skids_by_annotation('mw brain paper clustered neurons') + pymaid.get_skids_by_annotation('mw brain accessory neurons')
adj = adj.loc[np.intersect1d(adj.index, brain), np.intersect1d(adj.index, brain)]
#if(subgraph=='brain-A1'):
#if(subgraph=='A1'):
return(adj)
def pull_edges(type_edges, pairs_combined, select_neurons=[]):
if(pairs_combined):
edges = pd.read_csv(f'data/edges_threshold/{type_edges}_all-paired-edges.csv', index_col=0)
if(len(select_neurons)>0):
indices_us = [True if x in select_neurons else False for x in edges.upstream_pair_id.to_list()]
indices_ds = [True if x in select_neurons else False for x in edges.downstream_pair_id.to_list()]
edges = edges.loc[np.logical_and(indices_us, indices_ds), :]
if(pairs_combined==False):
edges = pd.read_csv(f'data/edges_threshold/pairwise-threshold_{type_edges}_all-edges.csv', index_col=0)
if(len(select_neurons)>0):
indices_us = [True if x in select_neurons else False for x in edges.upstream_skid.to_list()]
indices_ds = [True if x in select_neurons else False for x in edges.downstream_skid.to_list()]
edges = edges.loc[np.logical_and(indices_us, indices_ds), :]
return(edges)
# recursive function that identifies all downstream partners X-hops away from source
# uses pregenerated edge list from threshold_edge_list() or the split-pair version
@staticmethod
def downstream_multihop(edges, sources, hops, hops_iter=1, pairs_combined=False, exclude_source=True, exclude=[], exclude_skids_from_source=[]):
if(pairs_combined):
id1 = 'upstream_pair_id'
id2 = 'downstream_pair_id'
if(pairs_combined==False):
id1 = 'upstream_skid'
id2 = 'downstream_skid'
edges_df = edges.set_index(id1)
if(hops_iter>1): sources = list(np.setdiff1d(sources, exclude_skids_from_source)) # exclude user-selected neurons from sources
ds = list(np.unique(edges_df.loc[np.intersect1d(sources, edges_df.index), id2]))
if(exclude_source): ds = list(np.setdiff1d(ds, sources)) # exclude source from downstream
ds = list(np.setdiff1d(ds, exclude)) # exclude user-selected neurons from downstream partners
if(hops_iter==hops):
return([ds])
else:
hops_iter += 1
return([ds] + Promat.downstream_multihop(edges=edges, sources=ds, hops=hops, hops_iter=hops_iter))
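# Hypothetical usage sketch: walk two hops downstream of a set of source pair_ids
# using a pregenerated paired edge list ('ad' and 'source_pairids' are assumptions):
# edges = Promat.pull_edges('ad', pairs_combined=True)
# hop1, hop2 = Promat.downstream_multihop(edges, sources=source_pairids, hops=2, pairs_combined=True)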
# recursive function that identifies all upstream partners X-hops away from source
# uses pregenerated edge list from threshold_edge_list() or the split-pair version
@staticmethod
def upstream_multihop(edges, sources, hops, hops_iter=1, pairs_combined=False, exclude_source=True, exclude=[], exclude_skids_from_source=[]):
if(pairs_combined):
id1 = 'downstream_pair_id'
id2 = 'upstream_pair_id'
if(pairs_combined==False):
id1 = 'downstream_skid'
id2 = 'upstream_skid'
edges_df = edges.set_index(id1)
if(hops_iter>1): sources = list(np.setdiff1d(sources, exclude_skids_from_source)) # exclude user-selected neurons from sources
us = list(np.unique(edges_df.loc[np.intersect1d(sources, edges_df.index), id2]))
if(exclude_source): us = list(np.setdiff1d(us, sources)) # exclude source from upstream
us = list(np.setdiff1d(us, exclude)) # exclude user-selected neurons from upstream partners
if(hops_iter==hops):
return([us])
else:
hops_iter += 1
return([us] + Promat.upstream_multihop(edges=edges, sources=us, hops=hops, hops_iter=hops_iter, exclude_source=exclude_source, exclude=exclude))
@staticmethod
def find_all_partners(pairids, edgelist, all_paired_skids=True):
pairs = Promat.get_pairs()
data = []
for pairid in pairids:
if(pairid in list(edgelist.upstream_pair_id)): # make sure pairid has downstream partners (TRUE if it is in upstream_pair_id list); if not, manually set empty list
downstream = list(np.unique(edgelist.set_index('upstream_pair_id').loc[pairid, 'downstream_pair_id']))
else: downstream = []
if(pairid in list(edgelist.downstream_pair_id)): # make sure pairid has upstream partners (TRUE if it is in downstream_pair_id list); if not, manually set empty list
upstream = list(np.unique(edgelist.set_index('downstream_pair_id').loc[pairid, 'upstream_pair_id']))
else: upstream = []
if(all_paired_skids):
downstream = Promat.get_paired_skids(downstream, pairs)
downstream = list(downstream.leftid) + list(downstream.rightid)
upstream = Promat.get_paired_skids(upstream, pairs)
upstream = list(upstream.leftid) + list(upstream.rightid)
pair = Promat.get_paired_skids(pairid, pairs)
data.append([pairid, pair, upstream, downstream])
if(all_paired_skids):
df = pd.DataFrame(data, columns = ['source_pairid', 'source_pair', 'upstream', 'downstream'])
return(df)
@staticmethod
def find_all_partners_hemispheres(pairids, edgelist, all_paired_skids=True):
pairs = Promat.get_pairs()
data = []
for pairid in pairids:
# identify downstream partners
if(pairid in list(edgelist.upstream_pair_id)): # make sure pairid has downstream partners (TRUE if it is in upstream_pair_id list); if not, manually set empty list
downstream = edgelist.set_index('upstream_pair_id').loc[pairid, :]
if(type(downstream)==pd.Series): downstream = pd.DataFrame([downstream]) # convert to pd.DataFrame if it is a Series (meaning only one partner); causes issues otherwise
downstream_ipsi = list(np.unique(downstream[downstream.type=='ipsilateral']['downstream_pair_id']))
downstream_contra = list(np.unique(downstream[downstream.type=='contralateral']['downstream_pair_id']))
else:
downstream_ipsi = []
downstream_contra = []
# identify upstream partners
if(pairid in list(edgelist.downstream_pair_id)): # make sure pairid has upstream partners (TRUE if it is in downstream_pair_id list); if not, manually set empty list
upstream = edgelist.set_index('downstream_pair_id').loc[pairid, :]
if(type(upstream)==pd.Series): upstream = | pd.DataFrame([upstream]) | pandas.DataFrame |
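# Standalone illustration (not part of the original class) of why the pd.Series checks
# above are needed: .loc with an index label that matches exactly one row returns a
# Series, while a label matching several rows returns a DataFrame.
import pandas as pd
_edges_demo = pd.DataFrame({'upstream_pair_id': [1, 2, 2],
                            'downstream_pair_id': [10, 20, 30],
                            'type': ['ipsilateral', 'contralateral', 'ipsilateral']})
_single = _edges_demo.set_index('upstream_pair_id').loc[1, :]  # one match -> pd.Series
_multi = _edges_demo.set_index('upstream_pair_id').loc[2, :]   # two matches -> pd.DataFrame
assert isinstance(_single, pd.Series) and isinstance(_multi, pd.DataFrame)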
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
# todo, should be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
# val_data is "same" as train_data, which is made based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same'}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 30, 30,
check_examples=False,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_val_data(self):
# we don't want to have any validation data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 29,
true_train_y=tty,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_no_val_data_with_random(self):
# we don't want to have any validation data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = | pd.DataFrame(data, columns=['a', 'b', 'c']) | pandas.DataFrame |
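# Standalone sketch (illustrative only) of the data layout used in these tests:
# np.arange(300).reshape(-1, 100).transpose() yields 100 rows whose columns 'a', 'b', 'c'
# hold 0..99, 100..199 and 200..299 respectively, which is why the expected targets above
# are slices such as np.arange(202, 250) for column 'c'.
import numpy as np
_demo = np.arange(300, dtype=np.int32).reshape(-1, 100).transpose()
assert _demo[0].tolist() == [0, 100, 200]
assert _demo[99].tolist() == [99, 199, 299]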
import streamlit as st
import pandas as pd
import math
import altair as alt
import numpy as np
import pydeck as pdk
st.beta_set_page_config(layout="wide")
# Function to remove dollar from price (TBD - move to cleaning later)
def remove_dollar(price):
return price.replace('$', '').replace(',', '')
# Function to add month to the dataframe
def get_month(date):
separator = "/"
if '-' in date:
separator = '-'
month_x = str(date).split(separator)[1]
return month_x
# Function to add year to the dataframe
def get_year(date):
separator = "/"
if '-' in date:
separator = '-'
year = str(date).split(separator)[0]
return year
def get_coordinates(lat, lon):
coordinates_list = list()
coordinates_list.append(lat)
coordinates_list.append(lon)
return coordinates_list
@st.cache # add caching so we load the data only once
def load_data(): # Load the airbnb data acquired from InsideAirbnb.com
# Server
root_path = "https://raw.githubusercontent.com/CMU-IDS-2020/a3-data-diggers/master/data/"
reviews = {'NYC': pd.read_csv(root_path + 'NYC_reviews.csv')}
NYC_listings = {'01': pd.read_csv(root_path + '2020/NYC/listings_01.csv'),
'02': pd.read_csv(root_path + '2020/NYC/listings_02.csv'),
'03': pd.read_csv(root_path + '2020/NYC/listings_03.csv'),
'04': pd.read_csv(root_path + '2020/NYC/listings_04.csv'),
'05': pd.read_csv(root_path + '2020/NYC/listings_05.csv'),
'06': pd.read_csv(root_path + '2020/NYC/listings_06.csv'),
'07': pd.read_csv(root_path + '2020/NYC/listings_07.csv'),
'08': pd.read_csv(root_path + '2020/NYC/listings_08.csv'),
'09': pd.read_csv(root_path + '2020/NYC/listings_09.csv')}
# Calculating + appending month and year to the reviews dataframe
for key in reviews.keys():
rdf = reviews[key]
rdf["Month"] = rdf.apply(lambda row: get_month(row["date"]), axis=1)
rdf["year"] = rdf.apply(lambda row: get_year(row["date"]), axis=1)
reviews[key] = rdf
# Removing dollar from price, removing nan
for key in NYC_listings.keys():
ldf = NYC_listings[key]
ldf["price"] = ldf["price"].fillna(0)
ldf["price"] = ldf.apply(lambda row: remove_dollar(row["price"]), axis=1)
ldf["price"] = pd.to_numeric(ldf["price"])
ldf['neighbourhood_group_cleansed'] = ldf['neighbourhood_group_cleansed'].fillna("other")
ldf['room_type'] = ldf['room_type'].fillna("other")
ldf['bedrooms'] = ldf['bedrooms'].fillna(0)
NYC_listings[key] = ldf
# Load in the Covid-19 data
covid_data = {'09': | pd.read_csv(root_path + '2020/COVID/covid_data_cleaned_09.csv') | pandas.read_csv |
from ..workflow_obj import workflow_obj
from ..ui import get_run_data
from ..formatter import add_cols, remove_blanks, remove_pools, get_pos
import pandas as pd
from workflow.logger import Script_Logger
class WorkflowObj2(workflow_obj):
# constructor
def __init__(self):
self.id = "WF_2"
self.log = Script_Logger("WF_2_Parse_Run_Data")
self.log.start_log("Initialization of WF_2_successful")
# methods
def get_json(self):
super().get_json(2)
self.log.write_log("get_json","Argument passed was 2")
def get_info_from_json(self, runId):
self.log.write_log("get_info_from_json","running")
run_data, self.machine_num, self.wgs_run_date, \
self.day_run_num, self.platform = get_run_data(runId)
self.log.write_log("get_info_from_json","get_run_data completed, now cleaning up controls")
if self.include_controls:
neg = False
pos = False
# store away the control values for the run
for sample_num in range(len(run_data['hsn'])):
if "neg" in run_data['hsn'][sample_num].lower():
self.neg_ctrl_pass = (run_data['percent_cvg'][sample_num] <= self.neg_percent_cvg_cutoff)
self.neg_name = "1" + self.wgs_run_date[:-4].replace("/", "") + self.wgs_run_date[-2:] + str(self.machine_num) + str(self.day_run_num)
run_data['hsn'][sample_num] = self.neg_name
neg = True
if "pos" in run_data['hsn'][sample_num].lower():
self.pos_ctrl_pass = (run_data['percent_cvg'][sample_num] >= self.percent_cvg_cutoff)
self.pos_name = "2" + self.wgs_run_date[:-4].replace("/", "") + self.wgs_run_date[-2:] + str(self.machine_num) + str(self.day_run_num)
run_data['hsn'][sample_num] = self.pos_name
pos = True
if neg and pos:
break
self.log.write_log("get_info_from_json"," creating df for qc/research table")
# create dataframe for QC/Research table
self.df_qc = pd.DataFrame.from_dict(run_data)
def format_dataframe(self):
self.log.write_log("format_dataframe","running")
self.df_qc = remove_pools(self.df_qc, 'hsn')
self.df_qc = remove_blanks(self.df_qc, 'hsn')
# add columns
self.log.write_log("format_dataframe","adding columns")
self.df_qc['position'] = self.df_qc.apply(lambda row: get_pos(row['position']), axis=1)
self.df_qc = add_cols(obj=self, \
df=self.df_qc, \
col_lst=self.add_col_lst, \
col_func_map=self.col_func_map)
self.df_qc = self.df_qc.astype({"wgs_run_date": str})
if self.include_controls:
self.df_qc=add_cols(obj=self, \
df=self.df_qc, \
col_lst=self.add_col_lst_ctrl, \
col_func_map=self.col_func_map)
else:
self.df_qc['pos_pass'] = 0
self.df_qc['neg_pass'] = 0
self.df_qc['reportable'] = 0
self.log.write_log("format_dataframe","creating results df")
# create dataframe for results table
self.df_results = self.df_qc.copy()
# sort/remove columns to match table 1
# sort/remove columns to match table 2
self.df_results = pd.DataFrame(self.df_results[self.df_results_cols])
self.df_qc = | pd.DataFrame(self.df_qc[self.df_qc_cols]) | pandas.DataFrame |
# ---------------------------------
# Name : adjmatvec.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Licence : See licences
# ---------------------------------
import pandas as pd
import numpy as np
from ismember import ismember
# %% Convert adjacency matrix to vector
def vec2adjmat(source, target, weight=None, symmetric=True):
"""Convert source and target into adjacency matrix.
Parameters
----------
source : list
The source node.
target : list
The target node.
weight : list of int
The Weights between the source-target values
symmetric : bool, optional
Make the adjacency matrix symmetric with the same number of rows as columns. The default is True.
Returns
-------
pd.DataFrame
adjacency matrix.
Examples
--------
>>> source=['Cloudy','Cloudy','Sprinkler','Rain']
>>> target=['Sprinkler','Rain','Wet_Grass','Wet_Grass']
>>> vec2adjmat(source, target)
>>>
>>> weight=[1,2,1,3]
>>> vec2adjmat(source, target, weight=weight)
"""
if len(source)!=len(target): raise Exception('[hnet] >Source and Target should have equal elements.')
if weight is None: weight = [1]*len(source)
df = | pd.DataFrame(np.c_[source, target], columns=['source','target']) | pandas.DataFrame |
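# Sketch of one possible continuation (assumed, not the library's actual code): pivot the
# source/target/weight vectors into an adjacency matrix with pd.crosstab and, if requested,
# reindex it to a square symmetric layout.
def _vec2adjmat_sketch(source, target, weight=None, symmetric=True):
    if weight is None:
        weight = [1] * len(source)
    adjmat = pd.crosstab(pd.Series(source, name='source'),
                         pd.Series(target, name='target'),
                         values=weight, aggfunc='sum').fillna(0)
    if symmetric:
        labels = sorted(set(source) | set(target))
        adjmat = adjmat.reindex(index=labels, columns=labels, fill_value=0)
    return adjmat
# Example: _vec2adjmat_sketch(['Cloudy', 'Cloudy', 'Sprinkler', 'Rain'],
#                             ['Sprinkler', 'Rain', 'Wet_Grass', 'Wet_Grass'],
#                             weight=[1, 2, 1, 3])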
import typing as tp
import collections
import itertools as it
import string
import shutil
import tempfile
import os
from urllib import request
import numpy as np
import pandas as pd
import static_frame as sf
from static_frame.performance.perf_test import PerfTest
class SampleData:
_store = {}
URL_CSV = 'https://data.ny.gov/api/views/xe9x-a24f/rows.csv?accessType=DOWNLOAD'
FP_CSV = '/tmp/sf_pydata_2018.csv'
URL_JSON = 'https://jsonplaceholder.typicode.com/photos'
@classmethod
def create(cls):
if not os.path.exists(cls.FP_CSV):
with request.urlopen(cls.URL_CSV) as response:
with open(cls.FP_CSV, 'w') as f:
f.write(response.read().decode('utf-8'))
cls._store['data_csv_fp'] = cls.FP_CSV
cls._store['data_json_url'] = cls.URL_JSON
labels_src = list(''.join(x) for x in it.combinations(string.ascii_lowercase, 4))
assert len(labels_src) > 10000
index = labels_src[:10000]
columns = labels_src[:1000]
data_float = np.random.rand(len(index), len(columns))
# alt floats, Bools
data_func = [
lambda: np.random.rand(len(index)),
lambda: np.random.randint(-1, 1, len(index)).astype(bool)
]
cls._store['index'] = index
cls._store['index_target'] = [idx for idx in index if 'd' in idx]
cls._store['columns'] = columns
cls._store['columns_target'] = [c for c in columns if 'd' in c]
cls._store['data_float'] = data_float
cls._store['data_func'] = data_func
cls._store['sf.FrameFloat'] = sf.Frame(data_float, index=index, columns=columns)
cls._store['pd.FrameFloat'] = | pd.DataFrame(data_float, index=index, columns=columns) | pandas.DataFrame |
import json
import math
import os
import random
import sys
import time
import warnings
from functools import reduce
from itertools import combinations, product
from operator import add
from typing import List, Sequence, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pretty_errors
import scipy.optimize as sco
import seaborn as sns
import statsmodels.api as sm
from pyecharts import options as opts
from pyecharts.charts import Bar, Grid, Kline, Line
from pyecharts.commons.utils import JsCode
from statsmodels import regression
from tqdm import tqdm, trange
sys.path.append(os.path.dirname(__file__) + os.sep + '../')
try:
from ..data.Stock import StockData
from ..log.log import hide, makedir, progress_bar, show, slog, sprint
except:
from data.Stock import StockData
from log.log import hide, makedir, progress_bar, show, slog, sprint
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
plt.rcParams['font.size'] = 13
class Markovitz(object):
'''
Portfolio investment weights\n
names=['贵州茅台', '隆基股份', '五粮液']\n
start_date='2021-05-01'\n
end_date='2021-11-01'\n
frequency='d' --> d/w/m\n
rfr=0.023467/365\n
funds=10000000\n
path --> default cache directory is ".\\Markovitz cache\\"; pass False to disable caching
'''
def __init__(self, names=['比亚迪', '阳光电源', '璞泰来', '紫光国微', '盛新锂能'],
start_date='2021-05-01',
end_date='2021-11-01',
frequency='d',
rfr=0.023467,
funds=10000000,
path='.\\Markovitz cache\\'):
self.names = names
self.lens = len(names)
self.start_date = start_date
self.end_date = end_date
self.frequency = frequency
self.rfr = (rfr*100) / \
{'d': 365, 'w': 52, 'm': 30}[frequency]
self.funds = funds
self.path = path
if self.path:
makedir(self.path, '')
sprint('Initializing...')
if not self.path:
sd = StockData(names=self.names, start_date=self.start_date,
end_date=self.end_date, frequency=self.frequency)
self.datas = sd.stocks_data()
else:
try:
self.datas = pd.read_csv(
f'{self.path}\\stock data\\stocks_data.csv')
except:
sd = StockData(names=self.names, start_date=self.start_date,
end_date=self.end_date, frequency=self.frequency, path=self.path)
self.datas = sd.stocks_data()
self.datas.index = self.datas['name']
self.data = self.datas.reset_index(drop=True)
self.date = list(map(lambda x: str(x)[:10], self.data.date.unique()))
self.first_date = self.date[0]
self.last_date = self.date[-1]
# opening price on the first day
self.first_price = self.data[self.data.date == self.data.date.unique(
)[0]][['open', 'name']].set_index('name').to_dict()['open']
# closing price on the last day
self.last_price = self.data[self.data.date == self.data.date.unique(
)[-1]][['close', 'name']].set_index('name').to_dict()['close']
# maximum number of board lots (100 shares each) affordable for each stock
self.max_shares_dict = {name: math.floor(
self.funds/(shares*100)) for name, shares in self.last_price.items()}
def weights(self, number=5000):
'''
Generate random stock weights
'''
return np.random.dirichlet(np.ones(self.lens), size=number)
def calculate(self):
'''
Compute the mean returns, covariance matrix and correlation matrix
'''
data = self.data[['date', 'name', 'pctChg']]
# mean returns
data_mean = data.groupby('name').mean().T[self.names]
# covariance matrix & correlation matrix
df = pd.DataFrame()
for name in self.names:
df[name] = list(data[data['name'] == name]['pctChg'])
data_cov = df.cov()
data_corr = df.corr()
if self.path:
makedir(self.path, 'mean,cov,corr')
data_mean.T.to_csv(
f'{self.path}\\mean,cov,corr\\data_mean.csv')
data_cov.to_csv(f'{self.path}\\mean,cov,corr\\data_cov.csv')
data_corr.to_csv(f'{self.path}\\mean,cov,corr\\data_corr.csv')
return {'mean': data_mean, 'cov': data_cov, 'correlation': data_corr}
def heatmap(self, show=True):
'''
Heatmap of the return correlation coefficients
'''
if self.path:
try:
data_corr = pd.read_csv(f'{self.path}\\mean,cov,corr\\data_corr.csv').rename(
    columns={'Unnamed: 0': 'correlation'}).set_index('correlation')
except:
data_corr = self.calculate()['correlation']
else:
data = self.data[['name', 'pctChg']]
df = | pd.DataFrame() | pandas.DataFrame |
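# Standalone sketch (assumed helper, not part of the original class): given the mean-return
# vector mu, covariance matrix cov and one weight vector w from weights(), the classic
# Markowitz quantities are
#   expected return = w @ mu,   volatility = sqrt(w @ cov @ w),
# and the Sharpe ratio subtracts the per-period risk-free rate rfr.
def _portfolio_stats_sketch(w, mu, cov, rfr=0.0):
    w = np.asarray(w, dtype=float)
    ret = float(w @ np.asarray(mu, dtype=float))
    vol = float(np.sqrt(w @ np.asarray(cov, dtype=float) @ w))
    sharpe = (ret - rfr) / vol if vol > 0 else np.nan
    return ret, vol, sharpe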
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
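# Quick standalone check (illustrative only) of the arithmetic behind these expectations:
# Distribution('grp', Sum('X')) is the per-group share of sum(X), so with X = [1, 1, 1, 5]
# and grp = [A, A, B, B] the group sums are A=2 and B=6 out of 8, giving 0.25 and 0.75.
_dist_df = pd.DataFrame({'X': [1, 1, 1, 5], 'grp': ['A', 'A', 'B', 'B']})
_shares = _dist_df.groupby('grp')['X'].sum() / _dist_df['X'].sum()
assert _shares.tolist() == [0.25, 0.75]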
from hyperopt import fmin, tpe, space_eval, hp, Trials, STATUS_OK, STATUS_FAIL
from hyperopt.pyll import stochastic, scope
from os.path import join
import os
import pandas as pd
import numpy as np
@scope.define
def round_n(x, n=3):
return np.round(x, n)
def monitor_callback(params, scores, name=''):
tmp = {'NED':scores['ned'],
'Coverage': scores['coverageNS'],
'scores': scores}
tmp = {**params['disc'], **params['clustering'], **tmp}
outfile = join(params['exp_root'], 'results', name + '_expresults.csv')
if os.path.exists(outfile):
pd.DataFrame([tmp]).to_csv(outfile, mode='a', header=False)
else:
| pd.DataFrame([tmp]) | pandas.DataFrame |
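# Minimal usage sketch (assumed objective and search space, not from the original
# experiment): a monitor_callback-style score is usually wrapped in an objective that
# returns {'loss': ..., 'status': STATUS_OK} and minimised with fmin.
def _objective_sketch(args):
    ned, coverage = 0.5 * args['ned_weight'], 0.8  # placeholder scores
    return {'loss': ned - coverage, 'status': STATUS_OK}
_space_sketch = {'ned_weight': hp.uniform('ned_weight', 0.0, 1.0)}
# trials = Trials()
# best = fmin(_objective_sketch, _space_sketch, algo=tpe.suggest,
#             max_evals=10, trials=trials)
# print(space_eval(_space_sketch, best))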
from argparse import ArgumentParser
from pathlib import Path
import os
import time
import glob
import torch
import logging
import json
import random
import numpy as np
import pandas as pd
from contextlib import contextmanager
from collections import namedtuple, defaultdict
from tempfile import TemporaryDirectory
import numba.cuda as profile_cuda
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
from utils import Timers
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
def check_files(checkpoint_path, prefix, max_files=10):
"""
check_files
Check the number of checkpoints; if it exceeds max_files, delete the oldest ones and
return the latest checkpoint file path.
checkpoint_path: str, path to checkpoints
prefix: str, filename prefix used to match checkpoint files
max_files: int, maximum number of checkpoints to retain
"""
try:
pattern = os.path.join(checkpoint_path, prefix + "*.tar")
checkpoint_files = glob.glob(pattern)
checkpoint_files.sort(key=lambda x: os.path.getmtime(x))
except FileNotFoundError:
return None
try:
latest_checkpoint = checkpoint_files[-1]
except IndexError:
# No checkpoint files, list is empty!
latest_checkpoint = None
print("CURRENTLY %d CHECKPOINTS" % len(checkpoint_files))
if len(checkpoint_files) > max_files:
logging.info("DELETE EXCESS CHECKPOINTS")
for idx, checkpoint_file in enumerate(checkpoint_files[:-max_files]):
if os.path.basename(checkpoint_file) == 'training_checkpoint_most_recent.tar': continue
logging.info("DELETE %s" % checkpoint_file)
os.remove(checkpoint_file)
return latest_checkpoint
def save_checkpoint(model, optimizer, epoch, global_step, checkpoint_path, filename):
"""
save_checkpoint
Save the model and optimizer state in a dictionary
model: [class], torch model instance
optimizer: [class], torch optimizer instance
epoch: int, current epoch
global_step: int, current global step
checkpoint_path: string, path
filename: string, name of the checkpoint file
"""
logging.info("** ** * Saving fine-tuned model ** ** * ")
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path, exist_ok=True)
torch.save({"epoch": epoch,
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"global_step": global_step}, filename)
logging.info("** ** * Model saved! ** ** * ")
def restore_checkpoint(model, optimizer, checkpoint_file, device):
"""
Restores model and optimizer from a checkpoint file and returns checkpoint information.
Has side effect of loading the state_dict for model and optimizer (i.e. modifies the instances).
:param model: [class], torch model instance
:param optimizer: [class], torch optimizer instance
:param checkpoint_file: string, full file path
:param device: [class], torch device instance
:return: Tuple of the checkpoint values
"""
assert checkpoint_file
logging.info("** ** * Restore from checkpoint: %s" % checkpoint_file)
checkpoint_state = torch.load(checkpoint_file, map_location=device)
model.load_state_dict(checkpoint_state["model_state_dict"])
optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"])
last_epoch = checkpoint_state["epoch"]
global_step = checkpoint_state["global_step"]
logging.info(" RESTORED AT epoch:%d-%s, global_step:%d" % (last_epoch, global_step))
logging.info("** ** * Model restored! ** ** * ")
# model.train() # Do this in calling code for now, maybe want model.eval() there instead
return last_epoch, global_step
def convert_example_to_features(example, tokenizer, max_seq_length):
tokens = example["tokens"]
segment_ids = example["segment_ids"]
is_random_next = example["is_random_next"]
masked_lm_positions = example["masked_lm_positions"]
masked_lm_labels = example["masked_lm_labels"]
assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated
input_ids = tokenizer.convert_tokens_to_ids(tokens)
masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
input_array = np.zeros(max_seq_length, dtype=np.int)
input_array[:len(input_ids)] = input_ids
mask_array = np.zeros(max_seq_length, dtype=np.bool)
mask_array[:len(input_ids)] = 1
segment_array = np.zeros(max_seq_length, dtype=np.bool)
segment_array[:len(segment_ids)] = segment_ids
lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
lm_label_array[masked_lm_positions] = masked_label_ids
features = InputFeatures(input_ids=input_array,
input_mask=mask_array,
segment_ids=segment_array,
lm_label_ids=lm_label_array,
is_next=is_random_next)
return features
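# Note on the expected input: each line of an epoch_*.json shard is a JSON object with the
# keys used above, e.g. (schematically)
#   {"tokens": ["[CLS]", "the", "[MASK]", ...], "segment_ids": [0, 0, ...],
#    "is_random_next": false, "masked_lm_positions": [2, ...],
#    "masked_lm_labels": ["cat", ...]}
# and convert_example_to_features pads everything out to max_seq_length, filling
# lm_label_ids with -1 wherever no masked-LM prediction is required.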
class PregeneratedDataset(Dataset):
def __init__(self, training_path, epoch, chunk, tokenizer, num_data_epochs, reduce_memory=False):
self.vocab = tokenizer.vocab
self.tokenizer = tokenizer
self.epoch = epoch
self.data_epoch = epoch % num_data_epochs
data_file = training_path / f"epoch_{self.data_epoch}-{chunk}.json"
data_zip = training_path / f"epoch_{self.data_epoch}-{chunk}.zip"
if not os.path.isfile(data_file):
# If file not there, then there should be a zip file that extracts to it
extract_zip(data_zip)
assert os.path.isfile(data_file)
logging.info('Training on: {}'.format(data_file))
metrics_file = training_path / f"metrics_epoch_{self.data_epoch}-{chunk}.json"
assert data_file.is_file() and metrics_file.is_file()
metrics = json.loads(metrics_file.read_text())
num_samples = metrics['num_training_examples']
seq_len = metrics['max_seq_len']
self.temp_dir = None
self.working_dir = None
if reduce_memory:
self.temp_dir = TemporaryDirectory()
self.working_dir = Path(self.temp_dir.name)
input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
lm_label_ids[:] = -1
is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',
shape=(num_samples,), mode='w+', dtype=np.bool)
else:
input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)
logging.info(f"Loading training examples for epoch {epoch}")
with data_file.open() as f:
for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
line = line.strip()
example = json.loads(line)
features = convert_example_to_features(example, tokenizer, seq_len)
input_ids[i] = features.input_ids
segment_ids[i] = features.segment_ids
input_masks[i] = features.input_mask
lm_label_ids[i] = features.lm_label_ids
is_nexts[i] = features.is_next
assert i == num_samples - 1 # Assert that the sample count metric was true
logging.info("Loading complete!")
self.num_samples = num_samples
self.seq_len = seq_len
self.input_ids = input_ids
self.input_masks = input_masks
self.segment_ids = segment_ids
self.lm_label_ids = lm_label_ids
self.is_nexts = is_nexts
def __len__(self):
return self.num_samples
def __getitem__(self, item):
return (torch.tensor(self.input_ids[item].astype(np.int64)),
torch.tensor(self.input_masks[item].astype(np.int64)),
torch.tensor(self.segment_ids[item].astype(np.int64)),
torch.tensor(self.lm_label_ids[item].astype(np.int64)),
torch.tensor(self.is_nexts[item].astype(np.int64)))
def get_chunks(dir_path, epoch):
"""
Look in the specified directory for files of the form epoch_0-000, epoch_0-001, ...etc.
and return a list of the chunks e.g. ['000', '001', '002', ...]
There could be a mix of .json and .zip files so sometimes we could get duplicates.
"""
if isinstance(dir_path, Path):
dir_path = str(dir_path)
chunks = [x.split('-')[-1].strip('.json').strip('.zip') for x in glob.glob("{}/epoch_{}-*".format(dir_path, epoch))]
chunks = list(set(chunks))
return sorted(chunks)
def get_args():
parser = ArgumentParser()
parser.add_argument('--pregenerated_data', type=Path, required=True)
parser.add_argument('--output_dir', type=Path, required=True)
parser.add_argument('--restore_dir', type=Path, help="Restore from a checkpoint file and continue training")
parser.add_argument("--bert_model", type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--do_lower_case", action="store_true")
parser.add_argument("--reduce_memory", action="store_true",
help="Store training data as on-disc memmaps to massively reduce memory usage")
parser.add_argument("--epochs", type=int,
default=3, help="Number of epochs to train for")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--num_workers", type=int,
default=0, help="Number of workers to load data")
# training config
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=12, type=int,
help="Total batch size for training.")
parser.add_argument("--seq_length", default=128, type=int,
help="Seq length of each sample.")
parser.add_argument('--train_iters', type=int, default=2000,
help='number of iterations per epoch')
# distributed training config
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus. Passed from distributed launcher")
# AMP config
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
# optimization
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# nvprof args
parser.add_argument('--nvprof', action='store_true',
help='profile this program')
parser.add_argument('--profile_start', type=int, default=200,
help="""Start iteration of nvidia profiler""")
parser.add_argument('--profile_stop', type=int, default=201,
help="""Stop iteration of nvidia profiler""")
parser.add_argument('--warmup_iter', type=int, default=200,
help="""Start iteration of nvidia profiler""")
# benchmarking args
parser.add_argument('--benchmark', action='store_true',
help='benchmark this program')
parser.add_argument('--benchmark_dir', type=str, default="benchmark_output",
help="""Dir to save benchmark output stats""")
parser.add_argument('--benchmark_start', type=int, default=1000,
help="""Start iteration of nvidia profiler""")
parser.add_argument('--benchmark_stop', type=int, default=2000,
help="""Stop iteration of nvidia profiler""")
parser.add_argument('--benchmark_partition', type=str, default="t4",
help="""Partition of gpus""")
parser.add_argument('--log_interval', type=int, default=100,
help='report interval')
args = parser.parse_args()
assert args.pregenerated_data.is_dir(), \
"--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
return args
def main():
args = get_args()
total_train_examples = 0
for i in range(args.epochs):
chunks = get_chunks(args.pregenerated_data, i)
if i == 0 and len(chunks) == 0:
exit("No training data was found!")
elif len(chunks) == 0:
print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
print("This script will loop over the available data, but training diversity may be negatively impacted.")
num_data_epochs = i
break
for chunk in chunks:
epoch_file = args.pregenerated_data / f"epoch_{i}-{chunk}.json"
epoch_zip = args.pregenerated_data / f"epoch_{i}-{chunk}.zip"
metrics_file = args.pregenerated_data / f"metrics_epoch_{i}-{chunk}.json"
if (epoch_file.is_file() or epoch_zip.is_file()) and metrics_file.is_file():
metrics = json.loads(metrics_file.read_text())
total_train_examples += metrics['num_training_examples']
else:
num_data_epochs = args.epochs
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(
backend='nccl',
world_size=args.world_size,
rank=args.rank,
init_method=init_method)
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
# torch.distributed.init_process_group(backend='nccl')
logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
# args.batch_size = args.batch_size // args.gradient_accumulation_steps
print("CUDA device count: {}".format(torch.cuda.device_count()))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
args.output_dir.mkdir(parents=True, exist_ok=True)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
num_train_optimization_steps = int(
total_train_examples / args.batch_size)
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // args.world_size
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# scheduler not compatible with APEX::FP16_optimizer
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_optimization_steps)
if args.output_dir:
last_checkpoint = check_files(args.output_dir, args.bert_model)
last_epoch, global_step = restore_checkpoint(model, optimizer, last_checkpoint, device)
else:
last_epoch, global_step = 0, 0
logging.info("***** Running training *****")
logging.info(f" Num examples = {total_train_examples}")
logging.info(" Batch size = %d", args.batch_size)
logging.info(" Num steps = %d", num_train_optimization_steps)
iteration = 0
benchmark_stats = defaultdict(lambda: [])
grad_stats = defaultdict(lambda: [])
summary_writer = SummaryWriter() if args.rank == 0 else None
model.train()
for epoch in range(last_epoch, args.epochs):
shuffled_chunks = get_chunks(args.pregenerated_data, epoch)
random.shuffle(shuffled_chunks)
logging.info('New shuffled chunks: {}'.format(shuffled_chunks))
for chunk in shuffled_chunks:
epoch_dataset = PregeneratedDataset(epoch=epoch, chunk=chunk, training_path=args.pregenerated_data, tokenizer=tokenizer,
num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
if args.local_rank == -1:
train_sampler = RandomSampler(epoch_dataset)
else:
train_sampler = DistributedSampler(epoch_dataset)
train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.batch_size, num_workers=args.num_workers)
data_iterator = iter(train_dataloader)
timers = Timers()
timers('interval time').start()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for batch in data_iterator:
# while iteration < args.train_iters:
if args.nvprof:
if iteration == args.profile_start:
profile_cuda.profile_start()
print("CUDA profiling starts!")
if iteration == args.profile_stop:
profile_cuda.profile_stop()
print("CUDA profiling stops!")
iteration += 1
# benchmark dataloading time
# batch = next(data_iterator)
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
outputs = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
loss = outputs[0]
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
# loss = loss / args.gradient_accumulation_steps
if args.local_rank != -1:
if iteration % args.gradient_accumulation_steps == 0:
# we are using APEX DDP => enable_allreduce / disable_allreduce
# print("iteration {}, all reduce enabled!".format(iteration))
model.enable_allreduce()
else:
# print("iteration {}, all reduce disabled!".format(iteration))
model.disable_allreduce()
# note that loss.backward accumulates the gradient => gradient will be accumulated until we call zero_grad
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
# mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
mean_loss = tr_loss / nb_tr_steps
if iteration % args.gradient_accumulation_steps == 0:
start = time.time()
scheduler.step()  # Update learning rate schedule (known issue: WarmupLinearSchedule is not compatible with APEX FP16_Optimizer)
optimizer.step()
optimizer.zero_grad()
benchmark_stats['weight_update_time'].append(time.time() - start) # unit in s
global_step += 1
if iteration % args.log_interval == 0:
elapsed_time = timers('interval time').elapsed()
log_string = ' epoch{:2d} |'.format(epoch)
log_string += ' iteration {:8d} |'.format(iteration)
log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(elapsed_time * 1000.0 / args.log_interval)
log_string += ' mean loss {:.3E} |'.format(mean_loss)
if args.rank == 0:
summary_writer.add_scalar('mean_loss', mean_loss, iteration)
# args.rank == 0 => this is master process
if args.benchmark and args.rank == 0:
if args.benchmark_start < iteration <= args.benchmark_stop:
benchmark_stats['iteration'].append(iteration)
benchmark_stats['seq_length'].append(args.seq_length)
benchmark_stats['batch_size'].append(args.batch_size * args.world_size)
benchmark_stats['num_tokens'].append(args.seq_length * args.batch_size * args.world_size)
benchmark_stats['elapsed_time'].append(elapsed_time)
benchmark_stats['log_interval'].append(args.log_interval)
print(log_string, flush=True)
# Save a trained model
if n_gpu > 1 and torch.distributed.get_rank() == 0 or n_gpu <= 1:
logging.info("** ** * Saving fine-tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
save_checkpoint(
model,
optimizer,
epoch,
global_step,
args.output_dir,
os.path.join(args.output_dir, "{}_{}.tar".format(args.bert_model, global_step))
)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Save a trained model
if n_gpu > 1 and torch.distributed.get_rank() == 0 or n_gpu <=1:
logging.info("** ** * Saving fine-tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
save_checkpoint(
model,
optimizer,
epoch,
global_step,
args.output_dir,
os.path.join(args.output_dir, "{}_{}.tar".format(args.bert_model, global_step))
)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
if args.rank == 0:
summary_writer.close()
if args.benchmark and args.rank == 0:
benchmark_csv = {
k: [np.mean(l)] for k,l in benchmark_stats.items()
}
benchmark_csv['weight_update_time'] = args.log_interval * np.array(benchmark_csv['weight_update_time'])
benchmark_csv['token_throughput'] = np.array(benchmark_csv['num_tokens']) * np.array(benchmark_csv['log_interval'])\
/ np.array(benchmark_csv['elapsed_time'])
benchmark_csv['precision'] = [ 'fp16' if args.fp16 else 'fp32' ]
save_dir = os.path.join(
args.benchmark_dir,
"{gpus}_gpus_{partition}_trials".format(
gpus=args.world_size,
partition=args.benchmark_partition
)
)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
df = | pd.DataFrame.from_dict(benchmark_csv) | pandas.DataFrame.from_dict |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
    Element indexing into a categorical column must give the underlying
    object, not the numerical category code.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
def test_categorical_masking():
"""
    Test the common operation of getting all rows that match a certain
    category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# check scalar comparison
expect_matches = pdsr == "a"
got_matches = sr == "a"
np.testing.assert_array_equal(
expect_matches.values, got_matches.to_array()
)
# mask series
expect_masked = pdsr[expect_matches]
got_masked = sr[got_matches]
assert len(expect_masked) == len(got_masked)
assert len(expect_masked) == got_masked.valid_count
assert_eq(got_masked, expect_masked)
def test_df_cat_set_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a")
pddf = df.to_pandas(nullable_pd_dtype=False)
expect = pddf.set_index("a")
assert_eq(got, expect)
def test_df_cat_sort_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a").sort_index()
expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index()
assert_eq(got, expect)
def test_cat_series_binop_error():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
dfa = df["a"]
dfb = df["b"]
# lhs is a categorical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfa, dfb],),
rfunc_args_and_kwargs=([dfa, dfb],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: add",
)
# if lhs is a numerical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfb, dfa],),
rfunc_args_and_kwargs=([dfb, dfa],),
check_exception_type=False,
expected_error_message="'add' operator not supported",
)
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_unique(num_elements):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), num_elements
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas())
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique_sorted = np.sort(pdf["a"].unique())
# verify
np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted)
@pytest.mark.parametrize("nelem", [20, 50, 100])
def test_categorical_unique_count(nelem):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), nelem
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_count = gdf["a"].nunique()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique = pdf["a"].unique()
# verify
assert gdf_unique_count == len(pdf_unique)
def test_categorical_empty():
cat = pd.Categorical([])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_array())
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
def test_categorical_set_categories():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
psr = pd.Series(cat)
sr = cudf.Series.from_categorical(cat)
# adding category
expect = psr.cat.set_categories(["a", "b", "c", "d"])
got = sr.cat.set_categories(["a", "b", "c", "d"])
assert_eq(expect, got)
# removing category
expect = psr.cat.set_categories(["a", "b"])
got = sr.cat.set_categories(["a", "b"])
assert_eq(expect, got)
def test_categorical_set_categories_preserves_order():
series = pd.Series([1, 0, 0, 0, 2]).astype("category")
# reassigning categories should preserve element ordering
assert_eq(
series.cat.set_categories([1, 2]),
cudf.Series(series).cat.set_categories([1, 2]),
)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_ordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(False))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(False))
assert cd_sr.cat.ordered is False
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_ordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_ordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is True
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_unordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(True))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(True))
assert cd_sr.cat.ordered is True
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_unordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_unordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is False
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("from_ordered", [True, False])
@pytest.mark.parametrize("to_ordered", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_reorder_categories(
pd_str_cat, from_ordered, to_ordered, inplace
):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(from_ordered))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(from_ordered))
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
kwargs = dict(ordered=to_ordered, inplace=inplace)
pd_sr_1 = pd_sr.cat.reorder_categories(list("cba"), **kwargs)
cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert_eq(pd_sr_1, cd_sr_1)
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_add_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.add_categories(["d"], inplace=inplace)
cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "d" in pd_sr_1.cat.categories.to_list()
assert "d" in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_remove_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.remove_categories(["a"], inplace=inplace)
cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "a" not in pd_sr_1.cat.categories.to_list()
assert "a" not in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
    # removing a category that is not among the existing categories should raise
assert_exceptions_equal(
lfunc=cd_sr.to_pandas().cat.remove_categories,
rfunc=cd_sr.cat.remove_categories,
lfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
rfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
expected_error_message="removals must all be in old categories",
)
def test_categorical_dataframe_slice_copy():
pdf = pd.DataFrame({"g": pd.Series(["a", "b", "z"], dtype="category")})
gdf = cudf.from_pandas(pdf)
exp = pdf[1:].copy()
gdf = gdf[1:].copy()
assert_eq(exp, gdf)
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
| pd.Series([1, 2, 3, 89, 3, 1, 89], dtype="category") | pandas.Series |
import contextlib
import json
import gzip
import io
import logging
import os.path
import pickle
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import pandas
COMMON_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'common-primitives')
# NOTE: This insertion should appear before any code attempting to resolve or load primitives,
# so the git submodule version of `common-primitives` is looked at first.
sys.path.insert(0, COMMON_PRIMITIVES_DIR)
TEST_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'data', 'primitives')
sys.path.insert(0, TEST_PRIMITIVES_DIR)
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.no_split import NoSplitDatasetSplitPrimitive
from common_primitives.random_forest import RandomForestClassifierPrimitive
from common_primitives.train_score_split import TrainScoreDatasetSplitPrimitive
from test_primitives.random_classifier import RandomClassifierPrimitive
from test_primitives.fake_score import FakeScorePrimitive
from d3m import cli, index, runtime, utils
from d3m.container import dataset as dataset_module
from d3m.contrib.primitives.compute_scores import ComputeScoresPrimitive
from d3m.metadata import base as metadata_base, pipeline as pipeline_module, pipeline_run as pipeline_run_module, problem as problem_module
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
PROBLEM_DIR = os.path.join(TEST_DATA_DIR, 'problems')
DATASET_DIR = os.path.join(TEST_DATA_DIR, 'datasets')
PIPELINE_DIR = os.path.join(TEST_DATA_DIR, 'pipelines')
class TestCLIRuntime(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
@classmethod
def setUpClass(cls):
to_register = {
'd3m.primitives.data_transformation.dataset_to_dataframe.Common': DatasetToDataFramePrimitive,
'd3m.primitives.classification.random_forest.Common': RandomForestClassifierPrimitive,
'd3m.primitives.classification.random_classifier.Test': RandomClassifierPrimitive,
'd3m.primitives.data_transformation.column_parser.Common': ColumnParserPrimitive,
'd3m.primitives.data_transformation.construct_predictions.Common': ConstructPredictionsPrimitive,
'd3m.primitives.evaluation.no_split_dataset_split.Common': NoSplitDatasetSplitPrimitive,
'd3m.primitives.evaluation.compute_scores.Test': FakeScorePrimitive,
'd3m.primitives.evaluation.train_score_dataset_split.Common': TrainScoreDatasetSplitPrimitive,
# We do not have to load this primitive, but loading it here prevents the package from loading all primitives.
'd3m.primitives.evaluation.compute_scores.Core': ComputeScoresPrimitive,
}
# To hide any logging or stdout output.
with utils.silence():
for python_path, primitive in to_register.items():
index.register_primitive(python_path, primitive)
def _call_cli_runtime(self, arg):
logger = logging.getLogger('d3m.runtime')
with utils.silence():
with self.assertLogs(logger=logger) as cm:
# So that at least one message is logged.
logger.warning("Debugging.")
cli.main(arg)
# We skip our "debugging" message.
return cm.records[1:]
def _call_cli_runtime_without_fail(self, arg):
try:
return self._call_cli_runtime(arg)
except Exception as e:
self.fail(traceback.format_exc())
def _assert_valid_saved_pipeline_runs(self, pipeline_run_save_path):
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
try:
pipeline_run_module.validate_pipeline_run(pipeline_run_dict)
except Exception as e:
self.fail(traceback.format_exc())
def _validate_previous_pipeline_run_ids(self, pipeline_run_save_path):
ids = set()
prev_ids = set()
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
ids.add(pipeline_run_dict['id'])
if 'previous_pipeline_run' in pipeline_run_dict:
prev_ids.add(pipeline_run_dict['previous_pipeline_run']['id'])
self.assertTrue(
prev_ids.issubset(ids),
'Some previous pipeline run ids {} are not in the set of pipeline run ids {}'.format(prev_ids, ids)
)
def test_fit_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
def test_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--output',
output_csv_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11225, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11225, outputs_path='output.csv')
def test_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-no-problem-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--output',
output_csv_path,
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-no-problem-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--output',
output_csv_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_nonstandard_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=10710, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--not-standard-pipeline'
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_fit_produce_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
def test_fit_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_fit_score_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
@staticmethod
def _get_iris_dataset_path():
return os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json')
@staticmethod
def _get_iris_problem_path():
return os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json')
@staticmethod
def _get_random_forest_pipeline_path():
return os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml')
@staticmethod
def _get_no_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml')
@staticmethod
def _get_train_test_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-train-test-split.yml')
def _get_pipeline_run_save_path(self):
return os.path.join(self.test_dir, 'pipeline_run.yml')
def _get_predictions_path(self):
return os.path.join(self.test_dir, 'predictions.csv')
def _get_scores_path(self):
return os.path.join(self.test_dir, 'scores.csv')
def _get_pipeline_rerun_save_path(self):
return os.path.join(self.test_dir, 'pipeline_rerun.yml')
def _get_rescores_path(self):
return os.path.join(self.test_dir, 'rescores.csv')
def _fit_iris_random_forest(
self, *, predictions_path=None, fitted_pipeline_path=None, pipeline_run_save_path=None
):
if pipeline_run_save_path is None:
pipeline_run_save_path = self._get_pipeline_run_save_path()
arg = [
'',
'runtime',
'fit',
'--input',
self._get_iris_dataset_path(),
'--problem',
self._get_iris_problem_path(),
'--pipeline',
self._get_random_forest_pipeline_path(),
'-O',
pipeline_run_save_path
]
if predictions_path is not None:
arg.append('--output')
arg.append(predictions_path)
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def _fit_iris_random_classifier_without_problem(self, *, fitted_pipeline_path):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'-O',
pipeline_run_save_path
]
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def test_fit(self):
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
self._fit_iris_random_forest(
fitted_pipeline_path=fitted_pipeline_path, pipeline_run_save_path=pipeline_run_save_path
)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
self.assertTrue(os.path.isfile(pipeline_run_save_path))
def test_evaluate(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scores',
scores_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'-O',
pipeline_run_save_path
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0], ['F1_MACRO', 1.0, 1.0, 0, 0]])
def test_evaluate_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0]])
def test_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scores',
scores_path,
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['F1_MACRO', 1.0, 1.0, 0], ['ACCURACY', 1.0, 1.0, 0]])
def test_score_without_problem_without_metric(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_classifier_without_problem(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
'--scores',
scores_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_score_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_classifier_without_problem(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path,
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_produce(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
arg = [
'',
'runtime',
'produce',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
def test_score_predictions(self):
predictions_path = os.path.join(self.test_dir, 'predictions.csv')
self._fit_iris_random_forest(predictions_path=predictions_path)
self.assertTrue(os.path.isfile(predictions_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score-predictions',
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--predictions',
predictions_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'--scores',
scores_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0], ['F1_MACRO', 1.0, 1.0]])
def test_sklearn_dataset_fit_produce(self):
self._create_sklearn_iris_problem_doc()
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
'sklearn://iris',
'--input',
'sklearn://iris',
'--problem',
os.path.join(self.test_dir, 'problemDoc.json'),
'--test-input',
'sklearn://iris',
'--test-input',
'sklearn://iris',
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'problemDoc.json',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_standard_output_metadata(prediction_type='numpy.int64')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='outputs.0/data.csv')
def test_sklearn_dataset_fit_produce_without_problem(self):
output_csv_path = os.path.join(self.test_dir, 'output.csv')
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit-produce',
'--input',
'sklearn://iris',
'--test-input',
'sklearn://iris',
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--save',
fitted_pipeline_path,
'--output',
output_csv_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json',
])
self._assert_standard_output_metadata(prediction_type='numpy.int64')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='output.csv')
def _create_sklearn_iris_problem_doc(self):
with open(os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'), 'r', encoding='utf8') as problem_doc_file:
problem_doc = json.load(problem_doc_file)
problem_doc['inputs']['data'][0]['datasetID'] = 'sklearn://iris'
with open(os.path.join(self.test_dir, 'problemDoc.json'), 'x', encoding='utf8') as problem_doc_file:
json.dump(problem_doc, problem_doc_file)
def test_sklearn_dataset_evaluate(self):
self._create_sklearn_iris_problem_doc()
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
'sklearn://iris',
'--problem',
os.path.join(self.test_dir, 'problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scores',
scores_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'-O',
pipeline_run_save_path
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0], ['F1_MACRO', 1.0, 1.0, 0, 0]])
def test_sklearn_dataset_evaluate_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
'sklearn://iris',
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0]])
def _assert_prediction_sum(self, prediction_sum, outputs_path):
if prediction_sum is not None:
with open(os.path.join(self.test_dir, outputs_path), 'r') as csv_file:
self.assertEqual(sum([int(v) for v in list(csv_file)[1:]]), prediction_sum)
def _assert_standard_output_metadata(self, outputs_name='outputs.0', prediction_type='str'):
with open(os.path.join(self.test_dir, outputs_name, 'metadata.json'), 'r') as metadata_file:
metadata = json.load(metadata_file)
self.assertEqual(
metadata,
[
{
"selector": [],
"metadata": {
"dimension": {
"length": 150,
"name": "rows",
"semantic_types": ["https://metadata.datadrivendiscovery.org/types/TabularRow"],
},
"schema": "https://metadata.datadrivendiscovery.org/schemas/v0/container.json",
"semantic_types": ["https://metadata.datadrivendiscovery.org/types/Table"],
"structural_type": "d3m.container.pandas.DataFrame",
},
},
{
"selector": ["__ALL_ELEMENTS__"],
"metadata": {
"dimension": {
"length": 1,
"name": "columns",
"semantic_types": ["https://metadata.datadrivendiscovery.org/types/TabularColumn"],
}
},
},
{"selector": ["__ALL_ELEMENTS__", 0],
"metadata": {"name": "predictions", "structural_type": prediction_type}},
],
)
def _assert_nonstandard_output(self, outputs_name='outputs.1'):
with open(os.path.join(self.test_dir, outputs_name, 'data.csv'), 'r') as csv_file:
output_dataframe = pandas.read_csv(csv_file, index_col=False)
learning_dataframe = pandas.read_csv(
os.path.join(DATASET_DIR, 'iris_dataset_1/tables/learningData.csv'), index_col=False)
self.assertTrue(learning_dataframe.equals(output_dataframe))
with open(os.path.join(self.test_dir, outputs_name, 'metadata.json'), 'r') as metadata_file:
metadata = json.load(metadata_file)
self.assertEqual(
metadata,
[
{
"metadata": {
"dimension": {
"length": 150,
"name": "rows",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularRow"
]
},
"schema": "https://metadata.datadrivendiscovery.org/schemas/v0/container.json",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/Table"
],
"structural_type": "d3m.container.pandas.DataFrame"
},
"selector": []
},
{
"metadata": {
"dimension": {
"length": 6,
"name": "columns",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/TabularColumn"
]
}
},
"selector": [
"__ALL_ELEMENTS__"
]
},
{
"metadata": {
"name": "d3mIndex",
"semantic_types": [
"http://schema.org/Integer",
"https://metadata.datadrivendiscovery.org/types/PrimaryKey"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
0
]
},
{
"metadata": {
"name": "sepalLength",
"semantic_types": [
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
1
]
},
{
"metadata": {
"name": "sepalWidth",
"semantic_types": [
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
2
]
},
{
"metadata": {
"name": "petalLength",
"semantic_types": [
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
3
]
},
{
"metadata": {
"name": "petalWidth",
"semantic_types": [
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
4
]
},
{
"metadata": {
"name": "species",
"semantic_types": [
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
"https://metadata.datadrivendiscovery.org/types/SuggestedTarget",
"https://metadata.datadrivendiscovery.org/types/Attribute"
],
"structural_type": "str"
},
"selector": [
"__ALL_ELEMENTS__",
5
]
}
]
)
def _assert_pipeline_runs_equal(self, pipeline_run_save_path1, pipeline_run_save_path2):
with open(pipeline_run_save_path1, 'r') as f:
pipeline_runs1 = list(utils.yaml_load_all(f))
with open(pipeline_run_save_path2, 'r') as f:
pipeline_runs2 = list(utils.yaml_load_all(f))
self.assertEqual(len(pipeline_runs1), len(pipeline_runs2))
for pipeline_run1, pipeline_run2 in zip(pipeline_runs1, pipeline_runs2):
self.assertTrue(pipeline_run_module.PipelineRun.json_structure_equals(pipeline_run1, pipeline_run2))
def test_pipeline_run_json_structure_equals(self):
pipeline_run_save_path1 = os.path.join(self.test_dir, 'pipeline_run1.yml')
self._fit_iris_random_forest(pipeline_run_save_path=pipeline_run_save_path1)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path1)
pipeline_run_save_path2 = os.path.join(self.test_dir, 'pipeline_run2.yml')
self._fit_iris_random_forest(pipeline_run_save_path=pipeline_run_save_path2)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path2)
self._assert_pipeline_runs_equal(pipeline_run_save_path1, pipeline_run_save_path2)
def _cache_pipeline_for_rerun(self, pipeline_path, cache_dir=None):
"""make pipeline searchable by id in test_dir"""
with open(pipeline_path, 'r') as f:
pipeline = utils.yaml_load(f)
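        # Assumption: the runtime resolves pipelines referenced by a pipeline run by
        # looking for "<pipeline id>.yml" under --pipelines-path, so writing the file
        # under that name below is what makes the rerun able to find it.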
if cache_dir is None:
cache_dir = self.test_dir
temp_pipeline_path = os.path.join(cache_dir, pipeline['id'] + '.yml')
with open(temp_pipeline_path, 'w') as f:
utils.yaml_dump(pipeline, f)
@staticmethod
def _generate_seed():
return random.randint(2**31, 2**32-1)
def test_fit_rerun(self):
dataset_path = self._get_iris_dataset_path()
problem_path = self._get_iris_problem_path()
pipeline_path = self._get_random_forest_pipeline_path()
pipeline_run_save_path = self._get_pipeline_run_save_path()
problem = problem_module.get_problem(problem_path)
inputs = [dataset_module.get_dataset(dataset_path)]
with open(pipeline_path) as f:
pipeline = pipeline_module.Pipeline.from_yaml(f)
hyperparams = [{}, {}, {'n_estimators': 19}, {}]
random_seed = self._generate_seed()
with utils.silence():
fitted_pipeline, predictions, fit_result = runtime.fit(
pipeline, inputs, problem_description=problem, hyperparams=hyperparams,
random_seed=random_seed, context=metadata_base.Context.TESTING,
)
with open(pipeline_run_save_path, 'w') as f:
fit_result.pipeline_run.to_yaml(f)
self._cache_pipeline_for_rerun(pipeline_path)
pipeline_rerun_save_path = self._get_pipeline_rerun_save_path()
rerun_arg = [
'',
'--pipelines-path',
self.test_dir,
'runtime',
'--datasets',
TEST_DATA_DIR,
'fit',
'--input-run',
pipeline_run_save_path,
'--output-run',
pipeline_rerun_save_path,
]
self._call_cli_runtime_without_fail(rerun_arg)
self._assert_valid_saved_pipeline_runs(pipeline_rerun_save_path)
self._assert_pipeline_runs_equal(pipeline_run_save_path, pipeline_rerun_save_path)
def test_produce_rerun(self):
dataset_path = self._get_iris_dataset_path()
problem_path = self._get_iris_problem_path()
pipeline_path = self._get_random_forest_pipeline_path()
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
arg = [
'',
'runtime',
'produce',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
dataset_path,
'--output-run',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._cache_pipeline_for_rerun(pipeline_path)
pipeline_rerun_save_path = self._get_pipeline_rerun_save_path()
rerun_arg = [
'',
'--pipelines-path',
self.test_dir,
'runtime',
'--datasets',
TEST_DATA_DIR,
'produce',
'--fitted-pipeline',
fitted_pipeline_path,
'--input-run',
pipeline_run_save_path,
'--output-run',
pipeline_rerun_save_path,
]
self._call_cli_runtime_without_fail(rerun_arg)
self._assert_valid_saved_pipeline_runs(pipeline_rerun_save_path)
self._assert_pipeline_runs_equal(pipeline_run_save_path, pipeline_rerun_save_path)
def _assert_scores_equal(self, scores_path, rescores_path):
scores = pandas.read_csv(scores_path)
rescores = pandas.read_csv(rescores_path)
self.assertTrue(scores.equals(rescores), '\n{}\n\n{}'.format(scores, rescores))
def _assert_scores_equal_pipeline_run(self, scores_path, pipeline_run_save_path):
scores = pandas.read_csv(scores_path)
scores.drop('fold', axis=1, inplace=True, errors='ignore')
scores_no_seed = scores.drop('randomSeed', axis=1, errors='ignore')
with open(pipeline_run_save_path) as f:
# TODO: always use -1?
pipeline_run = list(utils.yaml_load_all(f))[-1]
self.assertEqual(pipeline_run['run']['phase'], metadata_base.PipelineRunPhase.PRODUCE.name)
# TODO: clean up preprocessing?
pipeline_run_scores_df = pandas.DataFrame(pipeline_run['run']['results']['scores'])
# TODO: is it possible to make pipeline run schema more compatible with scores csv schema?
pipeline_run_scores_df['metric'] = pipeline_run_scores_df['metric'].map(lambda cell: cell['metric'])
pipeline_run_scores_df = pipeline_run_scores_df[scores_no_seed.columns.tolist()]
pandas.testing.assert_frame_equal(scores_no_seed, pipeline_run_scores_df)
self.assertEqual(scores['randomSeed'].iloc[0], pipeline_run['random_seed'])
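    # Structure assumed above: each entry of pipeline_run['run']['results']['scores']
    # appears to be a dict whose 'metric' field is itself a dict (hence the
    # cell['metric'] lookup), next to plain 'value'/'normalized' columns.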
def test_score_rerun(self):
dataset_path = self._get_iris_dataset_path()
problem_path = self._get_iris_problem_path()
pipeline_path = self._get_random_forest_pipeline_path()
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
scores_path = os.path.join(self.test_dir, 'scores.csv')
random_seed = self._generate_seed()
metrics = runtime.get_metrics_from_list(['ACCURACY', 'F1_MACRO'])
scoring_params = {'add_normalized_scores': 'false'}
scoring_random_seed = self._generate_seed()
problem = problem_module.get_problem(problem_path)
inputs = [dataset_module.get_dataset(dataset_path)]
with open(pipeline_path) as f:
pipeline = pipeline_module.Pipeline.from_yaml(f)
with open(runtime.DEFAULT_SCORING_PIPELINE_PATH) as f:
scoring_pipeline = pipeline_module.Pipeline.from_yaml(f)
with utils.silence():
fitted_pipeline, predictions, fit_result = runtime.fit(
pipeline, inputs, problem_description=problem, random_seed=random_seed,
context=metadata_base.Context.TESTING,
)
with open(fitted_pipeline_path, 'wb') as f:
pickle.dump(fitted_pipeline, f)
predictions, produce_result = runtime.produce(fitted_pipeline, inputs)
scores, score_result = runtime.score(
predictions, inputs, scoring_pipeline=scoring_pipeline,
problem_description=problem, metrics=metrics, predictions_random_seed=random_seed,
context=metadata_base.Context.TESTING, scoring_params=scoring_params,
random_seed=scoring_random_seed
)
self.assertFalse(score_result.has_error(), score_result.error)
scores.to_csv(scores_path)
runtime.combine_pipeline_runs(
produce_result.pipeline_run, scoring_pipeline_run=score_result.pipeline_run, score_inputs=inputs,
metrics=metrics, scores=scores
)
with open(pipeline_run_save_path, 'w') as f:
produce_result.pipeline_run.to_yaml(f)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
dataframe = | pandas.read_csv(scores_path) | pandas.read_csv |
import functools
import itertools
import warnings
import imghdr
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from numpy.testing import assert_array_equal
from seaborn._core.plot import Plot
from seaborn._core.scales import Nominal, Continuous
from seaborn._core.rules import categorical_order
from seaborn._core.moves import Move
from seaborn._marks.base import Mark
from seaborn._stats.base import Stat
from seaborn.external.version import Version
assert_vector_equal = functools.partial(
# TODO do we care about int/float dtype consistency?
# Eventually most variables become floats ... but does it matter when?
# (Or rather, does it matter if it happens too early?)
assert_series_equal, check_names=False, check_dtype=False,
)
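# In the tests below, assert_vector_equal(left, right) therefore compares values and
# index while ignoring Series names and, for now, dtypes.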
def assert_gridspec_shape(ax, nrows=1, ncols=1):
gs = ax.get_gridspec()
if Version(mpl.__version__) < Version("3.2"):
assert gs._nrows == nrows
assert gs._ncols == ncols
else:
assert gs.nrows == nrows
assert gs.ncols == ncols
class MockMark(Mark):
_grouping_props = ["color"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.passed_keys = []
self.passed_data = []
self.passed_axes = []
self.passed_scales = None
self.passed_orient = None
self.n_splits = 0
def _plot(self, split_gen, scales, orient):
for keys, data, ax in split_gen():
self.n_splits += 1
self.passed_keys.append(keys)
self.passed_data.append(data)
self.passed_axes.append(ax)
self.passed_scales = scales
self.passed_orient = orient
def _legend_artist(self, variables, value, scales):
a = mpl.lines.Line2D([], [])
a.variables = variables
a.value = value
return a
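# The tests below use MockMark to record the keys, data frames, axes, scales, and
# orient that Plot passes to a mark, so plotting behavior can be asserted without
# drawing real artists.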
class TestInit:
def test_empty(self):
p = Plot()
assert p._data.source_data is None
assert p._data.source_vars == {}
def test_data_only(self, long_df):
p = Plot(long_df)
assert p._data.source_data is long_df
assert p._data.source_vars == {}
def test_df_and_named_variables(self, long_df):
variables = {"x": "a", "y": "z"}
p = Plot(long_df, **variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], long_df[col])
assert p._data.source_data is long_df
assert p._data.source_vars.keys() == variables.keys()
def test_df_and_mixed_variables(self, long_df):
variables = {"x": "a", "y": long_df["z"]}
p = Plot(long_df, **variables)
for var, col in variables.items():
if isinstance(col, str):
assert_vector_equal(p._data.frame[var], long_df[col])
else:
assert_vector_equal(p._data.frame[var], col)
assert p._data.source_data is long_df
assert p._data.source_vars.keys() == variables.keys()
def test_vector_variables_only(self, long_df):
variables = {"x": long_df["a"], "y": long_df["z"]}
p = Plot(**variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], col)
assert p._data.source_data is None
assert p._data.source_vars.keys() == variables.keys()
def test_vector_variables_no_index(self, long_df):
variables = {"x": long_df["a"].to_numpy(), "y": long_df["z"].to_list()}
p = Plot(**variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], pd.Series(col))
assert p._data.names[var] is None
assert p._data.source_data is None
assert p._data.source_vars.keys() == variables.keys()
def test_data_only_named(self, long_df):
p = Plot(data=long_df)
assert p._data.source_data is long_df
assert p._data.source_vars == {}
def test_positional_and_named_data(self, long_df):
err = "`data` given by both name and position"
with pytest.raises(TypeError, match=err):
Plot(long_df, data=long_df)
@pytest.mark.parametrize("var", ["x", "y"])
def test_positional_and_named_xy(self, long_df, var):
err = f"`{var}` given by both name and position"
with pytest.raises(TypeError, match=err):
Plot(long_df, "a", "b", **{var: "c"})
def test_positional_data_x_y(self, long_df):
p = Plot(long_df, "a", "b")
assert p._data.source_data is long_df
assert list(p._data.source_vars) == ["x", "y"]
def test_positional_x_y(self, long_df):
p = Plot(long_df["a"], long_df["b"])
assert p._data.source_data is None
assert list(p._data.source_vars) == ["x", "y"]
def test_positional_data_x(self, long_df):
p = Plot(long_df, "a")
assert p._data.source_data is long_df
assert list(p._data.source_vars) == ["x"]
def test_positional_x(self, long_df):
p = Plot(long_df["a"])
assert p._data.source_data is None
assert list(p._data.source_vars) == ["x"]
def test_positional_too_many(self, long_df):
err = r"Plot\(\) accepts no more than 3 positional arguments \(data, x, y\)"
with pytest.raises(TypeError, match=err):
Plot(long_df, "x", "y", "z")
def test_unknown_keywords(self, long_df):
err = r"Plot\(\) got unexpected keyword argument\(s\): bad"
with pytest.raises(TypeError, match=err):
Plot(long_df, bad="x")
class TestLayerAddition:
def test_without_data(self, long_df):
p = Plot(long_df, x="x", y="y").add(MockMark()).plot()
layer, = p._layers
assert_frame_equal(p._data.frame, layer["data"].frame, check_dtype=False)
def test_with_new_variable_by_name(self, long_df):
p = Plot(long_df, x="x").add(MockMark(), y="y").plot()
layer, = p._layers
assert layer["data"].frame.columns.to_list() == ["x", "y"]
for var in "xy":
assert_vector_equal(layer["data"].frame[var], long_df[var])
def test_with_new_variable_by_vector(self, long_df):
p = Plot(long_df, x="x").add(MockMark(), y=long_df["y"]).plot()
layer, = p._layers
assert layer["data"].frame.columns.to_list() == ["x", "y"]
for var in "xy":
assert_vector_equal(layer["data"].frame[var], long_df[var])
def test_with_late_data_definition(self, long_df):
p = Plot().add(MockMark(), data=long_df, x="x", y="y").plot()
layer, = p._layers
assert layer["data"].frame.columns.to_list() == ["x", "y"]
for var in "xy":
assert_vector_equal(layer["data"].frame[var], long_df[var])
def test_with_new_data_definition(self, long_df):
long_df_sub = long_df.sample(frac=.5)
p = Plot(long_df, x="x", y="y").add(MockMark(), data=long_df_sub).plot()
layer, = p._layers
assert layer["data"].frame.columns.to_list() == ["x", "y"]
for var in "xy":
assert_vector_equal(
layer["data"].frame[var], long_df_sub[var].reindex(long_df.index)
)
def test_drop_variable(self, long_df):
p = Plot(long_df, x="x", y="y").add(MockMark(), y=None).plot()
layer, = p._layers
assert layer["data"].frame.columns.to_list() == ["x"]
assert_vector_equal(layer["data"].frame["x"], long_df["x"], check_dtype=False)
@pytest.mark.xfail(reason="Need decision on default stat")
def test_stat_default(self):
class MarkWithDefaultStat(Mark):
default_stat = Stat
p = Plot().add(MarkWithDefaultStat())
layer, = p._layers
assert layer["stat"].__class__ is Stat
def test_stat_nondefault(self):
class MarkWithDefaultStat(Mark):
default_stat = Stat
class OtherMockStat(Stat):
pass
p = Plot().add(MarkWithDefaultStat(), OtherMockStat())
layer, = p._layers
assert layer["stat"].__class__ is OtherMockStat
@pytest.mark.parametrize(
"arg,expected",
[("x", "x"), ("y", "y"), ("v", "x"), ("h", "y")],
)
def test_orient(self, arg, expected):
class MockStatTrackOrient(Stat):
def __call__(self, data, groupby, orient, scales):
self.orient_at_call = orient
return data
class MockMoveTrackOrient(Move):
def __call__(self, data, groupby, orient):
self.orient_at_call = orient
return data
s = MockStatTrackOrient()
m = MockMoveTrackOrient()
Plot(x=[1, 2, 3], y=[1, 2, 3]).add(MockMark(), s, m, orient=arg).plot()
assert s.orient_at_call == expected
assert m.orient_at_call == expected
def test_variable_list(self, long_df):
p = Plot(long_df, x="x", y="y")
assert p._variables == ["x", "y"]
p = Plot(long_df).add(MockMark(), x="x", y="y")
assert p._variables == ["x", "y"]
p = Plot(long_df, y="x", color="a").add(MockMark(), x="y")
assert p._variables == ["y", "color", "x"]
p = Plot(long_df, x="x", y="y", color="a").add(MockMark(), color=None)
assert p._variables == ["x", "y", "color"]
p = (
Plot(long_df, x="x", y="y")
.add(MockMark(), color="a")
.add(MockMark(), alpha="s")
)
assert p._variables == ["x", "y", "color", "alpha"]
p = Plot(long_df, y="x").pair(x=["a", "b"])
assert p._variables == ["y", "x0", "x1"]
def test_type_checks(self):
p = Plot()
with pytest.raises(TypeError, match="mark must be a Mark instance"):
p.add(MockMark)
class MockStat(Stat):
pass
with pytest.raises(TypeError, match="stat must be a Stat instance"):
p.add(MockMark(), MockStat)
class TestScaling:
def test_inference(self, long_df):
for col, scale_type in zip("zat", ["continuous", "nominal", "temporal"]):
p = Plot(long_df, x=col, y=col).add(MockMark()).plot()
for var in "xy":
assert p._scales[var].scale_type == scale_type
def test_inference_from_layer_data(self):
p = Plot().add(MockMark(), x=["a", "b", "c"]).plot()
assert p._scales["x"]("b") == 1
def test_inference_joins(self):
p = (
Plot(y=pd.Series([1, 2, 3, 4]))
.add(MockMark(), x=pd.Series([1, 2]))
.add(MockMark(), x=pd.Series(["a", "b"], index=[2, 3]))
.plot()
)
assert p._scales["x"]("a") == 2
def test_inferred_categorical_converter(self):
p = Plot(x=["b", "c", "a"]).add(MockMark()).plot()
ax = p._figure.axes[0]
assert ax.xaxis.convert_units("c") == 1
def test_explicit_categorical_converter(self):
p = Plot(y=[2, 1, 3]).scale(y=Nominal()).add(MockMark()).plot()
ax = p._figure.axes[0]
assert ax.yaxis.convert_units("3") == 2
@pytest.mark.xfail(reason="Temporal auto-conversion not implemented")
def test_categorical_as_datetime(self):
dates = ["1970-01-03", "1970-01-02", "1970-01-04"]
p = Plot(x=dates).scale(...).add(MockMark()).plot()
p # TODO
...
def test_faceted_log_scale(self):
p = Plot(y=[1, 10]).facet(col=["a", "b"]).scale(y="log").plot()
for ax in p._figure.axes:
xfm = ax.yaxis.get_transform().transform
assert_array_equal(xfm([1, 10, 100]), [0, 1, 2])
def test_paired_single_log_scale(self):
x0, x1 = [1, 2, 3], [1, 10, 100]
p = Plot().pair(x=[x0, x1]).scale(x1="log").plot()
ax_lin, ax_log = p._figure.axes
xfm_lin = ax_lin.xaxis.get_transform().transform
assert_array_equal(xfm_lin([1, 10, 100]), [1, 10, 100])
xfm_log = ax_log.xaxis.get_transform().transform
assert_array_equal(xfm_log([1, 10, 100]), [0, 1, 2])
@pytest.mark.xfail(reason="Custom log scale needs log name for consistency")
def test_log_scale_name(self):
p = Plot().scale(x="log").plot()
ax = p._figure.axes[0]
assert ax.get_xscale() == "log"
assert ax.get_yscale() == "linear"
def test_mark_data_log_transform_is_inverted(self, long_df):
col = "z"
m = MockMark()
Plot(long_df, x=col).scale(x="log").add(m).plot()
assert_vector_equal(m.passed_data[0]["x"], long_df[col])
def test_mark_data_log_transform_with_stat(self, long_df):
class Mean(Stat):
group_by_orient = True
def __call__(self, data, groupby, orient, scales):
other = {"x": "y", "y": "x"}[orient]
return groupby.agg(data, {other: "mean"})
col = "z"
grouper = "a"
m = MockMark()
s = Mean()
Plot(long_df, x=grouper, y=col).scale(y="log").add(m, s).plot()
expected = (
long_df[col]
.pipe(np.log)
.groupby(long_df[grouper], sort=False)
.mean()
.pipe(np.exp)
.reset_index(drop=True)
)
assert_vector_equal(m.passed_data[0]["y"], expected)
def test_mark_data_from_categorical(self, long_df):
col = "a"
m = MockMark()
Plot(long_df, x=col).add(m).plot()
levels = categorical_order(long_df[col])
level_map = {x: float(i) for i, x in enumerate(levels)}
assert_vector_equal(m.passed_data[0]["x"], long_df[col].map(level_map))
def test_mark_data_from_datetime(self, long_df):
col = "t"
m = MockMark()
Plot(long_df, x=col).add(m).plot()
expected = long_df[col].map(mpl.dates.date2num)
if Version(mpl.__version__) < Version("3.3"):
expected = expected + mpl.dates.date2num(np.datetime64('0000-12-31'))
assert_vector_equal(m.passed_data[0]["x"], expected)
def test_facet_categories(self):
m = MockMark()
p = Plot(x=["a", "b", "a", "c"]).facet(col=["x", "x", "y", "y"]).add(m).plot()
ax1, ax2 = p._figure.axes
assert len(ax1.get_xticks()) == 3
assert len(ax2.get_xticks()) == 3
assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [2, 3]))
def test_facet_categories_unshared(self):
m = MockMark()
p = (
Plot(x=["a", "b", "a", "c"])
.facet(col=["x", "x", "y", "y"])
.configure(sharex=False)
.add(m)
.plot()
)
ax1, ax2 = p._figure.axes
assert len(ax1.get_xticks()) == 2
assert len(ax2.get_xticks()) == 2
assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 1.], [2, 3]))
def test_facet_categories_single_dim_shared(self):
data = [
("a", 1, 1), ("b", 1, 1),
("a", 1, 2), ("c", 1, 2),
("b", 2, 1), ("d", 2, 1),
("e", 2, 2), ("e", 2, 1),
]
df = pd.DataFrame(data, columns=["x", "row", "col"]).assign(y=1)
m = MockMark()
p = (
Plot(df, x="x")
.facet(row="row", col="col")
.add(m)
.configure(sharex="row")
.plot()
)
axs = p._figure.axes
for ax in axs:
assert ax.get_xticks() == [0, 1, 2]
assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [2, 3]))
assert_vector_equal(m.passed_data[2]["x"], pd.Series([0., 1., 2.], [4, 5, 7]))
assert_vector_equal(m.passed_data[3]["x"], pd.Series([2.], [6]))
def test_pair_categories(self):
data = [("a", "a"), ("b", "c")]
df = pd.DataFrame(data, columns=["x1", "x2"]).assign(y=1)
m = MockMark()
p = Plot(df, y="y").pair(x=["x1", "x2"]).add(m).plot()
ax1, ax2 = p._figure.axes
assert ax1.get_xticks() == [0, 1]
assert ax2.get_xticks() == [0, 1]
assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 1.], [0, 1]))
@pytest.mark.xfail(
Version(mpl.__version__) < Version("3.4.0"),
reason="Sharing paired categorical axes requires matplotlib>3.4.0"
)
def test_pair_categories_shared(self):
data = [("a", "a"), ("b", "c")]
df = pd.DataFrame(data, columns=["x1", "x2"]).assign(y=1)
m = MockMark()
p = Plot(df, y="y").pair(x=["x1", "x2"]).add(m).configure(sharex=True).plot()
for ax in p._figure.axes:
assert ax.get_xticks() == [0, 1, 2]
assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [0, 1]))
def test_identity_mapping_linewidth(self):
m = MockMark()
x = y = [1, 2, 3, 4, 5]
lw = pd.Series([.5, .1, .1, .9, 3])
Plot(x=x, y=y, linewidth=lw).scale(linewidth=None).add(m).plot()
assert_vector_equal(m.passed_scales["linewidth"](lw), lw)
def test_pair_single_coordinate_stat_orient(self, long_df):
class MockStat(Stat):
def __call__(self, data, groupby, orient, scales):
self.orient = orient
return data
s = MockStat()
Plot(long_df).pair(x=["x", "y"]).add(MockMark(), s).plot()
assert s.orient == "x"
def test_inferred_nominal_passed_to_stat(self):
class MockStat(Stat):
def __call__(self, data, groupby, orient, scales):
self.scales = scales
return data
s = MockStat()
y = ["a", "a", "b", "c"]
Plot(y=y).add(MockMark(), s).plot()
assert s.scales["y"].scale_type == "nominal"
# TODO where should RGB consistency be enforced?
@pytest.mark.xfail(
reason="Correct output representation for color with identity scale undefined"
)
def test_identity_mapping_color_strings(self):
m = MockMark()
x = y = [1, 2, 3]
c = ["C0", "C2", "C1"]
Plot(x=x, y=y, color=c).scale(color=None).add(m).plot()
expected = mpl.colors.to_rgba_array(c)[:, :3]
assert_array_equal(m.passed_scales["color"](c), expected)
def test_identity_mapping_color_tuples(self):
m = MockMark()
x = y = [1, 2, 3]
c = [(1, 0, 0), (0, 1, 0), (1, 0, 0)]
Plot(x=x, y=y, color=c).scale(color=None).add(m).plot()
expected = mpl.colors.to_rgba_array(c)[:, :3]
assert_array_equal(m.passed_scales["color"](c), expected)
@pytest.mark.xfail(
reason="Need decision on what to do with scale defined for unused variable"
)
def test_undefined_variable_raises(self):
p = Plot(x=[1, 2, 3], color=["a", "b", "c"]).scale(y=Continuous())
err = r"No data found for variable\(s\) with explicit scale: {'y'}"
with pytest.raises(RuntimeError, match=err):
p.plot()
class TestPlotting:
def test_matplotlib_object_creation(self):
p = Plot().plot()
assert isinstance(p._figure, mpl.figure.Figure)
for sub in p._subplots:
assert isinstance(sub["ax"], mpl.axes.Axes)
def test_empty(self):
m = MockMark()
Plot().plot()
assert m.n_splits == 0
def test_single_split_single_layer(self, long_df):
m = MockMark()
p = Plot(long_df, x="f", y="z").add(m).plot()
assert m.n_splits == 1
assert m.passed_keys[0] == {}
assert m.passed_axes == [sub["ax"] for sub in p._subplots]
for col in p._data.frame:
assert_series_equal(m.passed_data[0][col], p._data.frame[col])
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, True),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
# (cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), True),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
# (cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True),
# (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_struct_dtype(obj, expect):
# TODO: All inputs of interval types are currently disabled due to
# inconsistent behavior of is_struct_dtype for interval types that will be
# fixed as part of the array refactor.
assert types.is_struct_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_decimal_dtype(obj, expect):
assert types.is_decimal_dtype(obj) == expect
@pytest.mark.parametrize(
"obj",
(
# Base Python objects.
bool(),
int(),
float(),
complex(),
str(),
"",
r"",
object(),
# Base Python types.
bool,
int,
float,
complex,
str,
object,
# NumPy types.
np.bool_,
np.int_,
np.float64,
np.complex128,
np.str_,
np.unicode_,
np.datetime64,
np.timedelta64,
# NumPy scalars.
np.bool_(),
np.int_(),
np.float64(),
np.complex128(),
np.str_(),
np.unicode_(),
np.datetime64(),
np.timedelta64(),
# NumPy dtype objects.
np.dtype("bool"),
np.dtype("int"),
np.dtype("float"),
np.dtype("complex"),
np.dtype("str"),
np.dtype("unicode"),
np.dtype("datetime64"),
np.dtype("timedelta64"),
np.dtype("object"),
# NumPy arrays.
np.array([], dtype=np.bool_),
np.array([], dtype=np.int_),
np.array([], dtype=np.float64),
np.array([], dtype=np.complex128),
np.array([], dtype=np.str_),
np.array([], dtype=np.unicode_),
np.array([], dtype=np.datetime64),
np.array([], dtype=np.timedelta64),
np.array([], dtype=object),
# Pandas dtypes.
# TODO: pandas does not consider these to be categoricals.
# pd.core.dtypes.dtypes.CategoricalDtypeType,
# pd.CategoricalDtype,
# Pandas objects.
pd.Series(dtype="bool"),
pd.Series(dtype="int"),
pd.Series(dtype="float"),
pd.Series(dtype="complex"),
pd.Series(dtype="str"),
pd.Series(dtype="unicode"),
pd.Series(dtype="datetime64[s]"),
pd.Series(dtype="timedelta64[s]"),
pd.Series(dtype="category"),
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime as dt
import importlib
import json
import logging
import time
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.sql.sqltypes import TIMESTAMP, VARCHAR, FLOAT, INTEGER
import iotfunctions
from . import db as db_module
from .automation import (TimeSeriesGenerator, DateGenerator, MetricGenerator, CategoricalGenerator)
from .exceptions import StageException
from .pipeline import (CalcPipeline, DropNull, JobController, JobLogNull, Trace, AggregateItems)
from .stages import (DataReader, DataWriter, DataWriterFile)
from .util import (MemoryOptimizer, build_grouper, categorize_args, reset_df_index)
logger = logging.getLogger(__name__)
def retrieve_entity_type_metadata(raise_error=True, **kwargs):
"""
Get server metadata for entity type
"""
db = kwargs['_db']
# get kpi functions metadata
meta = db.http_request(object_type='engineInput', object_name=kwargs['logical_name'], request='GET',
raise_error=raise_error)
try:
meta = json.loads(meta)
except (TypeError, json.JSONDecodeError):
meta = None
if meta is None or 'exception' in meta:
raise RuntimeError(('API call to server did not retrieve valid entity '
' type properties for %s.' % kwargs['logical_name']))
if meta['kpiDeclarations'] is None:
meta['kpiDeclarations'] = []
logger.warning(('This entity type has no calculated kpis'))
# cache function catalog metadata in the db object
function_list = [x['functionName'] for x in meta['kpiDeclarations']]
db.load_catalog(install_missing=True, function_list=function_list)
# map server properties
params = {}
params['_entity_type_id'] = meta['entityTypeId']
params['_db_schema'] = meta['schemaName']
params['name'] = meta['metricsTableName']
params['_timestamp'] = meta['metricTimestampColumn']
params['_dimension_table_name'] = meta['dimensionsTable']
params['_data_items'] = meta['dataItems']
# constants
c_meta = db.http_request(object_type='constants', object_name=kwargs['logical_name'], request='GET')
try:
c_meta = json.loads(c_meta)
except (TypeError, json.JSONDecodeError):
logger.debug(('API call to server did not retrieve valid entity type'
' properties. No properties set.'))
else:
for p in c_meta:
key = p['name']
if isinstance(p['value'], dict):
params[key] = p['value'].get('value', p['value'])
else:
params[key] = p['value']
logger.debug('Retrieved server constant %s with value %s', key, params[key])
params = {**kwargs, **params}
return (params, meta)
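# Illustrative sketch (not part of the original module): one way the metadata
# returned above might be consumed. The Database object `db` and the entity
# type name 'my_entity' are assumptions for the example; a real call needs a
# configured connection and a matching entity type on the server.
def _example_retrieve_metadata(db):
    params, meta = retrieve_entity_type_metadata(raise_error=False, _db=db,
                                                 logical_name='my_entity')
    # params carries server properties such as '_entity_type_id', '_db_schema',
    # 'name' (metrics table) and '_timestamp', plus any server constants
    kpi_functions = [k['functionName'] for k in meta['kpiDeclarations']]
    return params['_entity_type_id'], kpi_functions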
class EntityType(object):
"""
Data is organised around Entity Types. Entity Types have one or more
physical database object for their data. When creating a new Entity Type,
it will attempt to connect itself to a table of the same name in the
database. If no table exists the Entity Type will create one.
Entity types describe the payload of an AS job. A job is built by a
JobController using functions metadata prepared by the Entity Type.
Metadata prepared is:
_functions:
List of function objects
_data_items:
List of data items and all of their metadata such as their
datatype.
_granularities_dict:
Dictionary keyed on granularity name. Contains a granularity object
that provides access to granularity metadata such as the time
level and other dimensions involved in the aggregation.
_schedules_dict:
Dictionary keyed on a schedule frequency containing other metadata
about the operations to be run at this frequency, e.g. how many days
should be backtracked when retrieving data.
Entity types may be initialized as client objects for local testing
or may be loaded from the server. After initialization all of the
above instance variables will be populated. The metadata looks the same
regardless of whether the entity type was loaded from the server
or initialized on the client. The logic to build the metadata is
different though.
Parameters
----------
name: str
Name of the entity type. Use lower case. Will be used as the physical
database table name so don't use database reserved works of special
characters.
db: Database object
Contains the connection info for the database
*args:
Additional positional arguments are used to add the list of SQL Alchemy
Column objects contained within this table. Similar to the style of a
CREATE TABLE sql statement. There is no need to specify column names
if you are using an existing database table as an entity type.
**kwargs
Additional keyword args.
_timestamp: str
Overide the timestamp column name from the default of 'evt_timestamp'
"""
is_entity_type = True
is_local = False
auto_create_table = True
aggregate_complete_periods = True # align data for aggregation with time grain to avoid partial periods
log_table = 'KPI_LOGGING' # deprecated, to be removed
checkpoint_table = 'KPI_CHECKPOINT' # deprecated, to be removed
chunk_size = None # use job controller default chunk
default_backtrack = None
trace_df_changes = True
drop_existing = False
# These two columns will be available in the dataframe of a pipeline
_entity_id = 'deviceid' # identify the instance
_timestamp_col = '_timestamp' # copy of the event timestamp from the index
# This column will identify an instance in the index
_df_index_entity_id = 'id'
# when automatically creating a new dimension, use this suffix
_auto_dim_suffix = '_auto_dim'
# when looking for an automatically created numeric index it should be named:
auto_index_name = '_auto_index_'
# constants declared as part of an entity type definition
ui_constants = None
_functions = None
# generator
_scd_frequency = '2D' # deprecated. Use parameters on EntityDataGenerator
_activity_frequency = '3D' # deprecated. Use parameters on EntityDataGenerator
_start_entity_id = 73000 # deprecated. Use parameters on EntityDataGenerator
_auto_entity_count = 5 # deprecated. Use parameters on EntityDataGenerator
# pipeline work variables stages
_dimension_table = None
_scd_stages = None
_custom_calendar = None
# variables that will be set when loading from the server
_entity_type_id = None
logical_name = None
_timestamp = 'evt_timestamp'
_dimension_table_name = None
_db_connection_dbi = None
_db_schema = None
_data_items = None
tenant_id = None
_entity_filter_list = None
_start_ts_override = None
_end_ts_override = None
_stages = None
_schedules_dict = None
_granularities_dict = None
_input_set = None
_output_list = None
_invalid_stages = None
_disabled_stages = None
# processing defaults
_pre_aggregate_time_grain = None # aggregate incoming data before processing
_auto_read_from_ts_table = True # read new data from designated time series table for the entity
_pre_agg_rules = None # pandas agg dictionary containing list of aggregates to apply for each item
_pre_agg_outputs = None # dictionary containing list of output items names for each item
_data_reader = DataReader
_abort_on_fail = False
_auto_save_trace = 30
save_trace_to_file = False
drop_null_class = DropNull
enable_downcast = False
allow_projection_list_trim = True
_write_usage = False
# deprecated class variables (to be removed)
_checkpoint_by_entity = True # manage a separate checkpoint for each entity instance
_is_initial_transform = True
_is_preload_complete = False
def __init__(self, name, db, *args, **kwargs):
logger.debug('Initializing new entity type using iotfunctions %s', iotfunctions.__version__)
try:
self.logical_name = kwargs.get('logical_name', None)
if self.logical_name is None:
self.logical_name = name
except AttributeError:
self.logical_name = name
if db is None:
name = 'None'
elif db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
self.name = name
self.description = kwargs.get('description', None)
if self.description is None:
self.description = ''
else:
del (kwargs['description'])
self.activity_tables = {}
self.scd = {}
self.db = db
if self.db is not None:
self.tenant_id = self.db.tenant_id
self._system_columns = [self._entity_id, self._timestamp_col, 'logicalinterface_id', 'devicetype', 'format',
'updated_utc', self._timestamp]
self._stage_type_map = self.default_stage_type_map()
self._custom_exclude_col_from_auto_drop_nulls = []
self._drop_all_null_rows = True
if self._scd_stages is None:
self._scd_stages = []
if self._data_items is None:
self._data_items = []
if self._granularities_dict is None:
self._granularities_dict = {}
# additional params set from kwargs
self.set_params(**kwargs)
# Start a trace to record activity on the entity type
self._trace = Trace(object_name=None, parent=self, db=db)
if self._disabled_stages is None:
self._disabled_stages = []
if self._invalid_stages is None:
self._invalid_stages = []
if len(self._disabled_stages) > 0 or len(self._invalid_stages) > 0:
self.trace_append(created_by=self, msg='Skipping disabled and invalid stages', log_method=logger.info,
**{'skipped_disabled_stages': [s['functionName'] for s in self._disabled_stages],
'skipped_disabled_data_items': [s['output'] for s in self._disabled_stages],
'skipped_invalid_stages': [s['functionName'] for s in self._invalid_stages],
'skipped_invalid_data_items': [s['output'] for s in self._invalid_stages]})
# attach to time series table
if self._db_schema is None:
logger.warning(('No _db_schema specified in **kwargs. Using'
' default database schema.'))
self._mandatory_columns = [self._timestamp, self._entity_id]
# separate args into categories
categories = [('constant', 'is_ui_control', None), ('granularity', 'is_granularity', None),
('function', 'is_function', None), ('column', None, Column)]
categorized = categorize_args(categories, 'functions', *args)
cols = list(categorized.get('column', []))
functions = list(categorized.get('function', []))
constants = list(categorized.get('constant', []))
grains = list(categorized.get('granularity', []))
if self.drop_existing and db is not None and not self.is_local:
self.drop_tables()
# create a database table if needed using cols
if name is not None and db is not None and not self.is_local:
try:
self.table = self.db.get_table(self.name, self._db_schema)
except KeyError:
if self.auto_create_table:
ts = db_module.TimeSeriesTable(self.name, self.db, *cols, **kwargs)
self.table = ts.table
# self.db.create()
msg = 'Create table %s' % self.name
logger.info(msg)
else:
msg = ('Database table %s not found. Unable to create'
' entity type instance. Provide a valid table name'
' or use the auto_create_table = True keyword arg'
' to create a table. ' % (self.name))
raise ValueError(msg)
# populate the data items metadata from the supplied columns
if isinstance(self._data_items, list) and len(self._data_items) == 0:
self._data_items = self.build_item_metadata(self.table)
else:
logger.warning((
'Created a logical entity type. It is not connected to a real database table, so it cannot perform any database operations.'))
# add granularities
for g in grains:
logger.debug('Adding granularity to entity type: %s', g.name)
self._granularities_dict[g.name] = g
# add constants
self.ui_constants = constants
self.build_ui_constants()
# _functions
# functions may have been provided as a kwarg and may be included as args
# combine all
if self._functions is None:
self._functions = []
self._functions.extend(functions)
if name is not None and db is not None and not self.is_local:
db.entity_type_metadata[self.logical_name] = self
logger.debug(('Initialized entity type %s'), str(self))
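# Illustrative sketch (assumption, not from the original source): a minimal
# client-side EntityType definition. `db` is a configured Database instance;
# the table, column and schema names below are placeholders.
#
#     entity = EntityType('test_entity', db,
#                         Column('temp', Float()),
#                         Column('company_code', String(50)),
#                         **{'_timestamp': 'evt_timestamp',
#                            '_db_schema': 'bluadmin'})
#
# With auto_create_table = True (the class default) a time series table of the
# same name is created automatically when it does not already exist.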
def add_activity_table(self, name, activities, *args, **kwargs):
"""
add an activity table for this entity type.
parameters
----------
name: str
table name
activities: list of strs
activity type codes: these identify the nature of the activity, e.g. PM is Preventative Maintenance
*args: Column objects
other columns describing the activity, e.g. materials_cost
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add activity tables ')
raise ValueError(msg)
kwargs['_activities'] = activities
kwargs['schema'] = self._db_schema
# name = name.lower()
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.ActivityTable(name, self.db, *args, **kwargs)
try:
sqltable = self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.activity_tables[name] = table
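# Illustrative sketch (assumption): registering an activity table with two
# activity type codes and an extra descriptive column; names are placeholders.
#
#     entity.add_activity_table('widget_maintenance_activity',
#                               ['PM', 'UM'],
#                               Column('materials_cost', Float()))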
def add_slowly_changing_dimension(self, property_name, datatype, **kwargs):
"""
add a slowly changing dimension table containing a single property for this entity type
parameters
----------
property_name : str
name of property, e.g. firmware_version (lower case, no database reserved words)
datatype: sqlalchemy datatype
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add slowly changing dimensions ')
raise ValueError(msg)
property_name = property_name.lower()
name = '%s_scd_%s' % (self.name, property_name)
kwargs['schema'] = self._db_schema
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.SlowlyChangingDimension(name=name, database=self.db, property_name=property_name,
datatype=datatype, **kwargs)
try:
self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.scd[property_name] = table
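# Illustrative sketch (assumption): tracking a slowly changing property using
# the firmware_version example from the docstring above.
#
#     entity.add_slowly_changing_dimension('firmware_version', String(32))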
def _add_scd_pipeline_stage(self, scd_lookup):
self._scd_stages.append(scd_lookup)
def build_agg_dict_from_meta_list(self, meta_list):
agg_dict = OrderedDict()
input_items = set()
output_items = []
for f in meta_list:
input_item = f['input'].get('source')
output_item = f['output'].get('name')
aggregate = f['functionName']
try:
agg_dict[input_item].append(aggregate)
except KeyError:
agg_dict[input_item] = [aggregate]
input_items.add(input_item)
output_items.append(output_item)
return (agg_dict, input_items, output_items)
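# Illustrative sketch (assumption): the shape of meta_list expected above and
# the aggregation dictionary derived from it.
#
#     meta_list = [
#         {'functionName': 'sum', 'input': {'source': 'temp'},
#          'output': {'name': 'temp_sum'}},
#         {'functionName': 'mean', 'input': {'source': 'temp'},
#          'output': {'name': 'temp_mean'}},
#     ]
#     agg_dict, inputs, outputs = entity.build_agg_dict_from_meta_list(meta_list)
#     # agg_dict -> {'temp': ['sum', 'mean']}
#     # inputs   -> {'temp'}
#     # outputs  -> ['temp_sum', 'temp_mean']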
def build_arg_metadata(self, obj):
"""
Examine the metadata provided by build_ui() to understand more about
the arguments to a function.
Place the values of inputs and outputs into 2 dicts
Return these two dicts in a tuple along with an output_meta dict
that contains argument values and types
Build the _input_set and _output list. These describe the set of
data items required as inputs to a function and the list of data
items produced by the function.
"""
name = obj.__class__.__name__
try:
(inputs, outputs) = obj.build_ui()
except (AttributeError, NotImplementedError) as e:
try:
fn_metadata = obj.metadata()
inputs = fn_metadata.get('input', None)
outputs = fn_metadata.get('output', None)
except (AttributeError, KeyError) as ex:
msg = ('Can\'t get metadata for function %s. Implement the'
' build_ui() method for this function. %s' % (name, str(e)))
raise NotImplementedError(msg)
input_args = {} # this is not used. Included only to maintain compatibility of return signature
output_args = {} # this is not used. Included only to maintain compatibility of return signature
output_meta = {} # this is not used. Included only to maintain compatibility of return signature
output_list = []
# There are two ways to gather inputs to a function.
# 1) from the arguments of the function
# 2) from an explicit list of items returned by the get_input_items
# method
try:
input_set = set(obj.get_input_items())
except AttributeError:
input_set = set()
else:
if len(input_set) > 0:
logger.debug(('Function %s has explicit required input items '
' delivered by the get_input_items() method: %s'), name, input_set)
if not isinstance(inputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, inputs)))
if not isinstance(outputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, outputs)))
args = []
args.extend(inputs)
args.extend(outputs)
for a in args:
try:
# get arg name and type from UI object
type_ = a.type_
arg = a.name
except AttributeError as e:
try:
# get arg name and type from legacy dict
type_ = a.get('type', None)
arg = a.get('name', None)
except AttributeError:
type_ = None
arg = None
if type_ is None or arg is None:
msg = ('Error while getting metadata from function. The inputs'
' and outputs of the function are not described correctly'
' using UIcontrols with a type_ %s and name %s' % (type_, arg))
raise TypeError(msg)
arg_value = getattr(obj, arg)
out_arg = None
out_arg_value = None
if type_ == 'DATA_ITEM':
# the argument is an input that contains a data item or
# list of data items
if isinstance(arg_value, list):
input_set |= set(arg_value)
else:
input_set.add(arg_value)
logger.debug('Using input items %s for %s', arg_value, arg)
elif type_ == 'OUTPUT_DATA_ITEM':
# the arg is an output item or list of them
out_arg = arg
out_arg_value = arg_value
# some inputs implicitly describe outputs
try:
out_arg = a.output_item
except AttributeError:
pass # no need to check legacy dict for this property as it was not supported in the legacy dict
else:
if out_arg is not None:
out_arg_value = getattr(obj, out_arg)
# process output args
if out_arg is not None:
if isinstance(out_arg_value, list):
output_list.extend(out_arg_value)
else:
output_list.append(out_arg_value)
logger.debug('Using output items %s for %s', out_arg_value, out_arg)
# output_meta is present in the AS metadata structure, but not
# currently produced for local functions
return (input_args, output_args, output_meta, input_set, output_list)
def build_ui_constants(self):
"""
Build attributes for each ui constants declared with the entity type
"""
if self.ui_constants is None:
logger.debug('No constants declared in entity definition')
self.ui_constants = []
params = {}
for c in self.ui_constants:
try:
params[c.name] = c.default
except AttributeError:
logger.warning(('Cannot set value of parameter %s as it does'
' not have a default value'), c.name)
self.set_params(**params)
def build_flat_stage_list(self):
"""
Build a flat list of all function objects defined for entity type
"""
stages = []
for stage in self._functions:
try:
is_system = stage.is_system_function
except AttributeError:
is_system = False
logger.warning(('Function %s has no is_system_function property.'
' This means it was not inherited from '
' an iotfunctions base class. AS authors are'
' strongly encouraged to always inherit '
' from iotfunctions base classes'), stage.__class__.__name__)
if not is_system:
stages.append(stage)
return stages
def build_granularities(self, grain_meta, freq_lookup):
"""
Convert AS granularity metadata to granularity objects.
"""
out = {}
for g in grain_meta:
grouper = []
freq = None
entity_id = None
if g['entityFirst']:
grouper.append(pd.Grouper(key=self._entity_id))
entity_id = self._entity_id
if g['frequency'] is not None:
freq = (self.get_grain_freq(g['frequency'], freq_lookup, None))
if freq is None:
raise ValueError(('Invalid frequency name %s. The frequency name'
' must exist in the frequency lookup %s' % (g['frequency'], freq_lookup)))
# add a number to the frequency to make it compatible with pd.Timedelta
if freq[0] not in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
freq = '1' + freq
grouper.append(pd.Grouper(key=self._timestamp, freq=freq))
custom_calendar = None
custom_calendar_keys = []
dimensions = []
# differentiate between dimensions and custom calendar items
for d in g['dataItems']:
grouper.append(pd.Grouper(key=d))
if self._custom_calendar is not None:
if d in self._custom_calendar._output_list:
custom_calendar_keys.append(d)
dimensions.append(d)
granularity = Granularity(name=g['name'], grouper=grouper, dimensions=dimensions,
entity_name=self.logical_name, timestamp=self._timestamp, entity_id=entity_id,
custom_calendar_keys=custom_calendar_keys, freq=freq,
custom_calendar=custom_calendar)
out[g['name']] = granularity
return out
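# Illustrative example (hypothetical metadata; 'entity_type' stands for an instance of this class):
# grain_meta  = [{'name': 'site_daily', 'entityFirst': True, 'frequency': 'Daily', 'dataItems': ['site']}]
# freq_lookup = [{'name': 'Daily', 'alias': 'D'}]
# grains = entity_type.build_granularities(grain_meta, freq_lookup)
# grains['site_daily'].grouper is then roughly:
# [pd.Grouper(key=<entity id column>), pd.Grouper(key=<timestamp column>, freq='1D'), pd.Grouper(key='site')]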
def build_item_metadata(self, table):
"""
Build a client generated version of AS server metadata from a
sqlalchemy table object.
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' cannot build item metadata from tables ')
raise ValueError(msg)
for col_name, col in list(table.c.items()):
item = {}
if col_name not in self.get_excluded_cols():
item['name'] = col_name
item['type'] = 'METRIC'
item['parentDataItem'] = None
item['kpiFunctionDto'] = None
item['columnName'] = col.name
item['columnType'] = self.db.get_as_datatype(col)
item['sourceTableName'] = self.name
item['tags'] = []
item['transient'] = False
self._data_items.append(item)
return self._data_items
def build_schedules(self, metadata):
"""
Build a dictionary of schedule metadata from the schedules contained
within function definitions.
The schedule dictionary is keyed on a pandas freq string. This
frequency denotes the schedule interval. The dictionary contains a
tuple (start_hour,start_minute,backtrack_days)
Returns
-------
dict of schedules keyed on schedule frequency
Example
-------
{ '5min': (16, 3, 7) }
5 minute schedule interval with a start time of 4:03pm and backtrack of 7 days.
"""
freqs = {}
for f in metadata:
if f['schedule'] is not None:
freq = f['schedule']['every']
start = time.strptime(f['schedule']['starting_at'], '%H:%M:%S')
start_hour = start[3]
start_min = start[4]
backtrack = f['backtrack']
if backtrack is not None:
backtrack_days = backtrack.get('days', 0) + (backtrack.get('hours', 0) / 24) + (
backtrack.get('minutes', 0) / 1440)
else:
backtrack_days = None
existing_schedule = freqs.get(freq, None)
if existing_schedule is None:
f[freq] = (start_hour, start_min, backtrack_days)
else:
corrected_schedule = list(existing_schedule)
if existing_schedule[0] > start_hour:
corrected_schedule[0] = start_hour
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the earlier start hour of %s instead of %s'
' for schedule %s.' % (start_hour, existing_schedule[0], freq)))
if existing_schedule[1] > start_min:
corrected_schedule[1] = start_min
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the earlier start minute of %s instead of %s'
' for schedule %s.' % (start_min, existing_schedule[1], freq)))
if backtrack_days is not None:
if existing_schedule[2] is None or existing_schedule[2] < backtrack_days:
corrected_schedule[2] = backtrack_days
logger.warning(('There is a conflict in the schedule metadata.'
' Picked the longer backtrack of %s instead of %s'
' for schedule %s.' % (backtrack_days, existing_schedule[2], freq)))
f[freq] = tuple(corrected_schedule)
freqs[freq] = f[freq]
f['schedule'] = freq
return freqs
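# Illustrative example (hypothetical metadata; 'entity_type' stands for an instance of this class):
# metadata = [{'schedule': {'every': '5min', 'starting_at': '16:03:00'}, 'backtrack': {'days': 7}}]
# freqs = entity_type.build_schedules(metadata)
# freqs -> {'5min': (16, 3, 7.0)}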
def classify_stages(self):
"""
Create a dictionary of stage objects. Dictionary is keyed by
stage type and a granularity obj. It contains a list of stage
objects. Stages are classified by timing of execution, ie: preload,
get_data, transform, aggregate
"""
logger.debug('Classifying stages by timing of execution, ie: preload, get_data, transform, aggregate')
stage_metadata = dict()
active_granularities = set()
# Add a data_reader stage. This will read entity data.
if self._auto_read_from_ts_table:
auto_reader = self._data_reader(name='read_entity_data', obj=self)
stage_type = self.get_stage_type(auto_reader)
granularity = None # input level
stage_metadata[(stage_type, granularity)] = [auto_reader]
auto_reader.schedule = None
auto_reader._entity_type = self
else:
logger.debug(('Skipped auto read of payload data as'
' payload does not have _auto_read_from_ts_table'
' set to True'))
# Build a stage for each function.
for s in self._functions:
# replace deprecated function
obj = self.get_replacement(s)
# add metadata to stage
try:
obj.name
except AttributeError:
obj.name = obj.__class__.__name__
try:
obj._schedule
except AttributeError:
obj._schedule = None
try:
obj.granularity
except AttributeError:
obj.granularity = None
# the stage needs to know what entity type it belongs to
obj._entity_type = self
# classify stage
stage_type = self.get_stage_type(obj)
granularity = obj.granularity
if granularity is not None and isinstance(granularity, str):
granularity = self._granularities_dict.get(granularity, False)
if not granularity:
msg = ('Cannot build stage metadata. The granularity metadata'
' is invalid. Granularity of function is %s. Valid '
' granularities are %s' % (granularity, list(self._granularities_dict.keys())))
raise StageException(msg, obj.name)
elif isinstance(granularity, Granularity):
pass
else:
granularity = None
try:
# add to stage_type / granularity
stage_metadata[(stage_type, granularity)].append(obj)
except KeyError:
# start a new stage_type / granularity
stage_metadata[(stage_type, granularity)] = [obj]
# Remember all active granularities
if granularity is not None:
active_granularities.add(granularity)
# add metadata derived from function registration and function args
# input set and output list are critical metadata for the dependency model
# there are three ways to set them
# 1) using the instance variables _input_set and _output_list
# 2) using the methods get_input_set and get_output_list
# 3) using the function's registration metadata
if obj._input_set is not None:
logger.debug('Input set was preset for function %s', obj.name)
input_set = obj._input_set
else:
try:
input_set = obj.get_input_set()
except AttributeError:
input_set = None
if obj._output_list is not None:
logger.debug('Output list set was preset for function %s', obj.name)
output_list = obj._output_list
else:
try:
output_list = obj.get_output_list()
except AttributeError:
output_list = None
if input_set is None or output_list is None:
# get the input set and output list from the function argument metadata
(in_, out, out_meta, reg_input_set, reg_output_list) = self.build_arg_metadata(obj)
if input_set is None:
input_set = reg_input_set
if output_list is None:
output_list = reg_output_list
# set the _input_set and _output_list
obj._input_set = input_set
obj._output_list = output_list
# The stage may have metadata parameters that need to be
# copied onto the entity type
try:
entity_metadata = obj._metadata_params
except AttributeError:
entity_metadata = {}
logger.debug(('Function %s has no _metadata_params'
' property. This property allows the stage'
' to add properties to the entity type.'
' Using default of %s'), obj.name, entity_metadata)
if entity_metadata is not None and entity_metadata:
self.set_params(**entity_metadata)
self.trace_append(created_by=obj, msg='Adding entity type properties from function',
log_method=logger.debug, **entity_metadata)
# The stage may be a special stage that should be added to
# a special stages list, e.g. stages that have
# the property is_scd_lookup = True should be added to the
# _scd_stages list
specials = {'is_scd_lookup': self._scd_stages}
for function_prop, list_obj in list(specials.items()):
try:
is_function_prop = getattr(obj, function_prop)
except AttributeError:
is_function_prop = False
if is_function_prop:
list_obj.append(obj)
# Add for each granularity without frequency two AggregateItem stages. The result columns of these stages
# are used in the DataWriter when the aggregation results are pushed to the database
for gran in active_granularities:
if gran.freq is None:
for func_name, output_name in {('max', DataWriter.ITEM_NAME_TIMESTAMP_MAX),
('min', DataWriter.ITEM_NAME_TIMESTAMP_MIN)}:
new_stage = AggregateItems(input_items=[self._timestamp], aggregation_function=func_name,
output_items=[output_name])
new_stage._entity_type = self
new_stage.name = new_stage.__class__.__name__
new_stage._schedule = None
new_stage.granularity = gran
new_stage._input_set = {self._timestamp}
new_stage._output_list = [output_name]
stage_type = self.get_stage_type(new_stage)
stage_metadata[(stage_type, gran)].append(new_stage)
return stage_metadata
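# Illustrative shape of the returned dict (hypothetical stages and granularity):
# {('get_data', None): [<data reader stage>],
#  ('transform', None): [<transformer stage 1>, <transformer stage 2>],
#  ('simple_aggregate', <Granularity 'site_daily'>): [<aggregate stage>, <AggregateItems for timestamp max/min>]}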
def build_stage_metadata(self, *args):
"""
Make a new JobController payload from a list of function objects
"""
metadata = []
for f in args:
# if function is deprecated it may have a replacement
f = self.get_replacement(f)
fn = {}
try:
name = f.name
except AttributeError:
name = f.__class__.__name__
fn['name'] = name
fn['object_instance'] = f
fn['description'] = f.__doc__
fn['functionName'] = f.__class__.__name__
fn['enabled'] = True
fn['execStatus'] = False
fn['schedule'] = None
fn['backtrack'] = None
fn['granularity'] = f.granularity
(fn['input'], fn['output'], fn['outputMeta'], fn['input_set'], fn['output_list']) = self.build_arg_metadata(
f)
fn['inputMeta'] = None
metadata.append(fn)
logger.debug(('Added local function instance as job stage: %s'), fn)
self._stages = self.build_stages(function_meta=metadata, granularities_dict=self._granularities_dict)
return metadata
def index_df(self, df):
"""
Create an index on the deviceid and the timestamp
"""
if self._df_index_entity_id is None:
self._df_index_entity_id = self._entity_id
if self._timestamp_col is None:
self._timestamp_col = self._timestamp
if df.index.names != [self._df_index_entity_id, self._timestamp]:
try:
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
df = reset_df_index(df, auto_index_name=self.auto_index_name)
try:
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
try:
df[self._df_index_entity_id] = df[self._entity_id]
df = df.set_index([self._df_index_entity_id, self._timestamp])
except KeyError:
raise KeyError(('Error attempting to index time series'
' dataframe. Unable to locate index'
' columns: %s or %s, %s') % (
self._df_index_entity_id, self._entity_id, self._timestamp))
logger.debug(('Indexed dataframe on %s, %s'), self._df_index_entity_id, self._timestamp)
else:
logger.debug(('Found existing index on %s, %s.'
'No need to recreate index'), self._df_index_entity_id, self._timestamp)
# create a dummy column for _entity_id
if self._entity_id != self._df_index_entity_id:
df[self._entity_id] = df.index.get_level_values(self._df_index_entity_id)
# create a dummy column for _timestamp
if self._timestamp != self._timestamp_col:
df[self._timestamp_col] = df.index.get_level_values(self._timestamp)
return df
def cos_save(self):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to save to cloud object storage ')
raise ValueError(msg)
name = ['entity_type', self.name]
name = '.'.join(name)
self.db.cos_save(self, name)
@classmethod
def default_stage_type_map(cls):
"""
Configure how properties of stages are used to set the stage type
that is used by the job controller to decide how to process a stage
"""
return [('preload', 'is_preload'), ('get_data', 'is_data_source'), ('transform', 'is_transformer'),
('aggregate', 'is_data_aggregator'), ('simple_aggregate', 'is_simple_aggregator'),
('complex_aggregate', 'is_complex_aggregator'), ]
def df_sort_timestamp(self, df):
"""
Sort a dataframe on the timestamp column. Returns a tuple containing
the sorted dataframe and a column_name for the timestamp column.
"""
ts_col_name = self._timestamp
# timestamp may be column or in index
try:
df.sort_values([ts_col_name], inplace=True)
except KeyError:
try:
# legacy check for a redundant _timestamp alternative column
df.sort_values([self._timestamp_col], inplace=True)
ts_col_name = self._timestamp_col
except KeyError:
try:
df.sort_index(level=[ts_col_name], inplace=True)
except:
raise
return (df, ts_col_name)
def drop_tables(self, recreate=False):
"""
Drop tables known to be associated with this entity type
"""
self.db.drop_table(self.name, schema=self._db_schema, recreate=recreate)
self.drop_child_tables(recreate=recreate)
def drop_child_tables(self, recreate=False):
"""
Drop all child tables
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to drop child tables ')
raise ValueError(msg)
if self._dimension_table_name is None:
tables = []
else:
tables = [self._dimension_table_name]
tables.extend(self.activity_tables.values())
tables.extend(self.scd.values())
[self.db.drop_table(x, self._db_schema, recreate=recreate) for x in tables]
msg = 'dropped tables %s' % tables
logger.info(msg)
def exec_local_pipeline(self, start_ts=None, end_ts=None, entities=None, **kw):
"""
Test the functions on an entity type
Test will be run on local metadata. It will not use the server
job log. Results will be written to file.
"""
params = {'data_writer': DataWriterFile, 'keep_alive_duration': None, 'save_trace_to_file': True,
'default_backtrack': 'checkpoint', 'trace_df_changes': True, '_abort_on_fail': True,
'job_log_class': JobLogNull, '_auto_save_trace': None, '_start_ts_override': start_ts,
'_end_ts_override': end_ts, '_entity_filter_list': entities, '_production_mode': False}
kw = {**params, **kw}
job = JobController(payload=self, **kw)
# propagate parameters to functions
for f in self._functions:
for key, value in list(kw.items()):
setattr(f, key, value)
job.execute()
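# Illustrative usage (hypothetical dates and entity ids; 'entity_type' stands for an instance of this class):
# entity_type.exec_local_pipeline(start_ts=dt.datetime(2020, 1, 1),
#                                 end_ts=dt.datetime(2020, 1, 2),
#                                 entities=['73000', '73001'])
# results, trace and job log output are written to local files rather than the server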
def get_attributes_dict(self):
"""
Produce a dictionary containing all attributes
"""
c = {}
for att in dir(self):
value = getattr(self, att)
if not callable(value):
c[att] = value
return c
def get_calc_pipeline(self, stages=None):
"""
Make a new CalcPipeline object. Reset processing variables.
"""
warnings.warn('get_calc_pipeline() is deprecated. Use build_job()', DeprecationWarning)
self._scd_stages = []
self._custom_calendar = None
self._is_initial_transform = True
return CalcPipeline(stages=stages, entity_type=self)
def get_function_replacement_metadata(self, meta):
"""
replace incoming function metadata for aggregate functions with
metadata that will be used to build a DataAggregator
"""
replacement = {'Sum': 'sum', 'Minimum': 'min', 'Maximum': 'max', 'Mean': 'mean', 'Median': 'median',
'Count': 'count', 'DistinctCount': 'count_distinct', 'StandardDeviation': 'std',
'Variance': 'var', 'Product': 'product', 'First': 'first', 'Last': 'last'}
name = meta.get('functionName', None)
replacement_name = replacement.get(name, None)
if replacement_name is not None:
meta['functionName'] = replacement_name
return (meta.get('granularity', None), meta)
else:
return (None, None)
def get_local_column_lists_by_type(self, columns, known_categoricals_set=None):
"""
Examine a list of columns and produce a tuple containing the names
of metrics, dates, categoricals and others
"""
if known_categoricals_set is None:
known_categoricals_set = set()
metrics = []
dates = []
categoricals = []
others = []
if columns is None:
columns = []
all_cols = set([x.name for x in columns])
# exclude known categoricals that are not present in table
known_categoricals_set = known_categoricals_set.intersection(all_cols)
for c in columns:
data_type = c.type
if isinstance(data_type, (FLOAT, Float, INTEGER, Integer)):
metrics.append(c.name)
elif db_module.DB2_DOUBLE is not None and isinstance(data_type, db_module.DB2_DOUBLE):
metrics.append(c.name)
elif isinstance(data_type, (VARCHAR, String)):
categoricals.append(c.name)
elif isinstance(data_type, (TIMESTAMP, DateTime)):
dates.append(c.name)
else:
others.append(c.name)
msg = 'Found column %s of unknown data type %s' % (c, data_type.__class__.__name__)
logger.warning(msg)
# reclassify known categoricals that were not correctly classified based on data type
for c in known_categoricals_set:
if c not in categoricals:
categoricals.append(c)
metrics = [x for x in metrics if x != c]
dates = [x for x in dates if x != c]
others = [x for x in others if x != c]
return (metrics, dates, categoricals, others)
def get_custom_calendar(self):
return self._custom_calendar
def get_data(self, start_ts=None, end_ts=None, entities=None, columns=None):
"""
Retrieve entity data at input grain or preaggregated
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to retrieve database data ')
raise ValueError(msg)
tw = {} # info to add to trace
if entities is None:
tw['entity_filter'] = 'all'
else:
tw['entity_filter'] = '%s entities' % len(entities)
if self._pre_aggregate_time_grain is None:
df = self.db.read_table(table_name=self.name, schema=self._db_schema, timestamp_col=self._timestamp,
parse_dates=None, columns=columns, start_ts=start_ts, end_ts=end_ts,
entities=entities, dimension=self._dimension_table_name)
tw['pre-aggregated'] = None
else:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(self.name, self._db_schema)
if self._dimension_table_name is not None:
categoricals.extend(self.db.get_column_names(self._dimension_table_name, self._db_schema))
if columns is None:
columns = []
columns.extend(metrics)
columns.extend(dates)
columns.extend(categoricals)
columns.extend(others)
# make sure each column is in the aggregate dictionary
# apply a default aggregate for each column not specified in the aggregation metadata
if self._pre_agg_rules is None:
self._pre_agg_rules = {}
self._pre_agg_outputs = {}
for c in columns:
try:
self._pre_agg_rules[c]
except KeyError:
if c not in [self._timestamp, self._entity_id]:
if c in metrics:
self._pre_agg_rules[c] = 'mean'
self._pre_agg_outputs[c] = 'mean_%s' % c
else:
self._pre_agg_rules[c] = 'max'
self._pre_agg_outputs[c] = 'max_%s' % c
else:
pass
df = self.db.read_agg(table_name=self.name, schema=self._db_schema, groupby=[self._entity_id],
timestamp=self._timestamp, time_grain=self._pre_aggregate_time_grain,
agg_dict=self._pre_agg_rules, agg_outputs=self._pre_agg_outputs, start_ts=start_ts,
end_ts=end_ts, entities=entities, dimension=self._dimension_table_name)
tw['pre-aggregated'] = self._pre_aggregate_time_grain
tw['rows_retrieved'] = len(df.index)
tw['start_ts'] = start_ts
tw['end_ts'] = end_ts
self.trace_append(created_by=self, msg='Retrieved entity timeseries data for %s' % self.name, **tw)
# Optimizing the data frame size using downcasting
if self.enable_downcast:
memo = MemoryOptimizer()
df = memo.downcastNumeric(df)
try:
df = self.index_df(df)
except (AttributeError, KeyError):
pass
return df
def get_data_items(self):
"""
Get the list of data items defined
:return: list of dicts containing data item metadata
"""
return self._data_items
def get_excluded_cols(self):
"""
Return a list of physical columns that should be excluded when returning
the list of data items
"""
return ['logicalinterface_id', 'format', 'updated_utc', 'devicetype', 'eventtype']
def get_grain_freq(self, grain_name, lookup, default):
"""
Lookup a pandas frequency string from an AS granularity name
"""
for l in lookup:
if grain_name == l['name']:
return l['alias']
return default
def get_output_items(self):
"""
Get a list of non calculated items: outputs from the time series table
"""
items = [x.get('columnName') for x in self._data_items if
x.get('type') == 'METRIC' or x.get('type') == 'DIMENSION']
return items
def get_log(self, rows=100):
"""
Get KPI execution log info. Returns a dataframe.
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to get log data ')
raise ValueError(msg)
query, log = self.db.query(self.log_table, self._db_schema)
query = query.filter(log.c.entity_type == self.name).order_by(log.c.timestamp_utc.desc()).limit(rows)
df = self.db.read_sql_query(query)
return df
def get_latest_log_entry(self):
"""
Get the most recent log entry. Returns dict.
"""
last = self.get_log(rows=1)
last = last.to_dict('records')[0]
return last
def get_param(self, param, default=None):
try:
out = getattr(self, param)
except AttributeError:
out = default
return out
def get_end_ts_override(self):
if self._end_ts_override is not None:
if isinstance(self._end_ts_override, dt.datetime):
return self._end_ts_override
date_time_obj = dt.datetime.strptime(self._end_ts_override[0], '%Y-%m-%d %H:%M:%S')
return date_time_obj
return None
def get_stage_type(self, stage):
"""
Examine the stage object to determine how it should be processed by
the JobController
Sets the stage type to the first valid entry in the stage map
the stage map is a list of tuples containing a stage type and
a boolean property name:
example:
[('get_data','is_data_source'),
('simple_aggregate','is_simple_aggregator')]
if a stage has both an is_data_source = True and
an is_simple_aggregator = True, the stage type will be returned as
'get_data'
"""
for (stage_type, prop) in self._stage_type_map:
try:
prop_value = getattr(stage, prop)
except AttributeError:
pass
else:
if prop_value:
return stage_type
raise TypeError(('Could not identify stage type for stage'
' %s from the stage map. Adjust the stage map'
' for the entity type or define an appropriate'
' is_<something> property on the class of the '
' stage. Stage map is %s' % (stage.name, self._stage_type_map)))
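# Illustrative example (hypothetical stage): with the default stage type map, a stage exposing both
# is_data_source = True and is_simple_aggregator = True resolves to 'get_data', because the
# ('get_data', 'is_data_source') entry appears first in the map:
# entity_type.get_stage_type(stage)  # -> 'get_data'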
def get_start_ts_override(self):
if self._start_ts_override is not None:
if isinstance(self._start_ts_override, dt.datetime):
date_time_obj = self._start_ts_override
else:
date_time_obj = dt.datetime.strptime(self._start_ts_override[0], '%Y-%m-%d %H:%M:%S')
return date_time_obj
return None
def get_replacement(self, obj):
"""
Get replacement for deprecated function
"""
try:
is_deprecated = obj.is_deprecated
except AttributeError:
is_deprecated = False
if is_deprecated:
try:
obj = obj.get_replacement()
except AttributeError:
msg = ('Skipped deprecated function. The function'
' %s has no designated replacement. Provide a'
' replacement by implementing the get_replacement()'
' method or rework entity type to remove the reference'
' to the deprecated function' % obj.__class__.__name__)
raise StageException(msg, obj.__class__.__name__)
else:
logger.debug('Entity Type has a reference to a deprecated'
' function. This function was automatically'
' replaced by %s', obj.__class__.__name__)
return obj
def generate_data(self, entities=None, days=0, seconds=300, freq='1min', scd_freq='1D', write=True,
drop_existing=False, data_item_mean=None, data_item_sd=None, data_item_domain=None, columns=None,
start_entity_id=None, auto_entity_count=None, datasource=None, datasourcemetrics=None):
"""
Generate random time series data for entities
Parameters
----------
entities: list
List of entity ids to generate data for
days: number
Number of days worth of data to generate (back from system date)
seconds: number
Number of seconds of worth of data to generate (back from system date)
freq: str
Pandas frequency string - interval of time between subsequent rows of data
write: bool
write generated data back to table with same name as entity
drop_existing: bool
drop existing time series, dimension, activity and scd table
data_item_mean: dict
mean values for generated data items. dict is keyed on data item name
data_item_sd: dict
std values for generated data items. dict is keyed on data item name
data_item_domain: dict
domains of values for categorical data items. dict is keyed on data item name
datasource: dataframe
dataframe as data source
datasourcemetrics : list of strings
list of relevant column for datasource
"""
if entities is None:
if start_entity_id is None:
start_entity_id = self._start_entity_id
if auto_entity_count is None:
auto_entity_count = self._auto_entity_count
entities = [str(start_entity_id + x) for x in list(range(auto_entity_count))]
if data_item_mean is None:
data_item_mean = {}
if data_item_sd is None:
data_item_sd = {}
if data_item_domain is None:
data_item_domain = {}
if drop_existing and self.db is not None:
self.drop_tables(recreate=True)
known_categoricals = set(data_item_domain.keys())
exclude_cols = ['deviceid', 'devicetype', 'format', 'updated_utc', 'logicalinterface_id', self._timestamp]
if self.db is None or self.is_local:
write = False
msg = 'This is a local entity or entity with no database connection, test data will not be written'
logger.debug(msg)
(metrics, dates, categoricals, others) = self.get_local_column_lists_by_type(columns,
known_categoricals_set=known_categoricals)
else:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(self.table, self._db_schema,
exclude_cols=exclude_cols,
known_categoricals_set=known_categoricals)
msg = 'Generating data for %s with metrics %s and dimensions %s and dates %s' % (
self.name, metrics, categoricals, dates)
logger.debug(msg)
ts = TimeSeriesGenerator(metrics=metrics, ids=entities, days=days, seconds=seconds, freq=freq,
categoricals=categoricals, dates=dates, timestamp=self._timestamp,
domains=data_item_domain, datasource=datasource, datasourcemetrics=datasourcemetrics)
ts.data_item_mean = data_item_mean
ts.data_item_sd = data_item_sd
ts.data_item_domain = data_item_domain
df = ts.execute()
dimension_table_exists = False
try:
dimension_table_exists = self.db.if_exists(table_name=self._dimension_table_name, schema=self._db_schema)
except Exception:
pass
if self._dimension_table_name is not None and dimension_table_exists:
self.generate_dimension_data(entities, write=write, data_item_mean=data_item_mean,
data_item_sd=data_item_sd, data_item_domain=data_item_domain)
if write and self.db is not None:
for o in others:
if o not in df.columns:
df[o] = None
df['logicalinterface_id'] = ''
df['devicetype'] = self.logical_name
df['format'] = ''
df['updated_utc'] = dt.datetime.utcnow()
self.db.write_frame(table_name=self.name, df=df, schema=self._db_schema, timestamp_col=self._timestamp)
for (at_name, at_table) in list(self.activity_tables.items()):
adf = self.generate_activity_data(table_name=at_name, activities=at_table._activities, entities=entities,
days=days, seconds=seconds, write=write)
msg = 'generated data for activity table %s' % at_name
logger.debug(msg)
for scd in list(self.scd.values()):
sdf = self.generate_scd_data(scd_obj=scd, entities=entities, days=days, seconds=seconds, write=write,
freq=scd_freq, domains=data_item_domain)
msg = 'generated data for scd table %s' % scd.name
logger.debug(msg)
return df
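# Illustrative usage (hypothetical data item names and values; 'entity_type' stands for an instance of this class):
# df = entity_type.generate_data(days=0.5, freq='1min', write=False,
#                                data_item_mean={'temperature': 22.0},
#                                data_item_sd={'temperature': 2.0})
# produces half a day of synthetic 1-minute readings for the auto-generated entity ids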
def generate_activity_data(self, table_name, activities, entities, days, seconds, write=True):
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to generate activity data ')
raise ValueError(msg)
try:
(metrics, dates, categoricals, others) = self.db.get_column_lists_by_type(table_name, self._db_schema,
exclude_cols=[self._entity_id,
'start_date',
'end_date'])
except KeyError:
metrics = []
dates = []
categoricals = []
others = []
metrics.append('duration')
categoricals.append('activity')
ts = TimeSeriesGenerator(metrics=metrics, dates=dates, categoricals=categoricals, ids=entities, days=days,
seconds=seconds, freq=self._activity_frequency)
ts.set_domain('activity', activities)
df = ts.execute()
df['start_date'] = df[self._timestamp]
duration = df['duration'].abs()
df['end_date'] = df['start_date'] + pd.to_timedelta(duration, unit='h')
# enables access to directories/files
import os
# for handling data
import numpy as np
from numpy import array
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
# graphing
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import seaborn as sns
# statistics
from statsmodels.graphics.gofplots import qqplot
from scipy import stats
import scikit_posthocs as sp
from scipy.stats import zscore
from scipy.stats import ks_2samp
from statistics import mean
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.anova import AnovaRM
from statsmodels.stats.libqsturng import psturng
import re
from ast import literal_eval
import more_itertools
import math
from matplotlib import lines
from matplotlib.offsetbox import AnchoredText
import imgkit
# machine learning
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor
from sklearn.metrics import explained_variance_score, r2_score
from sklearn.metrics import median_absolute_error
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
import scipy.cluster.hierarchy as hac
import matplotlib.gridspec as gridspec
import random
import six
from sklearn.preprocessing import LabelEncoder
# assumed dependency: the raincloud plots below call pt.RainCloud (ptitprince), which is not imported in this excerpt
import ptitprince as pt
def generate_dictionary_for_telomere_length_data(patharg):
"""
USAGE:
telomere_data_dict = generate_dictionary_for_telomere_length_data(directory)
Where the directory contains Excel files of telomere length data in
a predefined format. This function is written specifically for the Excel file templates
that I use, and will provide in this repository, but could be altered for any format.
The individual telomere lengths column is extracted, cleansed of missing values & DAPI-intensity
values; outliers (3 std devs from mean of column) are removed; and the telomere length values are
standardized to each other by use of fluorescent beads which calibrate according to inherent
differences between microscope imaging sessions. The individual's ID & timepoint (from filename) (KEY)
is associated with its respective individual telomere length data (VALUE) as a KEY:VALUE pair
in the dictionary. The dictionary can then be looped over to initialize all timepoint data
for that individual for analysis, i.e visualizations, statistics, etc.
"""
# initialize dictionary to hold our data
dict_astro_individ_telos_dfs = {}
# loop through directory to grab files
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(f'{file.name} telomere data acquisition in progress..')
try:
df = pd.read_excel(file)
except:
print(f'{file.name} File not found..')
return -1
df.rename(columns={'Unnamed: 3':'Individ Telos'}, inplace=True)
# these numbers correspond to rows containing information about the DAPI counterstain, NOT telomeres, so we drop
DAPI_values_to_drop=[5, 192, 379, 566, 753, 940, 1127, 1314, 1501, 1688, 1875, 2062,
2249, 2436, 2623, 2810, 2997, 3184, 3371, 3558, 3745, 3932, 4119, 4306, 4493,
4680, 4867, 5054, 5241, 5428]
# grabbing individual telomere length data from the file & dropping DAPI info
individual_telos_lengths = (df['Individ Telos'])
individual_telos_lengths = individual_telos_lengths.drop(labels=DAPI_values_to_drop)
# first pass at generating synthetic data for github exposition; to initialize actual
# data, comment out the line below, and uncomment the .iloc[] line
# individual_telos_lengths = individual_telos_lengths.sample(2500, random_state=1)
individual_telos_lengths = individual_telos_lengths.iloc[7:5611]
# ensure the telomere measurements are a numeric data type, drop any missing values,
# make data into a dataframe
telos_str_toNaN = pd.to_numeric(individual_telos_lengths, errors='coerce')
individual_telos_cleaned = telos_str_toNaN.dropna(axis=0, how='any')
telos_df = individual_telos_cleaned.to_frame(name=None)
# remove any telomere measurements that lie beyond 3 standard deviations of the mean
# the data is relatively normal in shape, & this process removes about ~10-20 telos from ~5520
# modest loss, acceptable to help standardize
telos_individ_df = telos_df[(np.abs(stats.zscore(telos_df)) < 3).all(axis=1)]
# logic clauses for recognizing which astronaut ID is in the sample name
# different astronauts were imaging at different times and thus associated with
# different Cy3 calibrations for the microscope, thus data is standardized according to Cy3
if ('5163' in file.name) or ('1536' in file.name):
telos_individ_df_cy3Cal = telos_individ_df.div(59.86)
elif '2171' in file.name or '4419' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(80.5)
elif '7673' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.11)
elif '2479' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.18)
elif '1261' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.16)
else:
telos_individ_df_cy3Cal = telos_individ_df
# average of all cy3 calibrated control telo measurements (11 age matched controls)
# telos_individ_df_cy3Cal = telos_individ_df_cy3Cal.div(116.1848153)
file_name_trimmed = file.name.replace('.xlsx', '')
dict_astro_individ_telos_dfs[file_name_trimmed] = telos_individ_df_cy3Cal
print('Done collecting all astronaut telomere length excel files')
return dict_astro_individ_telos_dfs
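# Illustrative usage (the path and file naming below are hypothetical):
# telomere_data_dict = generate_dictionary_for_telomere_length_data('../data/astro_teloFISH/')
# keys resemble 'dso5163 mphase TeloFISH L-270'; values are the cleaned, Cy3-calibrated telomere dataframes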
def astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone, axsNUMtwo):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone,axsNUMtwo].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone,axsNUMtwo].set_title(f"Histogram of {astroname}'s Telomeres")
axs[axsNUMone,axsNUMtwo].set_xlabel('Bins of Individ. Telomeres')
axs[axsNUMone,axsNUMtwo].set_ylabel('Freqs of Individ. Telomeres')
axs[axsNUMone,axsNUMtwo].xaxis.set_major_locator(plt.MaxNLocator(12))
def astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone].set_title(f'Histogram of Individual Telomeres for {astroname}')
axs[axsNUMone].set_xlabel('Bins of Individ. Telomeres')
axs[axsNUMone].set_ylabel('Freqs of Individ. Telomeres')
axs[axsNUMone].xaxis.set_major_locator(plt.MaxNLocator(19))
def gen_missing_values_andimpute_or_randomsampledown(n_cells, telosPercell, astro_df, option=None):
if astro_df.size > 5520:
astro_dfsampled = astro_df.sample(5520)
return astro_dfsampled
if astro_df.size > 25 and astro_df.size <= 2760:
missing_data_difference = abs( (n_cells * telosPercell) - astro_df.size )
rsampled = astro_df.sample(missing_data_difference, replace=True, random_state=28)
concat_ed = pd.concat([rsampled, astro_df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
concat_ed.reset_index(drop=True, inplace=True)
return concat_ed
if astro_df.size > 25 and astro_df.size < 5520:
missing_data_difference = abs( (n_cells * telosPercell) - astro_df.size )
if option == 'rsamp':
rsampled = astro_df.sample(missing_data_difference, random_state=28)
concat_ed = pd.concat([rsampled, astro_df], sort=False)
np.random.shuffle(concat_ed.to_numpy())
concat_ed.reset_index(drop=True, inplace=True)
return concat_ed
else:
return astro_df
else:
return astro_df
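# Illustrative example (hypothetical sample): samples are standardized to 30 cells x 184 telomeres = 5520 values.
# A series with 5200 measurements is topped up by resampling 320 values from itself ('rsamp'),
# while oversized samples are randomly sampled down to 5520:
# standardized = gen_missing_values_andimpute_or_randomsampledown(30, 184, telo_series, 'rsamp')
# standardized.size -> 5520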
def statistics_between_timepoints(astro_pre, astro_mid1, astro_mid2, astro_post,
astro_prename, astro_mid1name, astro_mid2name, astro_postname, test):
print( astro_prename + ' vs ' + astro_mid1name,
test(astro_pre, astro_mid1), '\n',
astro_prename + ' vs ' + astro_mid2name,
test(astro_pre, astro_mid2),'\n',
astro_mid1name + ' vs ' + astro_postname,
test(astro_mid1, astro_post),'\n',
astro_mid1name + ' vs ' + astro_mid2name,
test(astro_mid1, astro_mid2),'\n',
astro_mid2name + ' vs ' + astro_postname,
test(astro_mid2, astro_post),'\n',
astro_prename + ' vs ' + astro_postname,
test(astro_pre, astro_post),'\n', )
def statistics_between_timepoints_prepost_only(astro_pre, astro_post, astro_prename, astro_postname):
print(astro_prename + ' compared vs ' + astro_postname,
stats.mannwhitneyu(astro_pre, astro_post),'\n', )
def get_astro_number_from_id(astro_id):
astro_num = ''
if astro_id == '5163':
astro_num = 99
synth = 'synthetic 1'
elif astro_id == '1536':
astro_num = 1
synth = 'synthetic 2'
elif astro_id == '7673':
astro_num = 2
synth = 'synthetic 3'
elif astro_id == '2479':
astro_num = 3
synth = 'synthetic 4'
elif astro_id == '2171':
astro_num = 4
synth = 'synthetic 5'
elif astro_id == '1261':
astro_num = 5
synth = 'synthetic 7'
elif astro_id == '3228':
astro_num = 6
synth = 'synthetic 8'
elif astro_id == '2381':
astro_num = 98
synth = 'synthetic 9'
elif astro_id == '4819':
astro_num = 7
synth = 'synthetic 10'
elif astro_id == '1062':
astro_num = 8
synth = 'synthetic 11'
elif astro_id == '2494':
astro_num = 9
synth = 'synthetic 12'
elif astro_id == '4419':
astro_num = 1011
synth = 'synthetic 99'
return astro_num, synth
def relative_flight_timepoint(name_key):
if 'L' in name_key:
flight_status = 'Pre-Flight'
elif 'FD' in name_key:
flight_status = 'Mid-Flight'
elif 'R' in name_key:
flight_status = 'Post-Flight'
return flight_status
def quartile_cts_rel_to_df1(df1, df2):
df1 = pd.DataFrame(df1)
df2 = pd.DataFrame(df2)
quartile_1 = df2[df2 <= df1.quantile(0.25)].count()
quartile_2_3 = df2[(df2 > df1.quantile(0.25)) & (df2 < df1.quantile(0.75))].count()
quartile_4 = df2[df2 >= df1.quantile(0.75)].count()
return quartile_1.values, quartile_2_3.values, quartile_4.values
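# Illustrative example (hypothetical pre-/post-flight samples):
# q1, q2_3, q4 = quartile_cts_rel_to_df1(preflight_telos, postflight_telos)
# q1   -> count of post-flight telomeres <= the pre-flight 25th percentile
# q2_3 -> count strictly between the pre-flight 25th and 75th percentiles
# q4   -> count of post-flight telomeres >= the pre-flight 75th percentile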
def get_timepoint(name_key):
timepoint_5_char = ['L-270', 'L-180', 'FD140', 'FD260', 'R+105', 'R+180', 'R+270']
timepoint_4_char = ['L-60', 'FD45', 'FD90', 'R+60']
timepoint_3_char = ['R+5', 'R+7']
for timepoint in timepoint_5_char:
if timepoint in name_key:
timepoint = name_key[-5:]
return timepoint.strip()
for timepoint in timepoint_4_char:
if timepoint in name_key:
timepoint = name_key[-4:]
return timepoint.strip()
for timepoint in timepoint_3_char:
if timepoint in name_key:
timepoint = name_key[-3:]
return timepoint.strip()
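# Illustrative example (hypothetical sample keys):
# get_timepoint('dso5163 mphase TeloFISH L-270')  # -> 'L-270'
# get_timepoint('dso2171 mphase TeloFISH FD90')   # -> 'FD90'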
def make_quartiles_columns(astro_df):
pos_1, pos_2, pos_3 = 6, 7, 8
astro_id, timepoint, flight, telo_data = 1, 2, 3, 4
for i, row in astro_df.iterrows():
astro_id_4digit = row[astro_id]
if row[flight] == 'Pre-Flight' and row[timepoint] == 'L-270':
preFlight_telos = row[telo_data]
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, preFlight_telos))
elif row[flight] == 'Pre-Flight' and row[timepoint] == 'L-180':
if 'L-270' in list(astro_df[astro_df['astro id'] == astro_id_4digit]['timepoint']):
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif 'L-270' not in list(astro_df[astro_df['astro id'] == astro_id_4digit]['timepoint']):
preFlight_telos = row[telo_data]
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, preFlight_telos))
elif row[flight] == 'Pre-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif row[flight] == 'Mid-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
elif row[flight] == 'Post-Flight':
astro_df.iat[i, pos_1], astro_df.iat[i, pos_2], astro_df.iat[i, pos_3] = (quartile_cts_rel_to_df1(preFlight_telos, row[telo_data]))
else:
print('unknown label in row[1] of the all patients df.. please check patient timepoint names')
return astro_df
def graphing_statistics_telomere_data(dict_astro_individ_telos_dfs):
astro_list_of_IDs = ['5163', '2171', '1536', '7673', '4819', '3228',
'2494', '2479', '2381', '1261', '1062']
timepoint_series = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140',
'FD260', 'R+5', 'R+7', 'R+60', 'R+180', 'R+270']
n=0
for idNO in astro_list_of_IDs:
n+=1
# #initialize blank list of timepoints
data = [[1, 0, 0, 0], [0]]
emptydata = pd.DataFrame(data)
astro_L270 = pd.DataFrame(data)
astro_L180 = pd.DataFrame(data)
astro_L60 = pd.DataFrame(data)
astro_Mid1 = pd.DataFrame(data)
astro_Mid2 = pd.DataFrame(data)
astro_R7 = pd.DataFrame(data)
astro_R60 = pd.DataFrame(data)
astro_R180 = pd.DataFrame(data)
astro_R270 = pd.DataFrame(data)
astro_L270name = ''
astro_L180name = ''
astro_L60name = ''
astro_Mid1name = ''
astro_Mid2name = ''
astro_R7name = ''
astro_R60name = ''
astro_R180name = ''
astro_R270name = ''
for j in timepoint_series:
for i in dict_astro_individ_telos_dfs.keys():
if (idNO in i) and j == 'L-270' and ('L-270' in i):
astro_L270 = dict_astro_individ_telos_dfs[i]
astro_L270name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'L-180' and ('L-180' in i):
astro_L180 = dict_astro_individ_telos_dfs[i]
astro_L180name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'L-60' and ('L-60' in i):
astro_L60 = dict_astro_individ_telos_dfs[i]
astro_L60name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and (j == 'FD45' or j == 'FD90') and (j in i):
astro_Mid1 = dict_astro_individ_telos_dfs[i]
astro_Mid1name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and (j == 'FD140' or j == 'FD260') and (j in i):
astro_Mid2 = dict_astro_individ_telos_dfs[i]
astro_Mid2name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+7' and (j in i):
astro_R7 = dict_astro_individ_telos_dfs[i]
astro_R7name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+60' and (j in i):
astro_R60 = dict_astro_individ_telos_dfs[i]
astro_R60name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+180' and (j in i):
astro_R180 = dict_astro_individ_telos_dfs[i]
astro_R180name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
elif (idNO in i) and j == 'R+270' and (j in i):
astro_R270 = dict_astro_individ_telos_dfs[i]
astro_R270name = i.replace('mphase TeloFISH', '').replace('.xlsx', '')
else:
continue
if idNO == '5163' or idNO == '2171' or idNO == '1536':
if (astro_L270.size > 25 or astro_L180.size > 25) and (astro_Mid1.size > 25 and astro_Mid2.size > 25 ) and (astro_R180.size > 25 or astro_R270.size > 25):
n_cells = 30
astro_L270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L270, 'rsamp')
astro_L180 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L180, 'rsamp')
astro_Mid1 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_Mid1, 'rsamp')
astro_Mid2 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_Mid2, 'rsamp')
astro_R180 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R180, 'rsamp')
astro_R270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R270, 'rsamp')
n_bins = 30
fig, axs = plt.subplots(2,2, sharey=True, tight_layout=False, figsize = (16, 12))
if astro_L270name != '':
if astro_R270name != '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L270, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L270, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R270, astro_L270, astro_R270name, 1, 1)
# print('stats')
# statistics_between_timepoints(astro_L270, astro_Mid1, astro_Mid2, astro_R270,
# astro_L270name, astro_Mid1name, astro_Mid2name, astro_R270name)
elif astro_R270name == '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L270, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L270, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R180, astro_L270, astro_R180name, 1, 1)
# print('stats')
# statistics_between_timepoints(astro_L270, astro_Mid1, astro_Mid2, astro_R180,
# astro_L270name, astro_Mid1name, astro_Mid2name, astro_R180name)
elif astro_L270name == '':
if astro_R270name == '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L180, astro_L180, astro_L180name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L180, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L180, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R180, astro_L180, astro_R180name, 1, 1)
# print('randomly sampled stats')
# statistics_between_timepoints(astro_L180, astro_Mid1, astro_Mid2, astro_R180,
# astro_L180name, astro_Mid1name, astro_Mid2name, astro_R180name)
elif astro_R270name != '':
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_L180, astro_L180, astro_L180name, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid1, astro_L180, astro_Mid1name, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_Mid2, astro_L180, astro_Mid2name, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astro_R270, astro_L180, astro_R270name, 1, 1)
# print('randomly sampled stats')
# statistics_between_timepoints(astro_L180, astro_Mid1, astro_Mid2, astro_R270,
# astro_L180name, astro_Mid1name, astro_Mid2name, astro_R270name)
else:
continue
# plt.savefig('Final telomere histogram random sampling dso'+idNO+'.pdf')
plt.show()
if idNO in ['7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']:
if (astro_L270.size > 25) and (astro_R270.size > 25):
n_cells = 30
# astro_L270name = f'synthetic astronaut {n} L+270'
# astro_R270name = f'synthetic astronaut {n} R+270'
astro_L270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_L270, 'rsamp')
astro_R270 = gen_missing_values_andimpute_or_randomsampledown(n_cells, 184, astro_R270, 'rsamp')
n_bins = 30
fig, axs = plt.subplots(2, sharey=True, tight_layout=False, figsize = (12, 14))
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astro_L270, astro_L270, astro_L270name, 0)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astro_R270, astro_L270, astro_R270name, 1)
# statistics_between_timepoints_prepost_only(astro_L270, astro_R270, astro_L270name, astro_R270name)
else:
continue
# plt.savefig('Resampled telomere histogram dso'+idNO+'.pdf')
plt.show()
def grab_control_values_generate_dictionary(patharg):
dict_mean_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(file.name, 'telomere data acquisition in progress..')
try:
df = pd.read_excel(file)
except:
print('File not found..')
return -1
df.rename(columns={'Unnamed: 3':'Mean Individ Telos'}, inplace=True)
mean_values_of_individual_telomere_lengths = (df['Mean Individ Telos'])
mean_values_of_individual_telomere_lengths = mean_values_of_individual_telomere_lengths.drop(labels=[5, 192, 379, 566, 753, 940, 1127, 1314,
1501, 1688, 1875, 2062, 2249, 2436, 2623, 2810, 2997, 3184, 3371, 3558, 3745, 3932, 4119, 4306, 4493, 4680, 4867, 5054, 5241, 5428])
mean_values_of_individual_telomere_lengths = mean_values_of_individual_telomere_lengths.iloc[7:5611]
meantelos_str_toNaN = pd.to_numeric(mean_values_of_individual_telomere_lengths, errors='coerce')
mean_individual_telos_cleaned = meantelos_str_toNaN.dropna(axis=0, how='any')
mean_individ_df = mean_individual_telos_cleaned.to_frame(name=None)
mean_individ_df = mean_individ_df[(np.abs(stats.zscore(mean_individ_df)) < 3).all(axis=1)]
if '0397' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.285)
elif '3907' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.179)
elif '1826' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.143)
elif '0100' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(59.86)
elif '0912' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
elif '0646' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
else:
mean_individ_df_cy3Cal = mean_individ_df
file_name_trimmed = file.name.replace('.xlsx', '')
mean_individ_df_cy3Cal = gen_missing_values_andimpute_or_randomsampledown(30, 184, mean_individ_df_cy3Cal, 'rsamp')
dict_mean_individ_telos_dfs[file_name_trimmed] = mean_individ_df_cy3Cal
print('data collection complete')
return dict_mean_individ_telos_dfs
def grab_control_telo_values_per_cell_generate_dictionary(patharg):
dict_mean_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(file.name, 'telomere data acquisition in progress..')
try:
df = pd.read_excel(file, skiprows=3)
df = df.iloc[0:30, 12].to_frame()
except:
print('File not found..')
return -1
mean_individ_df = df.dropna(axis=0, how='any')
if '0397' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.285)
elif '3907' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.179)
elif '1826' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(2.143)
elif '0100' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(59.86)
elif '0912' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
elif '0646' in file.name:
mean_individ_df_cy3Cal = mean_individ_df.div(80.5)
else:
mean_individ_df_cy3Cal = mean_individ_df
file_name_trimmed = file.name.replace('.xlsx', '')
mean_individ_df_cy3Cal = mean_individ_df_cy3Cal.div(116.1848153)
dict_mean_individ_telos_dfs[file_name_trimmed] = mean_individ_df_cy3Cal
print('data collection complete')
return dict_mean_individ_telos_dfs
def grab_astro_telo_values_per_cell_generate_dictionary(patharg):
dict_astro_individ_telos_dfs = {}
for file in os.scandir(patharg):
if file.name.endswith('.xlsx') and file.name.startswith('~$') == False:
print(f'{file.name} telomere data acquisition in progress..')
try:
df = pd.read_excel(file, skiprows=3)
df = df.iloc[0:30, 12].to_frame()
except:
print(f'{file.name} File not found..')
return -1
telos_individ_df = df.dropna(axis=0, how='any')
if ('5163' in file.name) or ('1536' in file.name):
telos_individ_df_cy3Cal = telos_individ_df.div(59.86)
elif '2171' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(80.5)
elif '7673' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.11)
elif '2479' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.18)
elif '1261' in file.name:
telos_individ_df_cy3Cal = telos_individ_df.div(2.16)
else:
telos_individ_df_cy3Cal = telos_individ_df
telos_individ_df_cy3Cal = telos_individ_df_cy3Cal.div(116.1848153)
file_name_trimmed = file.name.replace('.xlsx', '')
dict_astro_individ_telos_dfs[file_name_trimmed] = telos_individ_df_cy3Cal
print('Done collecting all astronaut telomere length excel files')
return dict_astro_individ_telos_dfs
def raincloud_plot_astros_groups(x=None, y=None, data=None,
groupby=None, iterable=None):
group_df = data.groupby(groupby)
for item in iterable:
plot_df = group_df.get_group(item)
if x == 'timepoint':
#this line only needed for timepoint
plot_df[x].cat.remove_unused_categories(inplace=True)
ax = sns.set(font_scale=1)
#bw = sigma
ax = pt.RainCloud(x = x, y = y, data = plot_df, palette = "Set2", bw = .20,
width_viol = .8, figsize = (8,6), move=0.21, orient = "h")
plt.title(f'{item} telos', fontsize=16)
def make_astronaut_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = gen_missing_values_andimpute_or_randomsampledown(30, 184, pd.Series(telo_value.values.reshape(-1,)), 'rsamp')
data.append([astro_num, astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['astro number', 'astro id', 'timepoint', 'flight status', 'telo data', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df['Q1'] = 'telos preF Q1 <0.25'
astro_df['Q2-3'] = 'telos preF Q2-3 >0.25 & <0.75'
astro_df['Q4'] = 'telos preF Q4 >0.75'
astro_df = astro_df.sort_values(['astro number', 'timepoint']).reset_index(drop=True)
return astro_df
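# Illustrative usage (assumes the dictionary built by generate_dictionary_for_telomere_length_data):
# astro_df = make_astronaut_dataframe(telomere_data_dict)
# one row per sample with columns: astro number, astro id, timepoint, flight status,
# telo data (5520 resampled values), telo means, Q1, Q2-3, Q4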
def make_astronaut_cell_data_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_num, astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['astro number', 'astro id', 'timepoint', 'flight status', 'telo data per cell', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df['Q1'] = 'telos preF Q1 <0.25'
astro_df['Q2-3'] = 'telos preF Q2-3 >0.25 & <0.75'
astro_df['Q4'] = 'telos preF Q4 >0.75'
astro_df = astro_df.sort_values(['astro number', 'timepoint']).reset_index(drop=True)
return astro_df
def make_control_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
# astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['control id', 'timepoint', 'flight status controls', 'telo data', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df = astro_df.sort_values(['control id', 'timepoint']).reset_index(drop=True)
return astro_df
def make_control_cell_data_dataframe(dict_astro_individ_telos_dfs):
data = []
for name_key, telo_value in dict_astro_individ_telos_dfs.items():
astro_id = name_key[3:7]
# astro_num, synth = get_astro_number_from_id(astro_id)
time_point = get_timepoint(name_key)
flight_status = relative_flight_timepoint(name_key)
telo_value = pd.Series(telo_value.values.reshape(-1,))
data.append([astro_id, time_point, flight_status, telo_value, np.mean(telo_value.values)])
astro_df = pd.DataFrame(data, columns = ['control id', 'timepoint', 'flight status controls', 'telo data per cell', 'telo means'])
sorter = ['L-270', 'L-180', 'L-60', 'FD45', 'FD90', 'FD140', 'FD260', 'R+5', 'R+7', 'R+60', 'R+105', 'R+180', 'R+270']
astro_df['timepoint'] = astro_df['timepoint'].astype('category')
astro_df['timepoint'].cat.set_categories(sorter, inplace=True)
astro_df = astro_df.sort_values(['control id', 'timepoint']).reset_index(drop=True)
return astro_df
def mid_split(row):
if 'FD90' in row or 'FD45' in row:
return 'Mid-Flight 1'
elif 'FD140' in row or 'FD260' in row:
return 'Mid-Flight 2'
elif 'L' in row:
return 'Pre-Flight'
elif 'R' in row:
return 'Post-Flight'
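# Illustrative usage sketch (column names assumed to match the dataframes built above):
# astro_df['flight phase'] = astro_df['timepoint'].astype('str').apply(mid_split)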
def histogram_plot_groups(x=None, data=None,
groupby=None, iterable=None):
group_df = data.groupby(groupby)
for item in iterable:
plot_df = group_df.get_group(item)
non_irrad = plot_df[plot_df['timepoint'] == '1 non irrad'][x]
irrad_4_Gy = plot_df[plot_df['timepoint'] == '2 irrad @ 4 Gy'][x]
three_B = plot_df[plot_df['timepoint'] == '3 B'][x]
four_C = plot_df[plot_df['timepoint'] == '4 C'][x]
n_bins = 70
fig, axs = plt.subplots(2, 2, sharey=True, tight_layout=False, figsize=(20, 13))
ax = sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
ax = sns.set(font_scale=1)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, non_irrad, non_irrad, f'patient #{item} 1 non rad', 0, 0)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, irrad_4_Gy, non_irrad, f'patient #{item} 2 irrad @ 4 Gy', 0, 1)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, three_B, non_irrad, f'patient #{item} 3 B', 1, 0)
telo_mrp.histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, four_C, non_irrad, f'patient #{item} 4 C', 1, 1)
def initialize_telo_data_1st_timepoint_variable(timepoint=None, df=None):
if timepoint in list(df['timepoint'].unique()):
variable = df[df['timepoint'] == str(timepoint)]['telo data exploded']
return variable
elif timepoint not in list(df['timepoint'].unique()):
variable = pd.DataFrame([[0,1],[0,1]])
return variable
def initialize_telo_data_timepoint_or_blank(timepoint, df):
if timepoint in list(df['timepoint'].unique()):
timepoint_telo_data = df[df['timepoint'] == str(timepoint)]['telo data exploded']
name_id = str(df['astro id'].unique()[0])
name_timepoint = f' {timepoint}'
name_total = 'dso' + name_id + name_timepoint
return name_total, timepoint_telo_data
elif timepoint not in list(df['timepoint'].unique()):
timepoint_telo_data = pd.DataFrame([0,1],[0,1])
name = ''
return name, timepoint_telo_data
########################################################################################################################
########################################################################################################################
# FUNCTIONS FOR GRAPHING INDIVIDUAL TELOMERES
########################################################################################################################
########################################################################################################################
def graph_four_histograms(quartile_ref, n_bins, df1, df2, df3, df4,
name1, name2, name3, name4):
n_bins = n_bins
fig, axs = plt.subplots(2,2, sharey=True, sharex=True, constrained_layout=True, figsize = (8, 6))
sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.rc('xtick',labelsize=16)
plt.rc('ytick',labelsize=16)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df1, quartile_ref, name1, 0, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df2, quartile_ref, name2, 0, 1)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df3, quartile_ref, name3, 1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, df4, quartile_ref, name4, 1, 1)
def graph_two_histograms(quartile_ref, n_bins, df1, df2,
name1, name2, controls=None):
n_bins = n_bins
fig, axs = plt.subplots(2, sharey=True, constrained_layout=True, figsize = (8, 6))
sns.set_style(style="darkgrid",rc= {'patch.edgecolor': 'black'})
for ax in axs.flat:
ax.label_outer()
plt.rc('xtick',labelsize=16)
plt.rc('ytick',labelsize=16)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, df1, quartile_ref, name1, 0)
astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, df2, quartile_ref, name2, 1)
# csfont = {'fontname':'sans-serif'}
# plt.suptitle(f"Individual Telomere Length Distributions at Pre and Post-Flight: {name1[0:8]}",
# y=.95, fontsize=14, **csfont)
# if controls == True:
# csfont = {'fontname':'sans-serif'}
# plt.suptitle(f"Individual Telomere Length Distributions at Pre and Post-Flight: All Control Samples",
# y=.95, fontsize=14, **csfont)
def astronaut_histogram_stylizer_divyBins_byQuartile(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone, axsNUMtwo):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone,axsNUMtwo].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
modified_astroname = astroname.replace('astro', '')
axs[axsNUMone,axsNUMtwo].set_title(f"{modified_astroname}", fontsize=16,)
font_axes=16
if axsNUMone == 0 and axsNUMtwo == 0:
axs[axsNUMone,axsNUMtwo].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
if axsNUMone == 1 and axsNUMtwo == 0:
axs[axsNUMone,axsNUMtwo].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
axs[axsNUMone,axsNUMtwo].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
if axsNUMone == 1 and axsNUMtwo == 1:
axs[axsNUMone,axsNUMtwo].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
axs[axsNUMone,axsNUMtwo].xaxis.set_major_locator(plt.MaxNLocator(7))
def astronaut_histogram_stylizer_divyBins_byQuartile_2Stacked(fig, axs, n_bins, astroDF, astroquartile, astroname, axsNUMone):
astroDF = astroDF.to_numpy()
astroquartile = astroquartile.to_numpy()
N, bins, patches = axs[axsNUMone].hist(astroDF, bins=n_bins, range=(0, 400), edgecolor='black')
for a in range(len(patches)):
if bins[a] <= np.quantile(astroquartile, 0.25):
patches[a].set_facecolor('#fdff38')
elif np.quantile(astroquartile, 0.25) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.50):
patches[a].set_facecolor('#d0fefe')
elif np.quantile(astroquartile, 0.50) < bins[a] and bins[a] <= np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#d0fefe')
elif bins[a] > np.quantile(astroquartile, 0.75):
patches[a].set_facecolor('#ffbacd')
axs[axsNUMone].set_title(f"{astroname}", fontsize=16,)
font_axes=16
if axsNUMone == 0 or axsNUMone == 1:
axs[axsNUMone].set_ylabel("Individual Telomere Counts", fontsize=font_axes)
if axsNUMone == 1:
axs[axsNUMone].set_xlabel("Bins of Individual Telomeres (RFI)", fontsize=font_axes)
axs[axsNUMone].xaxis.set_major_locator(plt.MaxNLocator(7))
def make_histograms_colored_by_quartile_for_astronauts(exploded_telos_df=None, astro_ids=None, nbins=45):
# astro_ids = ['5163', '2171', '1536', '7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']
grouped_data = exploded_telos_df.groupby('astro id')
# by looping through astronaut ids, we'll pull out their respective dataframes
# once we have the astronauts respective dfs, we'll figure out the quartile df &
for astro_id_num in astro_ids:
if astro_id_num not in grouped_data.groups.keys():
            continue  # skip astro ids absent from the grouped data
plot_df = grouped_data.get_group(astro_id_num)
for timepoint in ['L-270', 'L-180']:
first_timepoint = initialize_telo_data_1st_timepoint_variable(timepoint=timepoint, df=plot_df)
if first_timepoint.size > 30:
break
quartile_ref = first_timepoint
# okay, now we have the first timepoint as the reference for making quartile cutoffs!
        # now need to initialize other values!
name_L270, astro_L270 = initialize_telo_data_timepoint_or_blank('L-270', plot_df)
name_L180, astro_L180 = initialize_telo_data_timepoint_or_blank('L-180', plot_df)
if '5163' == astro_id_num or '1536' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_telo_data_timepoint_or_blank('FD90', plot_df)
name_Mid2, astro_Mid2 = initialize_telo_data_timepoint_or_blank('FD140', plot_df)
if '2171' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_telo_data_timepoint_or_blank('FD45', plot_df)
name_Mid2, astro_Mid2 = initialize_telo_data_timepoint_or_blank('FD260', plot_df)
name_R180, astro_R180 = initialize_telo_data_timepoint_or_blank('R+180', plot_df)
name_R270, astro_R270 = initialize_telo_data_timepoint_or_blank('R+270', plot_df)
if ('5163' == astro_id_num) or ('2171' == astro_id_num) or ('1536' == astro_id_num):
            n_bins = nbins
if name_L270 != '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R270,
name_L270, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R180,
name_L270, name_Mid1, name_Mid2, name_R180)
elif name_L270 == '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R270,
name_L180, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R180,
name_L180, name_Mid1, name_Mid2, name_R180)
elif astro_id_num in ['7673', '4819', '3228', '2494', '2479', '2381', '1261', '1062']:
n_bins = 60
graph_two_histograms(quartile_ref, n_bins, astro_L270, astro_R270, name_L270, name_R270)
plt.savefig(f'../individual telomere length histogram distributions/png/dso{astro_id_num} histogram of individual telomere length distributions.png', dpi=600)
plt.savefig(f'../individual telomere length histogram distributions/svg/dso{astro_id_num} histogram of individual telomere length distributions.svg', format='svg', dpi=1500)
def initialize_encoded_telo_data_timepoint_or_blank(timepoint, df):
if timepoint in list(df['timepoint'].unique()):
timepoint_telo_data = df[df['timepoint'] == str(timepoint)]['telo data exploded']
name_id = str(df['encoded astro id'].unique()[0])
name_timepoint = f' {timepoint}'
name_total = 'astro ' + name_id + name_timepoint
return name_total, timepoint_telo_data
elif timepoint not in list(df['timepoint'].unique()):
timepoint_telo_data = pd.DataFrame([0,1],[0,1])
name = ''
return name, timepoint_telo_data
def make_histograms_colored_by_quartile_for_encoded_astronauts(exploded_telos_df=None, astro_ids=None, n_bins=60, save=True):
grouped_data = exploded_telos_df.groupby('encoded astro id')
for astro_id_num in astro_ids:
if astro_id_num not in grouped_data.groups.keys():
            continue  # skip astro ids absent from the grouped data
plot_df = grouped_data.get_group(astro_id_num)
for timepoint in ['L-270', 'L-180']:
first_timepoint = initialize_telo_data_1st_timepoint_variable(timepoint=timepoint, df=plot_df)
if first_timepoint.size > 30:
break
quartile_ref = first_timepoint
name_L270, astro_L270 = initialize_encoded_telo_data_timepoint_or_blank('L-270', plot_df)
name_L180, astro_L180 = initialize_encoded_telo_data_timepoint_or_blank('L-180', plot_df)
if 'A' == astro_id_num or 'C' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_encoded_telo_data_timepoint_or_blank('FD90', plot_df)
name_Mid2, astro_Mid2 = initialize_encoded_telo_data_timepoint_or_blank('FD140', plot_df)
if 'B' == astro_id_num:
name_Mid1, astro_Mid1 = initialize_encoded_telo_data_timepoint_or_blank('FD45', plot_df)
name_Mid2, astro_Mid2 = initialize_encoded_telo_data_timepoint_or_blank('FD260', plot_df)
name_R180, astro_R180 = initialize_encoded_telo_data_timepoint_or_blank('R+180', plot_df)
name_R270, astro_R270 = initialize_encoded_telo_data_timepoint_or_blank('R+270', plot_df)
if ('B' == astro_id_num) or ('A' == astro_id_num) or ('C' == astro_id_num):
n_bins = n_bins
if name_L270 != '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R270,
name_L270, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L270, astro_Mid1, astro_Mid2, astro_R180,
name_L270, name_Mid1, name_Mid2, name_R180)
elif name_L270 == '':
if name_R270 != '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R270,
name_L180, name_Mid1, name_Mid2, name_R270)
elif name_R270 == '':
graph_four_histograms(quartile_ref, n_bins, astro_L180, astro_Mid1, astro_Mid2, astro_R180,
name_L180, name_Mid1, name_Mid2, name_R180)
if save:
plt.savefig(f'../MANUSCRIPT 2 ASTROS/figures/dso{astro_id_num} histogram of individual telomere length distributions.png',
bbox_inches='tight', dpi=600)
########################################################################################################################
########################################################################################################################
# FUNCTIONS FOR CORRELATING TELOMERES WITH ANALYTE DATA
########################################################################################################################
########################################################################################################################
def select_astros_of_interest(analyte_df, telomere_df, astro_ids_of_interest, target):
if 'astro id' in telomere_df.columns:
telomere_df['astro id'] = telomere_df['astro id'].astype('str')
if 'astro id' in analyte_df.columns:
analyte_df['astro id'] = analyte_df['astro id'].astype('str')
if 'sample type' in analyte_df.columns:
analyte_df.drop('sample type', axis=1, inplace=True)
# dropping unnecessary cols from telo df
for col in ['astro number', 'timepoint']:
if col in telomere_df.columns:
telomere_df.drop([col], axis=1, inplace=True)
trim_astro_df = telomere_df.copy()
if 'all astros' in astro_ids_of_interest:
# i.e as of 10/7/19 I only have n=4 (contains astro id col) & n=11 (no astro id) dataframes for analytes
# I think when I received n=3 astros.. just type astro ids for astro_ids_of_interest, it will work properly
# or.. if i receive n=11 dataframe with labeled astros..
# just rewrite this area to accept n=11 df w/ astro id col
if 'astro id' in analyte_df.columns:
(print("Possible error.. the astro id column is present.. all astros were requested but this df potentially"
"contains less than all 11 astros.. drop astro id col and retry"))
return
else:
# retain all astro ids
selected_astros = trim_astro_df
id_values = ['flight status']
elif 'all astros' not in astro_ids_of_interest:
# subset astro ids of interest
selected_astros = trim_astro_df[trim_astro_df['astro id'].isin(astro_ids_of_interest)].reset_index(drop=True)
id_values = ['astro id', 'flight status']
return analyte_df, selected_astros, id_values
def merge_analyte_telomere_data(analyte_df, selected_astros, id_values, telos_percent_change, target):
# take mean telomere length values of all astronauts or per astros of interest & merge with analytes
mean_selected_astros = selected_astros.groupby(id_values).agg('mean').reset_index()
if telos_percent_change == 'yes':
mean_selected_astros[target] = (mean_selected_astros[target]
.apply(lambda row: make_telos_percent_change(row)))
merge_analyte_df = analyte_df.merge(mean_selected_astros, on=id_values)
# prepare to drop any columns w/ missing data
indexer=['timepoint', target]
for id_value in id_values:
indexer.append(id_value)
return merge_analyte_df, indexer
def how_drop_missing_values(merge_analyte_df, how_drop_missing, indexer):
# drop every analyte (columns) with missing data
if how_drop_missing == 'by column':
pivot_merge = (merge_analyte_df.pivot_table(index=indexer, columns='biochemistry analyte',
values='measured analyte').reset_index())
pivot_merge.dropna(axis=1, inplace=True)
cleaned_data = pivot_merge.melt(id_vars=indexer, var_name='biochemistry analyte',
value_name='measured analyte').reset_index(drop=True)
# drop missing data on per analyte/timepoint/astro (row) basis
elif how_drop_missing == 'by melted row':
cleaned_data = merge_analyte_df.dropna(axis=0)
return cleaned_data
def retain_flight_status(cleaned_data, retain_what_flight_status):
# retaining analytes for which flight status
if retain_what_flight_status == 'any':
retained_data = cleaned_data
elif bool(set(retain_what_flight_status) & set(['Pre-Flight', 'Mid-Flight', 'Post-Flight'])) == True:
retained_data = cleaned_data[cleaned_data['flight status'].isin(retain_what_flight_status)].copy()
elif retain_what_flight_status == 'require at least one per status':
total_analytes = list(cleaned_data['biochemistry analyte'].unique())
analytes_3_unique_flight = []
groupby_analyte = cleaned_data.groupby('biochemistry analyte')
for analyte in total_analytes:
# make groups by analyte
get_group_by_analyte = groupby_analyte.get_group(analyte)
# look at unique flight status values per analyte
g_f_s_t = list(get_group_by_analyte['flight status'].unique())
# if pre, mid, and post flight values in unique value list per analyte, then add this analyte to a list
if 'Pre-Flight' in g_f_s_t and 'Mid-Flight' in g_f_s_t and 'Post-Flight' in g_f_s_t:
analytes_3_unique_flight.append(analyte)
# retain only analytes with at least one measurement per flight status
analytes_only_3_unique_df = cleaned_data[cleaned_data['biochemistry analyte'].isin(analytes_3_unique_flight)].copy()
return analytes_only_3_unique_df
return retained_data
def make_telos_percent_change(row):
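    # 0.938117 is taken as the reference mean telomere length (presumably the pre-flight
    # baseline); percent change is computed relative to that fixed value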
percent_chg_telos = ((row - 0.938117) / 0.938117) * 100
return percent_chg_telos
def correlate_astro_analytes_telomeres_pipeline(analyte_df=None, telomere_df=None, target=None,
astro_ids_of_interest=None,
how_drop_missing=None, retain_what_flight_status=None,
telos_percent_change='no'):
"""
High level fxn description
Args:
analyte_df (pandas dataframe): Contains either n=4 or n=11 biochemical analyte data in tidy data format.
telomere_df (pandas dataframe): Must contain complete telomere length data in tidy data format.
astro_ids_of_interest (str or list of str): Accepts either 'all astros' as str, whereby all astronaut data is
used for correlating telo/analyte data, or a list of astro ids to subset data for analysis.
how_drop_missing (str): Accepts either 'by column', which drops any analyte containing at least one missing value,
or 'by melted row', which drops only single instances of missing values.
        retain_what_flight_status (str or list of str): decides how to subset individual analytes based on what
flight status labels they have
Accepts: 'any', whereby no subselection is placed on analytes based on flight status,
or: subset data by flight status (list of str) for all analytes as a GROUP i.e ['Pre-Flight'] or ['Pre-Flight', 'Post-Flight']
or: 'require at least one per status', where EACH analytes must have at least one measurement per flight status
Returns:
retained_data (pandas dataframe): Data subject to the processing steps described above.
"""
# selecting astros of interest & capturing id values for handling merges
analyte_df, selected_astros, id_values = select_astros_of_interest(analyte_df, telomere_df, astro_ids_of_interest, target)
# merging analyte & telomere data, capturing indexer for handling missing data
merge_analyte_df, indexer = merge_analyte_telomere_data(analyte_df, selected_astros, id_values, telos_percent_change, target)
# dropping missing values based on input
cleaned_data = how_drop_missing_values(merge_analyte_df, how_drop_missing, indexer)
# subsetting values based on flight status labels
retained_data = retain_flight_status(cleaned_data, retain_what_flight_status)
return retained_data
def find_high_correlates_analytes_mean_telos(merged_analyte_blood_tidy_df, corr_cutoff, corr_loc=0, astro_ids=False, target=None):
if astro_ids == False:
corr_value_tests = []
grouped_by_analyte = merged_analyte_blood_tidy_df.groupby('biochemistry analyte')
for group in list(merged_analyte_blood_tidy_df['biochemistry analyte'].unique()):
corr_value = grouped_by_analyte.get_group(group).corr()[target][corr_loc]
if abs(corr_value) > corr_cutoff:
corr_value_tests.append([group, corr_value])
# print(f"{group}: {corr_value:.4f}")
return corr_value_tests
elif astro_ids == True:
corr_value_requested = input('Please state index for correlation value in corr().. 0 or 1')
corr_value_tests = []
astro_ids = list(merged_analyte_blood_tidy_df['astro id'].unique())
astro_id_group = merged_analyte_blood_tidy_df.groupby('astro id')
for astro in astro_ids:
individ_astro_df = astro_id_group.get_group(astro)
analyte_grouped_by_individ = individ_astro_df.groupby('biochemistry analyte')
analytes = list(individ_astro_df['biochemistry analyte'].unique())
for analyte in analytes:
corr_value = analyte_grouped_by_individ.get_group(analyte).corr()[target][int(corr_value_requested)]
corr_value_tests.append([astro, analyte, corr_value])
return corr_value_tests
def plot_diverging_correlations(list_correlates=None, target_name=None, figsize=(11,7),
dpi=600, color1='black', color2='green', fontsize=16,
y_label_name='Blood biochemistry analytes',
path_labels='', save=True):
df = list_correlates.copy()
x = df['correlation value']
df['colors'] = [color2 if x < 0 else color1 for x in df['correlation value']]
df.sort_values('correlation value', inplace=True)
df.reset_index(inplace=True, drop=True)
plt.figure(figsize=figsize, dpi=dpi)
plt.hlines(y=df.index, xmin=0, xmax=df['correlation value'], color=df['colors'], alpha=0.6, linewidth=7)
# Decorations
plt.yticks(df.index, df['biochemistry analyte'], fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.xlabel(target_name, fontsize=fontsize)
plt.ylabel(y_label_name, fontsize=fontsize)
plt.grid(linestyle='-', alpha=.2, color='black')
plt.tight_layout()
my_xticks = np.array([-1, -.5, 0, .5, 1])
plt.xticks(my_xticks[::1])
if save:
plt.savefig(f'../MANUSCRIPT 11 ASTROS/figures/11 astros diverging bars {y_label_name} {target_name} {path_labels} n=11.png',
dpi=dpi, bbox_inches='tight')
def analyze_biochem_analytes_target(df=None, target=None, melt_biochem_df=None,
merge_telomere_biochem_data=False, astro_ids_of_interest='all astros',
parse_correlation_values=True, abs_value_corr=0.6,
parse_corr_min=0, parse_corr_max=0.8,
color1='black', color2='green', fontsize=16,
figsize=(9,5), y_label_name='Blood biochemistry analytes',
path_labels='', save=True):
if merge_telomere_biochem_data == True:
# merge analyte & telomere data
merged_df = correlate_astro_analytes_telomeres_pipeline(analyte_df=melt_biochem_df, telomere_df=df,
target=target, astro_ids_of_interest=astro_ids_of_interest,
how_drop_missing='by melted row',
retain_what_flight_status='require at least one per status',
telos_percent_change='no')
elif merge_telomere_biochem_data == False:
merged_df = df.copy()
# find highly correlated analytes
corr_value_tests = find_high_correlates_analytes_mean_telos(merged_df, abs_value_corr, corr_loc=0,
astro_ids=False, target=target)
# turn correlated analytes/mean telomere length into dataframe
blood_n11_high_corr_values = pd.DataFrame(corr_value_tests, columns=['biochemistry analyte', 'correlation value'])
if parse_correlation_values:
blood_n11_high_corr_values = blood_n11_high_corr_values[(blood_n11_high_corr_values['correlation value'] < parse_corr_min) |
(blood_n11_high_corr_values['correlation value'] > parse_corr_max)].copy()
# plot diverging bars correlates
plot_diverging_correlations(list_correlates=blood_n11_high_corr_values,
target_name=target, figsize=figsize,
color1=color1, color2=color2, save=save,
y_label_name=y_label_name, fontsize=fontsize,
path_labels=path_labels)
return merged_df
def scipy_anova_post_hoc_tests(df=None, flight_status_col='flight status', target='telo data per cell',
sig_test=stats.f_oneway, post_hoc=sp.posthoc_ttest):
g_1 = df[df[flight_status_col] == 'Pre-Flight'][target]
g_2 = df[df[flight_status_col] == 'Mid-Flight'][target]
g_3 = df[df[flight_status_col] == 'Post-Flight'][target]
statistic, p_value = sig_test(g_1, g_2, g_3)
print(f'ONE WAY ANOVA for telomere length: {p_value}')
# if anova detects sig diff, perform post-hoc tests
if p_value <= 0.05:
print('bonferroni')
display(sp.posthoc_ttest(df, val_col=target, group_col=flight_status_col, equal_var=True,
p_adjust=None))
def telos_scipy_anova_post_hoc_tests(df0=None, time_col='flight status', target='individual telomeres',
sig_test=stats.f_oneway, post_hoc=None, repeated_measures=False):
df = df0.copy()
df.rename({'telo data per cell': 'telo_data_per_cell',
'flight status': 'flight_status',
'Mean Telomere Length (qPCR)': 'Mean_Telomere_Length_(qPCR)',
               'Telomerase Activity (qPCR)': 'Telomerase_Activity_(qPCR)',
'astro id': 'astro_id'}, axis=1, inplace=True)
if ' ' in time_col:
time_col = time_col.replace(' ', '_')
if ' ' in target:
target = target.replace(' ', '_')
if repeated_measures == False:
g_1 = df[df[time_col] == 'Pre-Flight'][target]
g_2 = df[df[time_col] == 'Mid-Flight'][target]
g_3 = df[df[time_col] == 'Post-Flight'][target]
statistic, p_value = sig_test(g_1, g_2, g_3)
print(f'ONE WAY ANOVA for telomere length: {p_value}')
elif repeated_measures:
results = AnovaRM(df, target, 'astro_id',
within=[time_col], aggregate_func='mean').fit()
# pvalue
p_value = results.anova_table['Pr > F'][0]
print(f'REPEATED MEASURES ANOVA for telomere length: {p_value}')
# if anova detects sig diff, perform post-hoc tests
if p_value <= 0.05:
mc = MultiComparison(df[target], df[time_col])
mc_results = mc.tukeyhsd()
print(mc_results)
res = mc_results
print(f'TukeyHSD pvalues: {list(psturng(np.abs(res.meandiffs / res.std_pairs), len(res.groupsunique), res.df_total))}')
# print('\nbonferroni pvalues')
# display(sp.posthoc_ttest(df, val_col=target, group_col=time_col, equal_var=False,
# p_adjust='bonferroni'))
def id_encode_letters(row):
if row == '1536':
row = 'A'
elif row == '2171':
row = 'B'
return row
def eval_make_test_comparisons(df=None, timepoints=None, test=None, test_name=None,
target='individual telos'):
timepoints = list(df['timepoint'].unique())
timept_pairs = []
row = []
df_list = []
for timept in timepoints:
df_list.append(df[df['timepoint'] == timept][target])
for iter1, df in zip(timepoints, df_list):
for iter2, i in zip(timepoints, range(len(df_list))):
pair1, pair2 = f"{iter1}:{iter2}", f"{iter2}:{iter1}"
if iter1 != iter2 and pair1 not in timept_pairs and pair2 not in timept_pairs:
stat, pvalue = test(df, df_list[i])
print(f'{test_name} | {iter1} vs {iter2} {pvalue}')
timept_pairs.append(pair1)
timept_pairs.append(pair2)
row.append([test_name, iter1, iter2, pvalue])
return timept_pairs, row
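# Illustrative usage sketch (the test choice and column names are assumptions, not prescribed above):
# pairs, rows = eval_make_test_comparisons(df=exploded_telos_df, test=stats.mannwhitneyu,
#                                          test_name='Mann-Whitney U', target='individual telos')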
def make_post_flight_df_and_merge(astro_df=None, exploded_telos=None, timepoint=None):
"""
parse out mean telomere length & #s short/long telomeres from specific post-flight (R+7, R+60, ... R+270) timepoints
and merge with exploded_telos dataframe for machine learning prep
"""
# parsing out post-flight data of interest
timepoint_df = astro_df[astro_df['timepoint'] == timepoint].copy()
for col in ['telo means', 'Q1', 'Q4']:
timepoint_df.rename({col: f'{timepoint} {col}'}, axis=1, inplace=True)
timepoint_df.drop(['astro number', 'timepoint', 'flight status'], axis=1, inplace=True)
# extracting pre-flight individual telomere data only
exploded_telos_pref = exploded_telos[exploded_telos['flight status'] == 'Pre-Flight'].copy()
exploded_telos_pref.drop(['astro number', 'flight status'], axis=1, inplace=True)
merge_df = exploded_telos_pref.merge(timepoint_df, on=['astro id'])
return merge_df
class make_features(BaseEstimator, TransformerMixin):
def __init__(self, make_log_individ_telos=False, make_log_target=False):
self.make_log_individ_telos = make_log_individ_telos
self.make_log_target = make_log_target
def fit(self, X, y=None):
return self
def create_log_individ_telos(self, X, y=None):
X['individual telos'] = np.log1p(X['individual telos'])
return X
def create_log_target(self, X, y=None):
X['4 C telo means'] = np.log1p(X['4 C telo means'])
return X
def transform(self, X, y=None):
if self.make_log_individ_telos:
X = self.create_log_individ_telos(X)
if self.make_log_target:
X = self.create_log_target(X)
return X
class make_dummies(BaseEstimator, TransformerMixin):
def __init__(self, drop_first=True, cols_to_dummify=['timepoint'], how_dummify='encode'):
self.drop_first = drop_first
self.cols_to_dummify = cols_to_dummify
self.how_dummify=how_dummify
def fit(self, X, y=None):
return self
def transf_dummies(self, X, y=None):
dummies = pd.get_dummies(X, drop_first=self.drop_first, columns=self.cols_to_dummify)
return dummies
def label_encode(self, X, y=None):
label_encoder = preprocessing.LabelEncoder()
X['encoded_timepoint'] = label_encoder.fit_transform(X[self.cols_to_dummify].values.ravel())
X.drop(['timepoint'], axis=1, inplace=True)
return X
def transform(self, X, y=None):
if self.how_dummify == 'get_dummies':
X = self.transf_dummies(X)
elif self.how_dummify == 'encode':
X = self.label_encode(X)
return X
class clean_data(BaseEstimator, TransformerMixin):
def __init__(self, drop_astro_id=True, timepoint='R+7', target='telo means'):
self.drop_astro_id = drop_astro_id
self.timepoint_target = f'{timepoint} {target}'
self.timepoint = timepoint
self.target = target
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# enforcing col types
cols = list(X.columns)
for col in cols:
if 'individual telomeres' in col or 'telo means' in col:
X[col] = X[col].astype('float64')
else:
X[col] = X[col].astype('int64')
if self.drop_astro_id:
X.drop(['astro id'], axis=1, inplace=True)
X.reset_index(drop=True, inplace=True)
target_cols = ['telo means', 'Q1', 'Q4']
target_cols.remove(self.target)
for item in target_cols:
for col in X.columns:
if item in col:
X.drop([col], axis=1, inplace=True)
# if 'telo means' in self.target:
# X.drop([f'{timepoint} Q1', f'{timepoint} Q4'], axis=1, inplace=True)
# elif 'Q1' in self.target:
# X.drop([f'{timepoint} Q1', f'{timepoint} Q4'], axis=1, inplace=True)
# X = X[['encoded_timepoint', 'individual telomeres', self.timepoint_target]].copy()
return X
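# Illustrative sketch of chaining the transformers above with scikit-learn (a sketch only;
# the timepoint/target values are assumptions):
# from sklearn.pipeline import Pipeline
# prep_pipe = Pipeline([('features', make_features()),
#                       ('dummies', make_dummies()),
#                       ('clean', clean_data(timepoint='R+7', target='telo means'))])
# model_ready_df = prep_pipe.fit_transform(merge_df)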
def cv_score_fit_mae_test(train_set=None, test_set=None, target=None,
model=None, cv=5, scoring='neg_mean_absolute_error', verbose=True):
random.seed(888)
row = []
features = [col for col in train_set.columns if col != target and col != 'astro id']
X_train = train_set[features].copy()
X_test = test_set[features].copy()
y_train = train_set[target].copy()
y_test = test_set[target].copy()
# cv
    scores = -1 * cross_val_score(model, X_train, y_train, cv=cv, scoring=scoring)
if verbose:
print(f'MAE per CV fold: \n{scores} \n')
print(f'MEAN of MAE all folds: {scores.mean()}')
print(f'STD of MAE all folds: {scores.std()}\n')
# fitting the model
model.fit(X_train, y_train)
    # predict y_test from X_test - this is using the train/test split w/o shuffling
predict_y_test = model.predict(X_test)
if verbose:
print(f"MAE of predict_y_test & y_test: {mean_absolute_error(y_test, predict_y_test)}")
print(f'R2 between predict_y_test & y_test: {r2_score(y_test, predict_y_test)}')
row.append(['XGBoost', features, target, round(scores.mean(), 4),
round(scores.std(), 4),
round(mean_absolute_error(y_test, predict_y_test), 4),
round(r2_score(y_test, predict_y_test), 4)])
return model, row
def myMetric(x, y):
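    # correlation-based distance for hierarchical clustering: identical profiles -> 0,
    # uncorrelated -> 1, anti-correlated -> 2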
r = stats.pearsonr(x, y)[0]
return 1 - r
def plot_dendogram(Z, target=None, indexer=None):
with plt.style.context('fivethirtyeight' ):
plt.figure(figsize=(10, 2.5))
plt.title(f'Dendrogram of clusters by {target}', fontsize=22, fontweight='bold')
plt.xlabel('astro IDs', fontsize=22, fontweight='bold')
plt.ylabel('distance', fontsize=22, fontweight='bold')
hac.dendrogram(Z, labels=indexer, leaf_rotation=90., # rotates the x axis labels
leaf_font_size=15., ) # font size for the x axis labels
plt.show()
def plot_results2(timeSeries, D, cut_off_level, y_size, x_size, verbose, time, target):
result = pd.Series(hac.fcluster(D, cut_off_level, criterion='maxclust'))
if verbose:
clusters = result.unique()
fig = plt.subplots(figsize=(x_size, y_size))
mimg = math.ceil(cut_off_level/2.0)
gs = gridspec.GridSpec(mimg,2, width_ratios=[1,1])
cluster_indexed = pd.concat([result, timeSeries.reset_index(drop=True)], axis=1)
columns = list(cluster_indexed.columns[1:])
columns = ['clusters'] + columns
cluster_indexed.columns = columns
for ipic, c in enumerate(clusters):
clustered = cluster_indexed[cluster_indexed['clusters'] == c].copy()
print(ipic, "Cluster number %d has %d elements" % (c, len(clustered['astro id'])))
melt = clustered.melt(id_vars=['astro id', 'clusters'], var_name=time,value_name=target)
ax1 = plt.subplot(gs[ipic])
sns.lineplot(x=time, y=target, hue='astro id', data=melt, legend=False, ax=ax1)
ax1.set_title((f'Cluster number {c}'), fontsize=15, fontweight='bold')
plt.tight_layout()
return result
def cluster_data_return_df(df, target='inversions', time='timepoint', cut_off_n=4,
metric=myMetric, method='single',
y_size=6, x_size=10, verbose=True):
df = df.copy()
    label_enc = LabelEncoder()
    labels = list(df[time])
    encoded_labels = list(label_enc.fit_transform(df[time]))
    cypher_dict = dict(zip(encoded_labels, labels))
    df[time] = label_enc.transform(df[time])
df = df.pivot(index='astro id', columns=time, values=target).reset_index()
# run the clustering
cluster_Z = hac.linkage(df, method=method, metric=metric)
if verbose:
plot_dendogram(cluster_Z, target=target, indexer=df.index)
# return df bearing cluster groups
indexed_clusters = plot_results2(df, cluster_Z, cut_off_n, y_size, x_size, verbose, time, target)
# concat clusters to original df and return
ready_concat = df.reset_index(drop=True)
    clustered_index_df = pd.concat([ready_concat, indexed_clusters], axis=1)
import requests
import urllib.request
import urllib3
import datetime
from bs4 import BeautifulSoup
import pandas as pd
import feedparser
from datetime import date
from datetime import datetime
import re
from warnings import warn
_country = 'Canada'
_src_cat = 'Government Website'
_columns = ['start_date', 'country', 'region', 'subregion', 'source_url', 'source_category', 'source_title', 'source_full_text']
def _load_ontario(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
        datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Ontario.
"""
# Start searching at `end_date` date
end_str = end_date.strftime('%Y/%m/%d')
start_str = start_date.strftime('%Y/%m/%d')
base_url = 'https://news.ontario.ca/en/search?content_type=all&utf8=%E2%9C%93&date_range_end=' + end_str + '&date_range_start=' + start_str + '&date_select=desc&page='
region = 'Ontario'
subregion = ''
# Specific structure for news.contario.ca/archive
rows = []
page = 1
while True:
if verbose: print('Searching page ', page)
target = base_url + str(page)
response = requests.get(target)
soup = BeautifulSoup(response.text, "html.parser")
articles = soup.findAll('article')
if len(articles) == 0:
if verbose: print('No articles found.')
return pd.DataFrame(rows, columns=_columns)
for article in articles:
smallersoup = BeautifulSoup(str(article), "html.parser")
link = smallersoup.findAll('a')[0]['href']
title = smallersoup.findAll('a')[0].string
pub_date = datetime.strptime(smallersoup.time.string.replace('.', ''), "%B %d, %Y %I:%M %p")
if pub_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if pub_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
response = requests.get(link)
linksoup = BeautifulSoup(response.text, "html.parser")
full_text = linksoup.article.text
row = [pub_date, _country, region, subregion, link, _src_cat, title, full_text]
rows.append(row)
page += 1
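# Illustrative usage sketch (date range is arbitrary):
# ontario_df = _load_ontario(start_date=datetime(2020, 3, 1), end_date=datetime(2020, 3, 31))
# ontario_df.to_csv('ontario_releases_march_2020.csv', index=False)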
def _load_manitoba(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
        datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved.
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Manitoba.
"""
month_start = datetime(start_date.year, start_date.month, 1) # If the date range does not begin on the start of the month it skips the month in its entirety.
dates_between = pd.date_range(start=month_start, end=end_date, freq="MS")
url_base = 'https://news.gov.mb.ca'
# reversed to account for the most recent to least recent convention adopted when loading articles
targets = reversed([url_base + '/news/index.html?month=' + str(date.month) + '&year=' + str(date.year) + '&day=01&bgnG=GO&d=' for date in dates_between])
region = 'Manitoba'
subregion = ''
rows = []
for target in targets:
if verbose:
print('Searching link', target)
if target.startswith(url_base):
response = requests.get(target)
soup = BeautifulSoup(response.text, "html.parser")
items = soup.findAll("div", {"class": "maincontent"})
smallersoup = BeautifulSoup(str(items), "html.parser")
for article in smallersoup.findAll('h2'):
a = article.a
relative_link = a['href']
link = url_base + relative_link.split('..')[-1]
title = a.string
response = requests.get(link)
linksoup = BeautifulSoup(response.text, "html.parser")
date_text = linksoup.findAll("span", {"class": "article_date"})[0].string
pub_date = datetime.strptime(date_text, '%B %d, %Y')
if pub_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if pub_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
full_text = linksoup.findAll("div", {"class": ""})[0].text
row = [pub_date, _country, region, subregion, link, _src_cat, title, full_text]
rows.append(row)
    return pd.DataFrame(rows, columns=_columns)
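# Illustrative usage sketch (date range is arbitrary): the two loaders share the `_columns`
# schema, so their outputs can be concatenated directly.
# manitoba_df = _load_manitoba(start_date=datetime(2020, 3, 1), end_date=datetime(2020, 3, 31))
# ontario_df = _load_ontario(start_date=datetime(2020, 3, 1), end_date=datetime(2020, 3, 31))
# combined = pd.concat([ontario_df, manitoba_df], ignore_index=True)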
import os
from pathlib import Path
import sys
from time import strptime
import path_config
import requests
from bs4 import BeautifulSoup
import pandas as pd
class EspnTournament():
def __init__(self) -> None:
self.tournament_info = {
"tournament_id":"",
"tournament_name":"",
"tournament_date":"",
"tournament_purse":"",
"win_total":"",
"tournament_size":"",
"winner_name":"",
"winner_id":"",
"season_id":"",
}
def __getitem__(self, i):
return self.tournament_info[i]
def set_all_w(self, w_name, w_id, w_total):
self.tournament_info["winner_name"] = w_name
self.tournament_info["winner_id"] = w_id
self.tournament_info["win_total"] = w_total
def set_all_missing(self):
self.tournament_info["win_total"] = None
self.tournament_info["tournament_size"] = None
self.tournament_info["winner_name"] = None
self.tournament_info["winner_id"] = None
def get_tournament_id(self):
return self.tournament_info["tournament_id"]
def set_tournament_id(self, url):
"""Set tournament id from a url.
Parameters
----------
url : str
ESPN tournament url.
Examples
--------
>>> espn_t = EspnTournament()
>>> t_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
>>> espn_t.set_tournament_id(t_url)
"""
t_id = url[url.rfind("=") + 1:]
self.tournament_info["tournament_id"] = t_id
def get_tournament_name(self):
return self.tournament_info["tournament_name"]
def set_tournament_name(self, tourn_meta):
"""Set tournament name from a tournament meta.
Parameters
----------
tournament_meta : element.Tag
child of Leaderboard__Header class to find tournament name.
Examples
--------
>>> espn_t = EspnTournament()
        >>> espn_t.set_tournament_name(tourn_meta)
"""
tourn_name = tourn_meta.find("h1").text
self.tournament_info["tournament_name"] = tourn_name
def parse_espn_dates(self, date, identifier, b_identifier=True):
"""Parse for subset date of the original date
Parameters
----------
date : str
ESPN tournament date to parse.
identifier : str
Identifier to be searched for.
b_identifier : bool, optional
Flag to tell where subset search begins.
Returns
-------
str
Parsed ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.parse_espn_dates("Oct 5-8 2018", "-")
"Oct 5"
"""
if b_identifier:
if date.find(identifier) != -1:
b_idx = date.find(identifier)
# Should return month
n_date = date[:b_idx].rstrip()
return n_date
else:
# special case of only one date in link
b_idx = date.find(",")
n_date = date[:b_idx]
return n_date
else:
if date.find(identifier) != -1:
a_idx = date.find(identifier)
# Should return day
return date[a_idx: ]
else:
print("Did not find identifier in string for: ", date)
def date_parser(self, date):
"""Reformat ESPN tournament date.
Parameters
----------
date : str
Date to parse.
Returns
-------
str
Reformatted ESPN date.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.date_parser("Oct 5-8 2018")
"10/5/2018"
"""
year = date[date.rfind(" ")+1:]
month_and_day = self.parse_espn_dates(date, "-")
day = self.parse_espn_dates(month_and_day, " ", b_identifier=False)
day = day.lstrip()
month = self.parse_espn_dates(month_and_day, " ", b_identifier=True)
month_abr = month[:3]
month_number = strptime(month_abr, "%b").tm_mon
date_str = str(month_number) + "/" + day + "/" + year
return date_str
def get_date(self):
return self.tournament_info["tournament_date"]
def set_date(self, tourn_meta):
"""Set tournament date from a tournament meta.
Parameters
----------
tourn_meta : element.Tag
child of Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_date(tourn_meta)
"""
tourn_date = tourn_meta.find("span").text
t_date = self.date_parser(tourn_date)
self.tournament_info["tournament_date"] = t_date
def get_tournament_purse(self):
return self.tournament_info["tournament_purse"]
def set_tournament_purse(self, tourn_header):
"""Set tournament purse from a tournament header.
Parameters
----------
tourn_header : element.Tag
Leaderboard__Header class.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_purse(tourn_header)
"""
purse_class = tourn_header.find("div", class_="n7 clr-gray-04").text
# string find method
purse_start = purse_class.find("$") + 1
if purse_class.find("D") != -1:
purse_end = purse_class.find("D")
purse = purse_class[purse_start:purse_end]
else:
purse = purse_class[purse_start:]
purse = purse.replace(",", "")
self.tournament_info["tournament_purse"] = purse
def get_winning_score(self):
return self.tournament_info["win_total"]
def set_winning_score(self, t_body):
"""Set winning score total from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winning_score(t_body)
"""
# tournament winner's total's data
tourn_totals = t_body.find("td", class_="Table__TD")
if tourn_totals:
totals = tourn_totals.find_next_siblings()
if len(totals) == 9:
# selects 4 round (72 hole) total
total = totals[-3].text
self.tournament_info["win_total"] = total
else:
total = totals[-3].text
if len(total) == 0:
self.tournament_info["win_total"] = None
else:
self.tournament_info["win_total"] = total
def get_tournament_size(self):
return self.tournament_info["tournament_size"]
def set_tournament_size(self, t_body):
"""Set tournament size from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_tournament_size(t_body)
"""
players = t_body.find_all("tr", class_="Table__TR Table__even")
if players is not None:
num_players = len(players)
self.tournament_info["tournament_size"] = num_players
def get_winner_name(self):
return self.tournament_info["winner_name"]
def set_winner_name(self, t_body):
"""Set winner name from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_name(t_body)
"""
winner = t_body.find("a")
if winner:
name = winner.text
self.tournament_info["winner_name"] = name
else:
self.tournament_info["winner_name"] = None
def get_winner_id(self):
return self.tournament_info["winner_id"]
def set_winner_id(self, t_body):
"""Set winner id from tournament body.
Parameters
----------
t_body : element.Tag
Child of ResponsiveTable.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_winner_id(t_body)
"""
winner = t_body.find("a")
if winner:
winner_id = winner["href"]
# substring start and end indexes
start_winner = winner_id.find("id/") + 3
end_winner = winner_id.rfind("/")
id = winner_id[start_winner:end_winner]
self.tournament_info["winner_id"] = id
else:
self.tournament_info["winner_id"] = None
def get_season_id(self):
return self.tournament_info["season_id"]
def set_season_id(self, s_id):
"""Set season identifier from s_id.
Parameters
----------
s_id : int
Season identifier to set.
Examples
--------
>>> espn_t = EspnTournament()
>>> espn_t.set_season_id(2018)
"""
self.tournament_info["season_id"] = s_id
class EspnSeason():
def __init__(self, start, end=None) -> None:
b_url = "https://www.espn.com/golf/schedule/_/season/"
if end is not None:
season_urls = [b_url + str(season) for season in range(start, end+1)]
self.end = end
else:
season_urls = [f"{b_url}{start}"]
self.end = None
self.start = start
self.season_urls = season_urls
self.season_data = []
def retrieve_tournament_info(self, t_url, s_id):
"""Retrieve tournament information from tournament url and season id.
Parameters
----------
t_url : str
Tournament url to extract information.
s_id : int
Season identifier.
Examples
--------
>>> tournament_url = "https://www.espn.com/golf/leaderboard?tournamentId=3802"
>>> espn_t.retrieve_tournament_info(tournament_url, 2017)
"""
espn_t = EspnTournament()
with requests.Session() as session:
page = session.get(t_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
header = soup.find("div", class_="Leaderboard__Header")
mt4 = header.find_all("div", class_="mt4")
tourn_meta = mt4[-1]
espn_t.set_tournament_id(t_url)
espn_t.set_tournament_name(tourn_meta)
espn_t.set_date(tourn_meta)
espn_t.set_tournament_purse(header)
# Table's on webpage. index with -1 in case of playoff table
tourn_tables = soup.select("div.ResponsiveTable")
if tourn_tables:
# win_total, tournamnet_size, winner_name, winner_id
tourn_table = tourn_tables[-1]
tourn_body = tourn_table.find("tbody", class_="Table__TBODY")
espn_t.set_winning_score(tourn_body)
espn_t.set_tournament_size(tourn_body)
espn_t.set_winner_name(tourn_body)
espn_t.set_winner_id(tourn_body)
espn_t.set_season_id(s_id)
if espn_t.get_tournament_id() == "2277":
espn_t.set_all_w("<NAME>", "1037", "265")
else:
print(f"No div.ResponsiveTable, (Tournament {espn_t.get_tournament_id()} Cancelled)")
espn_t.set_all_missing()
espn_t.set_season_id(s_id)
self.season_data.append(espn_t)
def retrieve_season(self, season_url):
"""Retrieve season from season url.
Parameters
----------
season_url : str
Season url to extract information.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> season_url = "https://www.espn.com/golf/schedule/_/season/2018"
>>> espn_s.retrieve_season(season_url)
"""
with requests.Session() as session:
page = session.get(season_url)
if page.status_code == 200:
soup = BeautifulSoup(page.content, "html.parser")
season_table = soup.select("div.ResponsiveTable")
                if season_table:
season_body = season_table[0].find("tbody", class_="Table__TBODY")
tournaments = season_body.find_all("div", class_="eventAndLocation__innerCell")
if tournaments is not None:
for tournament in tournaments:
tournament_url = tournament.find("a")
if tournament_url:
t_url = tournament_url["href"]
print(f"Fetching {t_url} data")
season_id = season_url[season_url.rfind("/")+1 :]
self.retrieve_tournament_info(t_url, season_id)
else:
print(f"Error retrieving page. page status code: {page.status_code}")
def retrieve_all_seasons(self):
"""Retrieve all seasons set from constructor.
Examples
--------
>>> espn_s = EspnSeason(2018)
>>> espn_s.retrieve_all_seasons()
"""
for season in self.season_urls:
self.retrieve_season(season)
def feed_season_data(self):
"""Feed all season data held.
Returns
-------
pd.DataFrame
Season data in dataframe.
Examples
--------
>>> e_season = EspnSeason(2018)
>>> e_season.retrieve_all_seasons()
>>> df = e_season.feed_season_data()
"""
if self.season_data is not None:
data = [tournament.tournament_info for tournament in self.season_data]
df = pd.DataFrame(data)
df["tournament_purse"] = pd.to_numeric(df["tournament_purse"], downcast="integer")
df["win_total"] = pd.to_numeric(df["win_total"], downcast="integer")
df["tournament_date"] = pd.to_datetime(df["tournament_date"])
df.sort_values(by=["tournament_date", "season_id"], inplace=True)
if self.end is not None:
f_name = f"espn_tournaments_{self.start}_{self.end}.csv"
else:
f_name = f"espn_tournaments_{self.start}.csv"
file_path = Path(path_config.RAW_TOURNAMENTS, f_name)
df.to_csv(file_path, index=False)
return df
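# Illustrative end-to-end sketch (season range is arbitrary), mirroring the docstring examples:
# espn_s = EspnSeason(2017, 2018)
# espn_s.retrieve_all_seasons()
# tournaments_df = espn_s.feed_season_data()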
class CleanTournaments():
def __init__(self, df) -> None:
self.df = df
        self.cleaned_df = pd.DataFrame()
from copy import deepcopy
from handyspark.ml.base import HandyTransformers
from handyspark.plot import histogram, boxplot, scatterplot, strat_scatterplot, strat_histogram,\
consolidate_plots, post_boxplot
from handyspark.sql.pandas import HandyPandas
from handyspark.sql.transform import _MAPPING, HandyTransform
from handyspark.util import HandyException, dense_to_array, disassemble, ensure_list, check_columns, \
none2default
import inspect
from matplotlib.axes import Axes
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter, add
import pandas as pd
from pyspark.ml.stat import Correlation
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import Statistics
from pyspark.sql import DataFrame, GroupedData, Window, functions as F, Column, Row
from pyspark.ml.feature import VectorAssembler, StandardScaler, PCA
from pyspark.ml.pipeline import Pipeline
from scipy.stats import chi2
from scipy.linalg import inv
def toHandy(self):
"""Converts Spark DataFrame into HandyFrame.
"""
return HandyFrame(self)
def notHandy(self):
return self
DataFrame.toHandy = toHandy
DataFrame.notHandy = notHandy
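# Illustrative usage sketch (assumes an existing SparkSession named `spark` and a hypothetical file):
# hdf = spark.read.csv('data.csv', header=True, inferSchema=True).toHandy()
# sdf = hdf.notHandy()  # back to a plain Spark DataFrame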
def agg(f):
f.__is_agg = True
return f
def inccol(f):
f.__is_inccol = True
return f
class Handy(object):
def __init__(self, df):
self._df = df
# classification
self._is_classification = False
self._nclasses = None
self._classes = None
# transformers
self._imputed_values = {}
self._fenced_values = {}
# groups / strata
self._group_cols = None
self._strata = None
self._strata_object = None
self._strata_plot = None
self._clear_stratification()
self._safety_limit = 1000
self._safety = True
self._update_types()
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ['_df', '_strata_object', '_strata_plot']:
setattr(result, k, deepcopy(v, memo))
return result
def __getitem__(self, *args):
if isinstance(args[0], tuple):
args = args[0]
item = args[0]
n = 20
if len(args) > 1:
n = args[1]
if n is None:
n = -1
if isinstance(item, int):
idx = item + (len(self._group_cols) if self._group_cols is not None else 0)
assert idx < len(self._df.columns), "Invalid column index {}".format(idx)
item = list(self._df.columns)[idx]
if isinstance(item, str):
if self._group_cols is None or len(self._group_cols) == 0:
res = self._take_array(item, n)
if res.ndim > 1:
res = res.tolist()
res = pd.Series(res, name=item)
if self._strata is not None:
strata = list(map(lambda v: v[1].to_dict(), self.strata.iterrows()))
if len(strata) == len(res):
res = pd.concat([pd.DataFrame(strata), res], axis=1).set_index(self._strata).sort_index()
return res
else:
check_columns(self._df, list(self._group_cols) + [item])
pdf = self._df.notHandy().select(list(self._group_cols) + [item])
if n != -1:
pdf = pdf.limit(n)
res = pdf.toPandas().set_index(list(self._group_cols)).sort_index()[item]
return res
@property
def stages(self):
return (len(list(filter(lambda v: '+' == v,
map(lambda s: s.strip()[0],
self._df.rdd.toDebugString().decode().split('\n'))))) + 1)
@property
def statistics_(self):
return self._imputed_values
@property
def fences_(self):
return self._fenced_values
@property
def is_classification(self):
return self._is_classification
@property
def classes(self):
return self._classes
@property
def nclasses(self):
return self._nclasses
@property
def response(self):
return self._response
@property
def ncols(self):
return len(self._types)
@property
def nrows(self):
return self._df.count()
@property
def shape(self):
return (self.nrows, self.ncols)
@property
def strata(self):
if self._strata is not None:
return pd.DataFrame(data=self._strata_combinations, columns=self._strata)
@property
def strata_colnames(self):
if self._strata is not None:
return list(map(str, ensure_list(self._strata)))
else:
return []
def _stratify(self, strata):
return HandyStrata(self, strata)
def _clear_stratification(self):
self._strata = None
self._strata_object = None
self._strata_plot = None
self._strata_combinations = []
self._strata_raw_combinations = []
self._strata_clauses = []
self._strata_raw_clauses = []
self._n_cols = 1
self._n_rows = 1
def _set_stratification(self, strata, raw_combinations, raw_clauses, combinations, clauses):
if strata is not None:
assert len(combinations[0]) == len(strata), "Mismatched number of combinations and strata!"
self._strata = strata
self._strata_raw_combinations = raw_combinations
self._strata_raw_clauses = raw_clauses
self._strata_combinations = combinations
self._strata_clauses = clauses
self._n_cols = len(set(map(itemgetter(0), combinations)))
try:
self._n_rows = len(set(map(itemgetter(1), combinations)))
except IndexError:
self._n_rows = 1
def _build_strat_plot(self, n_rows, n_cols, **kwargs):
fig, axs = plt.subplots(n_rows, n_cols, **kwargs)
if n_rows == 1:
axs = [axs]
if n_cols == 1:
axs = [axs]
self._strata_plot = (fig, [ax for col in np.transpose(axs) for ax in col])
def _update_types(self):
self._types = list(map(lambda t: (t.name, t.dataType.typeName()), self._df.schema.fields))
self._numerical = list(map(itemgetter(0), filter(lambda t: t[1] in ['byte', 'short', 'integer', 'long',
'float', 'double'], self._types)))
self._continuous = list(map(itemgetter(0), filter(lambda t: t[1] in ['double', 'float'], self._types)))
        self._categorical = list(map(itemgetter(0), filter(lambda t: t[1] in ['byte', 'short', 'integer', 'long',
                                                                              'boolean', 'string'], self._types)))
self._array = list(map(itemgetter(0), filter(lambda t: t[1] in ['array', 'map'], self._types)))
self._string = list(map(itemgetter(0), filter(lambda t: t[1] in ['string'], self._types)))
def _take_array(self, colname, n):
check_columns(self._df, colname)
datatype = self._df.notHandy().select(colname).schema.fields[0].dataType.typeName()
rdd = self._df.notHandy().select(colname).rdd.map(itemgetter(0))
if n == -1:
data = rdd.collect()
else:
data = rdd.take(n)
return np.array(data, dtype=_MAPPING.get(datatype, 'object'))
def _value_counts(self, colnames, dropna=True, raw=False):
colnames = ensure_list(colnames)
strata = self.strata_colnames
colnames = strata + colnames
check_columns(self._df, colnames)
data = self._df.notHandy().select(colnames)
if dropna:
data = data.dropna()
values = (data.groupby(colnames).agg(F.count('*').alias('value_counts'))
.toPandas().set_index(colnames).sort_index()['value_counts'])
if not raw:
for level, col in enumerate(ensure_list(self._strata)):
if not isinstance(col, str):
values.index.set_levels(pd.Index(col._clauses[1:-1]), level=level, inplace=True)
values.index.set_names(col.colname, level=level, inplace=True)
return values
def _fillna(self, target, values):
assert isinstance(target, DataFrame), "Target must be a DataFrame"
items = values.items()
for colname, v in items:
if isinstance(v, dict):
clauses = v.keys()
whens = ' '.join(['WHEN (({clause}) AND (isnan({col}) OR isnull({col}))) THEN {quote}{filling}{quote}'
.format(clause=clause, col=colname, filling=v[clause],
quote='"' if isinstance(v[clause], str) else '')
for clause in clauses])
else:
whens = ('WHEN (isnan({col}) OR isnull({col})) THEN {quote}{filling}{quote}'
.format(col=colname, filling=v,
quote='"' if isinstance(v, str) else ''))
expression = F.expr('CASE {expr} ELSE {col} END'.format(expr=whens, col=colname))
target = target.withColumn(colname, expression)
return target
def __stat_to_dict(self, colname, stat):
if len(self._strata_clauses):
if isinstance(stat, pd.Series):
stat = stat.to_frame(colname)
return {clause: stat.query(raw_clause)[colname].iloc[0]
for clause, raw_clause in zip(self._strata_clauses, self._strata_raw_clauses)}
else:
return stat[colname]
def _fill_values(self, continuous, categorical, strategy):
values = {}
colnames = list(map(itemgetter(0), filter(lambda t: t[1] == 'mean', zip(continuous, strategy))))
values.update(dict([(col, self.__stat_to_dict(col, self.mean(col))) for col in colnames]))
colnames = list(map(itemgetter(0), filter(lambda t: t[1] == 'median', zip(continuous, strategy))))
values.update(dict([(col, self.__stat_to_dict(col, self.median(col))) for col in colnames]))
values.update(dict([(col, self.__stat_to_dict(col, self.mode(col)))
for col in categorical if col in self._categorical]))
return values
def __fill_self(self, continuous, categorical, strategy):
continuous = ensure_list(continuous)
categorical = ensure_list(categorical)
check_columns(self._df, continuous + categorical)
strategy = none2default(strategy, 'mean')
if continuous == ['all']:
continuous = self._continuous
if categorical == ['all']:
categorical = self._categorical
if isinstance(strategy, (list, tuple)):
assert len(continuous) == len(strategy), "There must be a strategy to each column."
else:
strategy = [strategy] * len(continuous)
values = self._fill_values(continuous, categorical, strategy)
self._imputed_values.update(values)
res = HandyFrame(self._fillna(self._df, values), self)
return res
def _dense_to_array(self, colname, array_colname):
check_columns(self._df, colname)
res = dense_to_array(self._df.notHandy(), colname, array_colname)
return HandyFrame(res, self)
def _agg(self, name, func, colnames):
colnames = none2default(colnames, self._df.columns)
colnames = ensure_list(colnames)
check_columns(self._df, self.strata_colnames + [col for col in colnames if not isinstance(col, Column)])
if func is None:
func = getattr(F, name)
res = (self._df.notHandy()
.groupby(self.strata_colnames)
.agg(*(func(col).alias(str(col)) for col in colnames if str(col) not in self.strata_colnames))
.toPandas())
if len(res) == 1:
res = res.iloc[0]
res.name = name
return res
def _calc_fences(self, colnames, k=1.5, precision=.01):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
strata = self.strata_colnames
pdf = (self._df.notHandy()
.groupby(strata)
.agg(F.count(F.lit(1)).alias('nrows'),
*[F.expr('approx_percentile({}, {}, {})'.format(c, q, 1./precision)).alias('{}_{}%'.format(c, int(q * 100)))
for q in [.25, .50, .75] for c in colnames],
*[F.mean(c).alias('{}_mean'.format(c)) for c in colnames]).toPandas())
for col in colnames:
pdf.loc[:, '{}_iqr'.format(col)] = pdf.loc[:, '{}_75%'.format(col)] - pdf.loc[:, '{}_25%'.format(col)]
pdf.loc[:, '{}_lfence'.format(col)] = pdf.loc[:, '{}_25%'.format(col)] - k * pdf.loc[:, '{}_iqr'.format(col)]
pdf.loc[:, '{}_ufence'.format(col)] = pdf.loc[:, '{}_75%'.format(col)] + k * pdf.loc[:, '{}_iqr'.format(col)]
return pdf
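    # Illustrative sketch, kept as comments so this module's behavior is unchanged: the same
    # Tukey fences computed locally with pandas, assuming `s` is a numeric pd.Series and `k`
    # the multiplier used above (default 1.5).
    #
    #   q1, q3 = s.quantile(.25), s.quantile(.75)
    #   iqr = q3 - q1
    #   lfence, ufence = q1 - k * iqr, q3 + k * iqr
    #   n_outliers = ((s < lfence) | (s > ufence)).sum()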
def _calc_mahalanobis_distance(self, colnames, output_col='__mahalanobis'):
"""Computes Mahalanobis distance from origin
"""
sdf = self._df.notHandy()
check_columns(sdf, colnames)
# Builds pipeline to assemble feature columns and scale them
assembler = VectorAssembler(inputCols=colnames, outputCol='__features')
scaler = StandardScaler(inputCol='__features', outputCol='__scaled', withMean=True)
pipeline = Pipeline(stages=[assembler, scaler])
features = pipeline.fit(sdf).transform(sdf)
# Computes correlation between features and inverts it
# Since we scaled the features, we can assume they have unit variance
# and therefore, correlation and covariance matrices are the same!
mat = Correlation.corr(features, '__scaled').head()[0].toArray()
inv_mat = inv(mat)
# Builds Pandas UDF to compute Mahalanobis distance from origin
# sqrt((V - 0) * inv_M * (V - 0))
try:
import pyarrow
@F.pandas_udf('double')
def pudf_mult(v):
return v.apply(lambda v: np.sqrt(np.dot(np.dot(v, inv_mat), v)))
except:
@F.udf('double')
def pudf_mult(v):
                # plain (row-at-a-time) UDFs receive a single array value per call, so compute the distance directly
                return float(np.sqrt(np.dot(np.dot(v, inv_mat), v)))
# Convert feature vector into array
features = dense_to_array(features, '__scaled', '__array_scaled')
# Computes Mahalanobis distance and flags as outliers all elements above critical value
distance = (features
.withColumn('__mahalanobis', pudf_mult('__array_scaled'))
.drop('__features', '__scaled', '__array_scaled'))
return distance
def _set_mahalanobis_outliers(self, colnames, critical_value=.999,
input_col='__mahalanobis', output_col='__outlier'):
"""Compares Mahalanobis distances to critical values using
Chi-Squared distribution to identify possible outliers.
"""
distance = self._calc_mahalanobis_distance(colnames)
# Computes critical value
critical_value = chi2.ppf(critical_value, len(colnames))
# Computes Mahalanobis distance and flags as outliers all elements above critical value
outlier = (distance.withColumn(output_col, F.col(input_col) > critical_value))
return outlier
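    # Illustrative sketch (comments only): the same flagging rule applied to a local NumPy
    # array `X` of shape (n, k) holding the selected numeric columns. `X` is an assumption
    # for illustration, not a name used in this module.
    #
    #   Xs = (X - X.mean(axis=0)) / X.std(axis=0, ddof=1)   # standardize, as StandardScaler(withMean=True) does
    #   md = np.sqrt(np.einsum('ij,jk,ik->i', Xs, inv(np.corrcoef(Xs, rowvar=False)), Xs))
    #   flagged = md > chi2.ppf(.999, df=X.shape[1])         # mirrors the comparison used above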
def _calc_bxp_stats(self, fences_df, colname, showfliers=False):
strata = self.strata_colnames
clauses = self._strata_raw_clauses
if not len(clauses):
clauses = [None]
qnames = ['25%', '50%', '75%', 'mean', 'lfence', 'ufence']
col_summ = fences_df[strata + ['{}_{}'.format(colname, q) for q in qnames] + ['nrows']]
col_summ.columns = strata + qnames + ['nrows']
if len(strata):
col_summ = col_summ.set_index(strata)
lfence, ufence = col_summ[['lfence']], col_summ[['ufence']]
expression = None
for clause in clauses:
if clause is not None:
partial = F.col(colname).between(lfence.query(clause).iloc[0, 0], ufence.query(clause).iloc[0, 0])
partial &= F.expr(clause)
else:
partial = F.col(colname).between(lfence.iloc[0, 0], ufence.iloc[0, 0])
if expression is None:
expression = partial
else:
expression |= partial
outlier = self._df.notHandy().withColumn('__{}_outlier'.format(colname), ~expression)
minmax = (outlier
.filter('not __{}_outlier'.format(colname))
.groupby(strata)
.agg(F.min(colname).alias('min'),
F.max(colname).alias('max'))
.toPandas())
if len(strata):
minmax = [minmax.query(clause).iloc[0][['min', 'max']].values for clause in clauses]
else:
minmax = [minmax.iloc[0][['min', 'max']].values]
fliers_df = outlier.filter('__{}_outlier'.format(colname))
fliers_df = [fliers_df.filter(clause) for clause in clauses] if len(strata) else [fliers_df]
fliers_count = [df.count() for df in fliers_df]
if showfliers:
fliers = [(df
.select(F.abs(F.col(colname)).alias(colname))
.orderBy(F.desc(colname))
.limit(1000)
.toPandas()[colname].values) for df in fliers_df]
else:
fliers = [[]] * len(clauses)
stats = [] # each item corresponds to a different clause - all items belong to the same column
nrows = []
for clause, whiskers, outliers in zip(clauses, minmax, fliers):
summary = col_summ
if clause is not None:
summary = summary.query(clause)
item = {'mean': summary['mean'].values[0],
'med': summary['50%'].values[0],
'q1': summary['25%'].values[0],
'q3': summary['75%'].values[0],
'whislo': whiskers[0],
'whishi': whiskers[1],
'fliers': outliers}
stats.append(item)
nrows.append(summary['nrows'].values[0])
if not len(nrows):
nrows = summary['nrows'].values[0]
return stats, fliers_count, nrows
def set_response(self, colname):
check_columns(self._df, colname)
self._response = colname
if colname is not None:
if colname not in self._continuous:
self._is_classification = True
self._classes = self._df.notHandy().select(colname).rdd.map(itemgetter(0)).distinct().collect()
self._nclasses = len(self._classes)
return self
def disassemble(self, colname, new_colnames=None):
check_columns(self._df, colname)
res = disassemble(self._df.notHandy(), colname, new_colnames)
return HandyFrame(res, self)
def to_metrics_RDD(self, prob_col, label):
check_columns(self._df, [prob_col, label])
return self.disassemble(prob_col).select('{}_1'.format(prob_col), F.col(label).cast('double')).rdd.map(tuple)
def corr(self, colnames=None, method='pearson'):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
if self._strata is not None:
colnames = sorted([col for col in colnames if col not in self.strata_colnames])
correlations = Statistics.corr(self._df.notHandy().select(colnames).dropna().rdd.map(lambda row: row[0:]), method=method)
pdf = pd.DataFrame(correlations, columns=colnames, index=colnames)
return pdf
def fill(self, *args, continuous=None, categorical=None, strategy=None):
if len(args) and isinstance(args[0], DataFrame):
return self._fillna(args[0], self._imputed_values)
else:
return self.__fill_self(continuous=continuous, categorical=categorical, strategy=strategy)
@agg
def isnull(self, ratio=False):
def func(colname):
return F.sum(F.isnull(colname).cast('int')).alias(colname)
name = 'missing'
if ratio:
name += '(ratio)'
missing = self._agg(name, func, self._df.columns)
if ratio:
nrows = self._agg('nrows', F.sum, F.lit(1))
if isinstance(missing, pd.Series):
missing = missing / nrows["Column<b'1'>"]
else:
missing.iloc[:, 1:] = missing.iloc[:, 1:].values / nrows["Column<b'1'>"].values.reshape(-1, 1)
if len(self.strata_colnames):
missing = missing.set_index(self.strata_colnames).T.unstack()
missing.name = name
return missing
@agg
def nunique(self, colnames=None):
res = self._agg('nunique', F.approx_count_distinct, colnames)
if len(self.strata_colnames):
res = res.set_index(self.strata_colnames).T.unstack()
res.name = 'nunique'
return res
def outliers(self, colnames=None, ratio=False, method='tukey', **kwargs):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
res = None
if method == 'tukey':
outliers = []
try:
k = float(kwargs['k'])
except KeyError:
k = 1.5
fences_df = self._calc_fences(colnames, k=k, precision=.01)
index = fences_df[self.strata_colnames].set_index(self.strata_colnames).index \
if len(self.strata_colnames) else None
for colname in colnames:
stats, counts, nrows = self._calc_bxp_stats(fences_df, colname, showfliers=False)
outliers.append(pd.Series(counts, index=index, name=colname))
if ratio:
outliers[-1] /= nrows
res = pd.DataFrame(outliers).unstack()
if not len(self.strata_colnames):
res = res.droplevel(0)
name = 'outliers'
if ratio:
name += '(ratio)'
res.name = name
return res
def get_outliers(self, colnames=None, critical_value=.999):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
outliers = self._set_mahalanobis_outliers(colnames, critical_value)
df = outliers.filter('__outlier').orderBy(F.desc('__mahalanobis')).drop('__outlier', '__mahalanobis')
return HandyFrame(df, self)
def remove_outliers(self, colnames=None, critical_value=.999):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
outliers = self._set_mahalanobis_outliers(colnames, critical_value)
df = outliers.filter('not __outlier').drop('__outlier', '__mahalanobis')
return HandyFrame(df, self)
def fence(self, colnames, k=1.5):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
pdf = self._calc_fences(colnames, k=k)
if len(self.strata_colnames):
pdf = pdf.set_index(self.strata_colnames)
df = self._df.notHandy()
for colname in colnames:
lfence, ufence = pdf.loc[:, ['{}_lfence'.format(colname)]], pdf.loc[:, ['{}_ufence'.format(colname)]]
if len(self._strata_raw_clauses):
whens1 = ' '.join(['WHEN ({clause}) THEN greatest({col}, {fence})'.format(clause=clause,
col=colname,
fence=lfence.query(clause).iloc[0, 0])
for clause in self._strata_raw_clauses])
whens2 = ' '.join(['WHEN ({clause}) THEN least({col}, {fence})'.format(clause=clause,
col=colname,
fence=ufence.query(clause).iloc[0, 0])
for clause in self._strata_raw_clauses])
expression1 = F.expr('CASE {} END'.format(whens1))
expression2 = F.expr('CASE {} END'.format(whens2))
self._fenced_values.update({colname: {clause: [lfence.query(clause).iloc[0, 0],
ufence.query(clause).iloc[0, 0]]
for clause in self._strata_clauses}})
else:
self._fenced_values.update({colname: [lfence.iloc[0, 0], ufence.iloc[0, 0]]})
expression1 = F.expr('greatest({col}, {fence})'.format(col=colname, fence=lfence.iloc[0, 0]))
expression2 = F.expr('least({col}, {fence})'.format(col=colname, fence=ufence.iloc[0, 0]))
df = df.withColumn(colname, expression1).withColumn(colname, expression2)
return HandyFrame(df.select(self._df.columns), self)
@inccol
def value_counts(self, colnames, dropna=True):
return self._value_counts(colnames, dropna)
@inccol
def mode(self, colname):
check_columns(self._df, [colname])
if self._strata is None:
values = (self._df.notHandy().select(colname).dropna()
.groupby(colname).agg(F.count('*').alias('mode'))
.orderBy(F.desc('mode')).limit(1)
.toPandas()[colname][0])
return pd.Series(values, index=[colname], name='mode')
else:
strata = self.strata_colnames
colnames = strata + [colname]
values = (self._df.notHandy().select(colnames).dropna()
.groupby(colnames).agg(F.count('*').alias('mode'))
.withColumn('order', F.row_number().over(Window.partitionBy(strata).orderBy(F.desc('mode'))))
.filter('order == 1').drop('order')
.toPandas().set_index(strata).sort_index()[colname])
values.name = 'mode'
return values
@inccol
def entropy(self, colnames):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
sdf = self._df.notHandy()
n = sdf.count()
entropy = []
for colname in colnames:
if colname in self._categorical:
res = (self._df
.groupby(self.strata_colnames + [colname])
.agg(F.count('*').alias('value_counts')).withColumn('probability', F.col('value_counts') / n)
.groupby(self.strata_colnames)
.agg(F.sum(F.expr('-log2(probability) * probability')).alias(colname))
.safety_off()
.cols[self.strata_colnames + [colname]][:])
if len(self.strata_colnames):
res.set_index(self.strata_colnames, inplace=True)
res = res.unstack()
else:
res = res[colname]
res.index = [colname]
else:
res = pd.Series(None, index=[colname])
res.name = 'entropy'
entropy.append(res)
return pd.concat(entropy).sort_index()
@inccol
def mutual_info(self, colnames):
def distribution(sdf, colnames):
return sdf.groupby(colnames).agg(F.count('*').alias('__count'))
check_columns(self._df, colnames)
n = len(colnames)
probs = []
sdf = self._df.notHandy()
for i in range(n):
probs.append(distribution(sdf, self.strata_colnames + [colnames[i]]))
if len(self.strata_colnames):
nrows = sdf.groupby(self.strata_colnames).agg(F.count('*').alias('__n'))
else:
nrows = sdf.count()
entropies = self.entropy(colnames)
res = []
for i in range(n):
for j in range(i, n):
if i == j:
mi = pd.Series(entropies[colnames[i]], name='mi').to_frame()
else:
tdf = distribution(sdf, self.strata_colnames + [colnames[i], colnames[j]])
if len(self.strata_colnames):
tdf = tdf.join(nrows, on=self.strata_colnames)
else:
tdf = tdf.withColumn('__n', F.lit(nrows))
tdf = tdf.join(probs[i].toDF(*self.strata_colnames, colnames[i], '__count0'), on=self.strata_colnames + [colnames[i]])
tdf = tdf.join(probs[j].toDF(*self.strata_colnames, colnames[j], '__count1'), on=self.strata_colnames + [colnames[j]])
mi = (tdf
.groupby(self.strata_colnames)
.agg(F.sum(F.expr('log2(__count * __n / (__count0 * __count1)) * __count / __n')).alias('mi'))
.toPandas())
if len(self.strata_colnames):
mi.set_index(self.strata_colnames, inplace=True)
res.append(mi.assign(ci=colnames[j], cj=colnames[i]))
res.append(mi.assign(ci=colnames[i], cj=colnames[j]))
res = pd.concat(res).set_index(['ci', 'cj'], append=len(self.strata_colnames)).sort_index()
res = pd.pivot_table(res, index=self.strata_colnames + ['ci'], columns=['cj'])
res.index.names = self.strata_colnames + ['']
res.columns = res.columns.droplevel(0).rename('')
return res
@agg
def mean(self, colnames):
return self._agg('mean', F.mean, colnames)
@agg
def min(self, colnames):
return self._agg('min', F.min, colnames)
@agg
def max(self, colnames):
return self._agg('max', F.max, colnames)
@agg
def percentile(self, colnames, perc=50, precision=.01):
def func(c):
return F.expr('approx_percentile({}, {}, {})'.format(c, perc/100., 1./precision))
try:
name = {25: 'q1', 50: 'median', 75: 'q3'}[perc]
except KeyError:
name = 'percentile_{}'.format(perc)
return self._agg(name, func, colnames)
@agg
def median(self, colnames, precision=.01):
return self.percentile(colnames, 50, precision)
@agg
def stddev(self, colnames):
return self._agg('stddev', F.stddev, colnames)
@agg
def var(self, colnames):
return self._agg('var', F.stddev, colnames) ** 2
@agg
def q1(self, colnames, precision=.01):
return self.percentile(colnames, 25, precision)
@agg
def q3(self, colnames, precision=.01):
return self.percentile(colnames, 75, precision)
### Boxplot functions
def _strat_boxplot(self, colnames, **kwargs):
n_rows = n_cols = 1
kwds = deepcopy(kwargs)
for kw in ['showfliers', 'precision']:
try:
del kwds[kw]
except KeyError:
pass
if isinstance(colnames, (tuple, list)) and (len(colnames) > 1):
n_rows = self._n_rows
n_cols = self._n_cols
self._build_strat_plot(n_rows, n_cols, **kwds)
return None
@inccol
def boxplot(self, colnames, ax=None, showfliers=True, k=1.5, precision=.01, **kwargs):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
assert len(colnames), "Only numerical columns can be plot!"
return boxplot(self._df, colnames, ax, showfliers, k, precision)
def _post_boxplot(self, res):
return post_boxplot(self._strata_plot[1], res)
### Scatterplot functions
def _strat_scatterplot(self, colnames, **kwargs):
self._build_strat_plot(self._n_rows, self._n_cols, **kwargs)
return strat_scatterplot(self._df.notHandy(), colnames[0], colnames[1])
@inccol
def scatterplot(self, colnames, ax=None, **kwargs):
assert len(colnames) == 2, "There must be two columns to plot!"
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
assert len(colnames) == 2, "Both columns must be numerical!"
return scatterplot(self._df, colnames[0], colnames[1], ax=ax)
### Histogram functions
def _strat_hist(self, colname, bins=10, **kwargs):
self._build_strat_plot(self._n_rows, self._n_cols, **kwargs)
categorical = True
if colname in self._continuous:
categorical = False
#res = strat_histogram(self._df.notHandy(), colname, bins, categorical)
res = strat_histogram(self._df, colname, bins, categorical)
self._strata_plot[0].suptitle('')
plt.tight_layout()
return res
@inccol
def hist(self, colname, bins=10, ax=None, **kwargs):
# TO DO
# include split per response/columns
        assert len(ensure_list(colname)) == 1, "Only single columns can be plotted!"
check_columns(self._df, colname)
if colname in self._continuous:
return histogram(self._df, colname, bins=bins, categorical=False, ax=ax)
else:
return histogram(self._df, colname, bins=bins, categorical=True, ax=ax)
class HandyGrouped(GroupedData):
def __init__(self, jgd, df, *args):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
self._cols = args
def agg(self, *exprs):
df = super().agg(*exprs)
handy = deepcopy(self._df._handy)
handy._group_cols = self._cols
return HandyFrame(df, handy)
def __repr__(self):
return "HandyGrouped[%s]" % (", ".join("%s" % c for c in self._group_cols))
class HandyFrame(DataFrame):
"""HandySpark version of DataFrame.
Attributes
----------
cols: HandyColumns
class to access pandas-like column based methods implemented in Spark
pandas: HandyPandas
class to access pandas-like column based methods through pandas UDFs
transformers: HandyTransformers
class to generate Handy transformers
stages: integer
number of stages in the execution plan
response: string
name of the response column
is_classification: boolean
True if response is a categorical variable
classes: list
list of classes for a classification problem
nclasses: integer
number of classes for a classification problem
ncols: integer
number of columns of the HandyFrame
nrows: integer
number of rows of the HandyFrame
shape: tuple
tuple representing dimensionality of the HandyFrame
statistics_: dict
imputation fill value for each feature
If stratified, first level keys are filter clauses for stratification
fences_: dict
fence values for each feature
If stratified, first level keys are filter clauses for stratification
is_stratified: boolean
True if HandyFrame was stratified
values: ndarray
Numpy representation of HandyFrame.
Available methods:
- notHandy: makes it a plain Spark dataframe
- stratify: used to perform stratified operations
- isnull: checks for missing values
- fill: fills missing values
- outliers: returns counts of outliers, columnwise, using Tukey's method
- get_outliers: returns list of outliers using Mahalanobis distance
- remove_outliers: filters out outliers using Mahalanobis distance
- fence: fences outliers
- set_safety_limit: defines new safety limit for collect operations
- safety_off: disables safety limit for a single operation
    - assign: appends new columns based on expressions
- nunique: returns number of unique values in each column
- set_response: sets column to be used as response / label
- disassemble: turns a vector / array column into multiple columns
- to_metrics_RDD: turns probability and label columns into a tuple RDD
"""
def __init__(self, df, handy=None):
super().__init__(df._jdf, df.sql_ctx)
if handy is None:
handy = Handy(self)
else:
handy = deepcopy(handy)
handy._df = self
handy._update_types()
self._handy = handy
self._safety = self._handy._safety
self._safety_limit = self._handy._safety_limit
self.__overriden = ['collect', 'take']
self._strat_handy = None
self._strat_index = None
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if hasattr(attr, '__call__') and name not in self.__overriden:
def wrapper(*args, **kwargs):
try:
res = attr(*args, **kwargs)
except HandyException as e:
raise HandyException(str(e), summary=False)
except Exception as e:
raise HandyException(str(e), summary=True)
if name != 'notHandy':
if not isinstance(res, HandyFrame):
if isinstance(res, DataFrame):
res = HandyFrame(res, self._handy)
if isinstance(res, GroupedData):
res = HandyGrouped(res._jgd, res._df, *args)
return res
return wrapper
else:
return attr
def __repr__(self):
return "HandyFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _get_strata(self):
plot = None
object = None
if self._strat_handy is not None:
try:
object = self._strat_handy._strata_object
except AttributeError:
pass
if object is None:
object = True
try:
plots = self._strat_handy._strata_plot[1]
#if len(plots) > 1:
# plot = plots[self._strat_index]
plot = plots
except (AttributeError, IndexError):
pass
return plot, object
def _gen_row_ids(self, *args):
# EXPERIMENTAL - DO NOT USE!
return (self
.sort(*args)
.withColumn('_miid', F.monotonically_increasing_id())
.withColumn('_row_id', F.row_number().over(Window().orderBy(F.col('_miid'))))
.drop('_miid'))
def _loc(self, lower_bound, upper_bound):
# EXPERIMENTAL - DO NOT USE!
assert '_row_id' in self.columns, "Cannot use LOC without generating `row_id`s first!"
clause = F.col('_row_id').between(lower_bound, upper_bound)
return self.filter(clause)
@property
def cols(self):
"""Returns a class to access pandas-like column based methods implemented in Spark
Available methods:
- min
- max
- median
- q1
- q3
- stddev
- value_counts
- mode
- corr
- nunique
- hist
- boxplot
- scatterplot
"""
return HandyColumns(self, self._handy)
@property
def pandas(self):
"""Returns a class to access pandas-like column based methods through pandas UDFs
Available methods:
        - between / between_time
- isin
- isna / isnull
- notna / notnull
- abs
- clip / clip_lower / clip_upper
- replace
- round / truncate
- tz_convert / tz_localize
"""
return HandyPandas(self)
@property
def transformers(self):
"""Returns a class to generate Handy transformers
Available transformers:
- HandyImputer
- HandyFencer
"""
return HandyTransformers(self)
@property
def stages(self):
"""Returns the number of stages in the execution plan.
"""
return self._handy.stages
@property
def response(self):
"""Returns the name of the response column.
"""
return self._handy.response
@property
def is_classification(self):
"""Returns True if response is a categorical variable.
"""
return self._handy.is_classification
@property
def classes(self):
"""Returns list of classes for a classification problem.
"""
return self._handy.classes
@property
def nclasses(self):
"""Returns the number of classes for a classification problem.
"""
return self._handy.nclasses
@property
def ncols(self):
"""Returns the number of columns of the HandyFrame.
"""
return self._handy.ncols
@property
def nrows(self):
"""Returns the number of rows of the HandyFrame.
"""
return self._handy.nrows
@property
def shape(self):
"""Return a tuple representing the dimensionality of the HandyFrame.
"""
return self._handy.shape
@property
def statistics_(self):
"""Returns dictionary with imputation fill value for each feature.
If stratified, first level keys are filter clauses for stratification.
"""
return self._handy.statistics_
@property
def fences_(self):
"""Returns dictionary with fence values for each feature.
If stratified, first level keys are filter clauses for stratification.
"""
return self._handy.fences_
@property
def values(self):
"""Numpy representation of HandyFrame.
"""
# safety limit will kick in, unless explicitly off before
tdf = self
if self._safety:
tdf = tdf.limit(self._safety_limit)
return np.array(tdf.rdd.map(tuple).collect())
def notHandy(self):
"""Converts HandyFrame back into Spark's DataFrame
"""
return DataFrame(self._jdf, self.sql_ctx)
def set_safety_limit(self, limit):
"""Sets safety limit used for ``collect`` method.
"""
self._handy._safety_limit = limit
self._safety_limit = limit
def safety_off(self):
"""Disables safety limit for a single call of ``collect`` method.
"""
self._handy._safety = False
self._safety = False
return self
def collect(self):
"""Returns all the records as a list of :class:`Row`.
By default, its output is limited by the safety limit.
To get original `collect` behavior, call ``safety_off`` method first.
"""
try:
if self._safety:
print('\nINFO: Safety is ON - returning up to {} instances.'.format(self._safety_limit))
return super().limit(self._safety_limit).collect()
else:
res = super().collect()
self._safety = True
return res
except HandyException as e:
raise HandyException(str(e), summary=False)
except Exception as e:
raise HandyException(str(e), summary=True)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
"""
self._handy._safety = False
res = super().take(num)
self._handy._safety = True
return res
def stratify(self, strata):
"""Stratify the HandyFrame.
Stratified operations should be more efficient than group by operations, as they
rely on three iterative steps, namely: filtering the underlying HandyFrame, performing
the operation and aggregating the results.
"""
strata = ensure_list(strata)
check_columns(self, strata)
return self._handy._stratify(strata)
def transform(self, f, name=None, args=None, returnType=None):
"""INTERNAL USE
"""
return HandyTransform.transform(self, f, name=name, args=args, returnType=returnType)
def apply(self, f, name=None, args=None, returnType=None):
"""INTERNAL USE
"""
return HandyTransform.apply(self, f, name=name, args=args, returnType=returnType)
def assign(self, **kwargs):
"""Assign new columns to a HandyFrame, returning a new object (a copy)
with all the original columns in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names.
If the values are callable, they are computed on the DataFrame and
assigned to the new columns.
If the values are not callable, (e.g. a scalar, or string),
they are simply assigned.
Returns
-------
df : HandyFrame
A new HandyFrame with the new columns in addition to
all the existing columns.
"""
return HandyTransform.assign(self, **kwargs)
@agg
def isnull(self, ratio=False):
"""Returns array with counts of missing value for each column in the HandyFrame.
Parameters
----------
ratio: boolean, default False
If True, returns ratios instead of absolute counts.
Returns
-------
counts: Series
"""
return self._handy.isnull(ratio)
@agg
def nunique(self):
"""Return Series with number of distinct observations for all columns.
        Distinct counts are approximated using Spark's ``approx_count_distinct``.
Returns
-------
nunique: Series
"""
return self._handy.nunique(self.columns) #, exact)
@inccol
def outliers(self, ratio=False, method='tukey', **kwargs):
"""Return Series with number of outlier observations according to
the specified method for all columns.
Parameters
----------
ratio: boolean, optional
If True, returns proportion instead of counts.
            Default is False.
method: string, optional
Method used to detect outliers. Currently, only Tukey's method is supported.
Default is tukey.
Returns
-------
outliers: Series
"""
return self._handy.outliers(self.columns, ratio=ratio, method=method, **kwargs)
def get_outliers(self, colnames=None, critical_value=.999):
"""Returns HandyFrame containing all rows deemed as outliers using
Mahalanobis distance and informed critical value.
Parameters
----------
colnames: list of str, optional
List of columns to be used for computing Mahalanobis distance.
Default includes all numerical columns
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.get_outliers(colnames, critical_value)
def remove_outliers(self, colnames=None, critical_value=.999):
"""Returns HandyFrame containing only rows NOT deemed as outliers
using Mahalanobis distance and informed critical value.
Parameters
----------
colnames: list of str, optional
List of columns to be used for computing Mahalanobis distance.
Default includes all numerical columns
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.remove_outliers(colnames, critical_value)
def set_response(self, colname):
"""Sets column to be used as response in supervised learning algorithms.
Parameters
----------
colname: string
Returns
-------
self
"""
check_columns(self, colname)
return self._handy.set_response(colname)
@inccol
def fill(self, *args, categorical=None, continuous=None, strategy=None):
"""Fill NA/NaN values using the specified methods.
The values used for imputation are kept in ``statistics_`` property
and can later be used to generate a corresponding HandyImputer transformer.
Parameters
----------
categorical: 'all' or list of string, optional
List of categorical columns.
            These columns are filled with their corresponding modes (most common values).
        continuous: 'all' or list of string, optional
            List of continuous value columns.
            By default, these columns are filled with their corresponding means.
            If a same-sized list is provided in the ``strategy`` argument, it uses
            the corresponding strategy for each column.
strategy: list of string, optional
If informed, it must contain a strategy - either ``mean`` or ``median`` - for
each one of the continuous columns.
Returns
-------
df : HandyFrame
A new HandyFrame with filled missing values.
"""
return self._handy.fill(*args, continuous=continuous, categorical=categorical, strategy=strategy)
@inccol
def fence(self, colnames, k=1.5):
"""Caps outliers using lower and upper fences given by Tukey's method,
using 1.5 times the interquartile range (IQR).
The fence values used for capping outliers are kept in ``fences_`` property
and can later be used to generate a corresponding HandyFencer transformer.
For more information, check: https://en.wikipedia.org/wiki/Outlier#Tukey's_fences
Parameters
----------
colnames: list of string
Column names to apply fencing.
k: float, optional
Constant multiplier for the IQR.
            Default is 1.5 (corresponding to Tukey's fences; use 3 for "far out" values)
Returns
-------
df : HandyFrame
A new HandyFrame with capped outliers.
"""
return self._handy.fence(colnames, k=k)
def disassemble(self, colname, new_colnames=None):
"""Disassembles a Vector or Array column into multiple columns.
Parameters
----------
colname: string
Column containing Vector or Array elements.
new_colnames: list of string, optional
Default is None, column names are generated using a sequentially
generated suffix (e.g., _0, _1, etc.) for ``colname``.
If informed, it must have as many column names as elements
in the shortest vector/array of ``colname``.
Returns
-------
df : HandyFrame
A new HandyFrame with the new disassembled columns in addition to
all the existing columns.
"""
return self._handy.disassemble(colname, new_colnames)
def to_metrics_RDD(self, prob_col='probability', label_col='label'):
"""Converts a DataFrame containing predicted probabilities and classification labels
into a RDD suited for use with ``BinaryClassificationMetrics`` object.
Parameters
----------
prob_col: string, optional
Column containing Vectors of probabilities.
Default is 'probability'.
label_col: string, optional
Column containing labels.
Default is 'label'.
Returns
-------
rdd: RDD
RDD of tuples (probability, label)
"""
return self._handy.to_metrics_RDD(prob_col, label_col)
class Bucket(object):
"""Bucketizes a column of continuous values into equal sized bins
to perform stratification.
Parameters
----------
colname: string
Column containing continuous values
bins: integer
Number of equal sized bins to map original values to.
Returns
-------
bucket: Bucket
Bucket object to be used as column in stratification.
"""
def __init__(self, colname, bins=5):
self._colname = colname
self._bins = bins
self._buckets = None
self._clauses = None
def __repr__(self):
return 'Bucket_{}_{}'.format(self._colname, self._bins)
@property
def colname(self):
return self._colname
def _get_buckets(self, df):
check_columns(df, self._colname)
buckets = ([-float('inf')] +
np.linspace(*df.agg(F.min(self._colname),
F.max(self._colname)).rdd.map(tuple).collect()[0],
self._bins + 1).tolist() +
[float('inf')])
buckets[-2] += 1e-7
self._buckets = buckets
return buckets
def _get_clauses(self, buckets):
clauses = []
clauses.append('{} < {:.4f}'.format(self._colname, buckets[1]))
for b, e in zip(buckets[1:-2], buckets[2:-1]):
clauses.append('{} >= {:.4f} and {} < {:.4f}'.format(self._colname, b, self._colname, e))
clauses[-1] = clauses[-1].replace('<', '<=')
clauses.append('{} > {:.4f}'.format(self._colname, buckets[-2]))
self._clauses = clauses
return clauses
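# Illustrative sketch (comments only) of how a Bucket is meant to be used in stratification;
# `hdf` is an assumed HandyFrame and 'age' / 'fare' are hypothetical numeric columns:
#
#   hdf.stratify([Bucket('age', bins=3)]).cols['fare'].mean()
#
# For bins=3 over observed ages 0-60, _get_clauses yields 'age >= 0.0000 and age < 20.0000',
# 'age >= 20.0000 and age < 40.0000', 'age >= 40.0000 and age <= 60.0000', plus two
# open-ended guard clauses below the minimum and above the maximum.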
class Quantile(Bucket):
"""Bucketizes a column of continuous values into quantiles
to perform stratification.
Parameters
----------
colname: string
Column containing continuous values
bins: integer
Number of quantiles to map original values to.
Returns
-------
quantile: Quantile
Quantile object to be used as column in stratification.
"""
def __repr__(self):
return 'Quantile{}_{}'.format(self._colname, self._bins)
def _get_buckets(self, df):
buckets = ([-float('inf')] +
df.approxQuantile(col=self._colname,
probabilities=np.linspace(0, 1, self._bins + 1).tolist(),
relativeError=0.01) +
[float('inf')])
buckets[-2] += 1e-7
return buckets
class HandyColumns(object):
"""HandyColumn(s) in a HandyFrame.
Attributes
----------
numerical: list of string
List of numerical columns (integer, float, double)
categorical: list of string
List of categorical columns (string, integer)
continuous: list of string
        List of continuous columns (float, double)
string: list of string
List of string columns (string)
array: list of string
List of array columns (array, map)
"""
def __init__(self, df, handy, strata=None):
self._df = df
self._handy = handy
self._strata = strata
self._colnames = None
self.COLTYPES = {'continuous': self.continuous,
'categorical': self.categorical,
'numerical': self.numerical,
'string': self.string,
'array': self.array}
def __getitem__(self, *args):
if isinstance(args[0], tuple):
args = args[0]
item = args[0]
if self._strata is None:
if self._colnames is None:
if item == slice(None, None, None):
item = self._df.columns
if isinstance(item, str):
try:
# try it as an alias
item = self.COLTYPES[item]
except KeyError:
pass
check_columns(self._df, item)
self._colnames = item
if isinstance(self._colnames, int):
idx = self._colnames + (len(self._handy._group_cols) if self._handy._group_cols is not None else 0)
assert idx < len(self._df.columns), "Invalid column index {}".format(idx)
self._colnames = list(self._df.columns)[idx]
return self
else:
try:
n = item.stop
if n is None:
n = -1
except:
n = 20
if isinstance(self._colnames, (tuple, list)):
res = self._df.notHandy().select(self._colnames)
if n == -1:
if self._df._safety:
print('\nINFO: Safety is ON - returning up to {} instances.'.format(self._df._safety_limit))
n = self._df._safety_limit
if n != -1:
res = res.limit(n)
res = res.toPandas()
self._handy._safety = True
self._df._safety = True
return res
else:
return self._handy.__getitem__(self._colnames, n)
else:
if self._colnames is None:
if item == slice(None, None, None):
item = self._df.columns
if isinstance(item, str):
try:
# try it as an alias
item = self.COLTYPES[item]
except KeyError:
pass
self._strata._handycolumns = item
return self._strata
def __repr__(self):
colnames = ensure_list(self._colnames)
return "HandyColumns[%s]" % (", ".join("%s" % str(c) for c in colnames))
@property
def numerical(self):
"""Returns list of numerical columns in the HandyFrame.
"""
return self._handy._numerical
@property
def categorical(self):
"""Returns list of categorical columns in the HandyFrame.
"""
return self._handy._categorical
@property
def continuous(self):
"""Returns list of continuous columns in the HandyFrame.
"""
return self._handy._continuous
@property
def string(self):
"""Returns list of string columns in the HandyFrame.
"""
return self._handy._string
@property
def array(self):
"""Returns list of array or map columns in the HandyFrame.
"""
return self._handy._array
def mean(self):
return self._handy.mean(self._colnames)
def min(self):
return self._handy.min(self._colnames)
def max(self):
return self._handy.max(self._colnames)
def median(self, precision=.01):
"""Returns approximate median with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.median(self._colnames, precision)
def stddev(self):
return self._handy.stddev(self._colnames)
def var(self):
return self._handy.var(self._colnames)
def percentile(self, perc, precision=.01):
"""Returns approximate percentile with given precision.
Parameters
----------
perc: integer
Percentile to be computed
precision: float, optional
Default is 0.01
"""
return self._handy.percentile(self._colnames, perc, precision)
def q1(self, precision=.01):
"""Returns approximate first quartile with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.q1(self._colnames, precision)
def q3(self, precision=.01):
"""Returns approximate third quartile with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.q3(self._colnames, precision)
def _value_counts(self, dropna=True, raw=True):
assert len(ensure_list(self._colnames)) == 1, "A single column must be selected!"
return self._handy._value_counts(self._colnames, dropna, raw)
def value_counts(self, dropna=True):
"""Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include counts of missing values.
Returns
-------
counts: Series
"""
assert len(ensure_list(self._colnames)) == 1, "A single column must be selected!"
return self._handy.value_counts(self._colnames, dropna)
def entropy(self):
"""Returns object containing entropy (base 2) of each column.
Returns
-------
entropy: Series
"""
return self._handy.entropy(self._colnames)
def mutual_info(self):
"""Returns object containing matrix of mutual information
between every pair of columns.
Returns
-------
mutual_info: pd.DataFrame
"""
return self._handy.mutual_info(self._colnames)
def mode(self):
"""Returns same-type modal (most common) value for each column.
Returns
-------
mode: Series
"""
colnames = ensure_list(self._colnames)
modes = [self._handy.mode(colname) for colname in colnames]
if len(colnames) == 1:
return modes[0]
else:
            return pd.concat(modes, axis=0)
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # used by the catch_all route below when mirroring the client in debug mode
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
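# Illustrative examples (comments only) for the helper above:
#   Convert(['a', 1, 'b', 2])  ->  {'a': 1, 'b': 2}
#   Consecutive elements are paired up; the last element of an odd-length list is dropped.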
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
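# Worked example (comments only) of the label encoding performed above: rows are sorted by
# the '*'-marked target column in descending order and classes are numbered in order of
# first appearance, e.g. targets ['virginica', 'virginica', 'setosa'] give
# target_names = ['virginica', 'setosa'] and yData = [0, 0, 1].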
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
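# Illustrative sketch (comments only): FunMDS, FunTsne and FunUMAP are helper functions
# defined elsewhere in this module. A typical 2-D embedding of XData with the libraries
# imported above could look like this (the exact parameters used here are assumptions):
#
#   mds_xy = MDS(n_components=2, random_state=RANDOM_SEED).fit_transform(XData)
#   tsne_xy = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(XData)
#   umap_xy = umap.UMAP(n_components=2, random_state=RANDOM_SEED).fit_transform(XData)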
# Sending the data space projection results to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
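# Illustrative example (comments only): with df.columns == ['a', 'b', 'c'],
# column_index(df, ['c', 'a']) returns [2, 0], the positional indices of the queried columns.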
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)  # one entry per feature
)
return out
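# Illustrative sketch (comments only): feeding per-class contributions from a fitted tree
# ensemble into the helper above; `X` and `Y` are assumed NumPy arrays of features and labels.
#
#   rf = RandomForestClassifier(random_state=RANDOM_SEED).fit(X, Y)
#   per_class = class_feature_importance(X, Y, rf.feature_importances_)
#   # per_class[c][j]: mean scaled value of feature j within class c, weighted by its importance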
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
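# 'blend' evaluates every model on a single 80/20 shuffle split, while any other
# mode (stacking) uses 5-fold cross-validation in the grid search below.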
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# calculate the performance and other results for every algorithm and model (results are cached by `memory`)
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
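# NOTE: `results` is appended to (not created) inside this function: each call adds
# 9 entries (positions 0-8 above) for one algorithm, which is why the Preprocessing*
# helpers below index allParametersPerformancePerModel with a stride of 9
# (6, 15, 24, ... for metrics; 7, 16, 25, ... for probabilities; 1, 10, 19, ... for parameters).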
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
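# PreprocessingPred averages each instance's predicted class probabilities across
# all currently selected models (a soft-voting style consensus) and feeds the
# prediction-space projections computed below.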
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
_, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
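# The Procrustes step aligns the MDS projection of the remaining models with the
# projection of all models, so removing models only minimally rotates/shifts the
# points already on screen; note that listIDsRemoved actually holds the IDs of the
# models kept after the drop.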
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Assignment 2 - Pandas Introduction
# All questions are weighted the same in this assignment.
# ## Part 1
# The following code loads the olympics dataset (olympics.csv), which was derived from the Wikipedia entry on [All Time Olympic Games Medals](https://en.wikipedia.org/wiki/All-time_Olympic_Games_medal_table), and does some basic data cleaning.
#
# The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # of games, total # of medals. Use this dataset to answer the questions below.
# In[ ]:
import pandas as pd
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
import sqlite3
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
#@Author: <NAME>
#@Version: 1.0
#@Description: Function for show up the odds history for 2 team
def getOddsHistoryByTeam(team1_id,team2_id):
db_con = sqlite3.connect("database.sqlite")
Liga_match_history = pd.read_sql_query("select season,home_team_api_id,away_team_api_id,B365H,B365D,B365A from Match where home_team_api_id= %s and away_team_api_id= %s" % (team1_id,team2_id), db_con)
import os
import numpy as np
import pandas as pd
import shap
import json
from ngboost import NGBRegressor
from ngboost.distns import Normal
from ngboost.learners import default_tree_learner
from ngboost.scores import MLE, LogScore
from classes.inputs_gatherer import InputsGatherer
class FeaturesAnalyzer:
"""
Given a dataset composed of features on the columns and days on the rows of a pandas df, this class computes the
best features and their importance
"""
def __init__(self, inputs_gatherer, forecast_type, cfg, logger):
"""
Constructor
:param inputs_gatherer: Inputs Gatherer
:type inputs_gatherer: InputsGatherer
:param forecast_type: Forecast type (MOR | EVE)
:type forecast_type: str
:param cfg: FTP parameters for the files exchange
:type cfg: dict
:param logger: Logger
:type logger: Logger object
"""
# set the variables
self.inputs_gatherer = inputs_gatherer
self.forecast_type = forecast_type
self.cfg = cfg
self.logger = logger
self.dataFrames = None
self.output_folder_name = None
self.current_name = None
self.nan_features = None
def dataset_creator(self):
"""
Build the datasets according to the instructions in the config file in the datasetSettings section
"""
self.inputs_gatherer.dataframe_builder_regions()
def update_datasets(self, name, output_dfs, target_columns):
"""
Initialize folders and add metadata to container of datasets
"""
folder_path = self.inputs_gatherer.output_folder_creator(name)
file_path_df = folder_path + folder_path.split(os.sep)[1] + '_dataset.csv'
if not os.path.isfile(file_path_df):
self.logger.error('File %s does not exist' % file_path_df)
tmp_df = pd.read_csv(file_path_df)
# !/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Bruce_H_Cottman"
__license__ = "MIT License"
from typing import List, Union, Hashable
# any,
# Callable,
# Dict,
# ,
# Iterable,
# Set,
# Tuple,
import numpy as np
import pandas as pd
import re
import unicodedata
# from pandas.util._validators import validate_bool_kwarg
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
# import pandas_flavor as pf
warnings.filterwarnings("ignore")
from sklearn.impute import SimpleImputer
# paso import
from paso.base import _Check_No_NA_Values, _array_to_string
from paso.base import _dict_value, _check_non_optional_kw
from paso.base import DataFrame_to_Xy, Xy_to_DataFrame
from paso.base import pasoFunction, pasoDecorators, raise_PasoError
from paso.base import _must_be_list_tuple_int, _merge_dicts
from paso.base import register_DataFrame_method
from loguru import logger
class Imputers(pasoFunction):
"""
Class to impute NaM from dataset. Encoding and scaling
and other data-set preprocessing should not be done here.
description_file:
project: HPKinetics/paso #[optional]
verbose: True #[optional]
inplace: True #[optional]
kind:
<strategy>:
description: "most_frequent value of feature is used for all NaN values"
genus: Imputer #[optional]
type: all-types
strategy: (list)
_imputers_: substituting np.nan with strategy keywords.
class global given below
Note:
Impute before other cleaning and encoding, These steps all expect
that NaNs have been removed. Use method Cleaners.Values_to_nan()
beforehand to change ant incatitot values to NaN.
"""
_imputer_type_supported_ = ("numeric", "all")
_imputer_simple_stategies_ = {
"median": "median",
"mean": "mean",
"most_frequent": "most_frequent",
"random": "random",
}
_imputer_advanced_stategies_ = {"knn": "knn", "mice": "mice"}
_imputers_ = _merge_dicts(_imputer_advanced_stategies_, _imputer_simple_stategies_)
@pasoDecorators.InitWrap()
def __init__(self, **kwargs) -> None:
"""
Keywords:
description_file:
Returns: instance of class imputer
"""
super().__init__()
@staticmethod
def imputers() -> List[str]:
"""
Parameters:
None
Returns: List of available class inputers names.
"""
return [k for k in Imputers._imputers_.keys()]
@pasoDecorators.TTWrap(array=False, _Check_No_NAs=False)
def transform(
self,
X: pd.DataFrame,
verbose: bool = True,
inplace: bool = True,
features: List[str] = None,
) -> pd.DataFrame:
"""
method to transform dataset by imputing NaN values. Encoding and scaling
and other data-set preprocessing should not be done here.
Parameters:
X: dataset
Keywords:
features: default: None
None: do all features (columns) in X
[a,b....z] do only the features listed
inplace:
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""
# todo pull datatypes automatically, just numericall and all??
# todo support other data types besides i.e. most_frequent can support
# cat/object/string if na
# todo incorporate this A Comparison of Six Methods for Missing Data Imputation
# https://www.omicsonline.org/open-access/a-comparison-of-six-methods-for-missing-data-imputation-2155-6180-1000224.pdf
# https://impyute.readthedocs.io/en/master/index.html
# todo checking passed arguments types are correct
# enforce order of method calls
# currently support only one learner, very brittle parser
if self.kind == {}:
raise_PasoError(
"keyword kind must be present at top level:{}:".format(
self.ontology_kwargs
)
)
if self.kind_name not in Imputers._imputers_:
raise_PasoError(
"no operation named: {} in imputers;: {}".format(
self.kind_name, Imputers._imputers_.keys()
)
)
if self.inplace:
nX = X
else:
nX = X.copy()
if features == None:
features = nX.columns
logger.warning(
"\nKeyword arg:features: not passed. All features of dataset will be checked for imputation\n{}".format(
features
)
)
else:
pass
## some of features not in columns
if not len(set(features).difference(set(self.Xcolumns))) == 0:
raise_PasoError(
"\nfeature given {} not in \ndataset columns:{}".format(
features, nX.columns
)
)
self.imputer_name = self.kind_name
# reject any feature with one or zero unique values (e.g. all-NaN or constant)
for feature in features:
if nX[feature].nunique() <= 1:
raise_PasoError(
"Impute.transform: 1 or less unique values: {}.\n Remove this feature before calling Impute.".format(
nX[feature].unique()
)
)
if self.kind_name in Imputers._imputer_simple_stategies_:
# assume the target does not have missing values
# todo handling target rows with missing values
# with simple row elimination? The idea being you should not try to predict missing values?
imputer = SimpleImputer(strategy=self.kind_name)
imputer.fit(nX[features])
nX[features] = imputer.transform(nX[features])
elif self.kind_name in Imputers._imputer_advanced_stategies_:
self.imputer = Imputers._imputer_advanced_stategies_[self.imputer_name](
**self.kind_name_kwargs
)
y = None # todo fix _imputer_advanced_stategies_
self.imputer.fit(nX, y)
self.imputer_type = self.type
nX[features] = self.imputer.transform(nX[features])
return nX
@register_DataFrame_method
def paso_impute(
X: pd.DataFrame,
class_instance: Imputers,
features: List[str] = None,
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
"""
method to transform dataset by imputing NaN values. Encoding and scaling
and other data-set preprocessing should not be done here.
Parameters:
X: dataset
Keywords:
class_instance:
target:
inplace:
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""
if inplace:
nX = X
else:
nX = X.copy()
return class_instance.transform(
nX, features=features, inplace=True, verbose=verbose
)
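# Hedged usage sketch (the description-file path and its contents are assumptions,
# not taken from this repository):
#
# imputer = Imputers(description_file="descriptions/impute/most_frequent.yaml")
# df = df.paso_impute(class_instance=imputer, features=["age", "income"], verbose=False)
#
# Any strategy listed by Imputers.imputers() can be named in the description
# file's `kind:` section.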
class Cleaners(pasoFunction):
"""
The class of data cleaners.
"""
_statistics = [
"kurt",
"mad",
"max",
"mean",
"median",
"min",
"sem",
"skew",
"sum",
"std",
"var",
"nunique",
"all",
]
col_name_NaN_ratio = "NaN_ratio"
####### 1
@register_DataFrame_method
def paso_values_to_nan(*args, **kwargs) -> pd.DataFrame:
return values_to_nan(*args, **kwargs)
def values_to_nan(
X: pd.DataFrame, values: List[str] = [], inplace: bool = True, verbose: bool = True
) -> pd.DataFrame:
"""
Different values can indicate, a value is missing. For example,
- ``999`` could mean "did not answer" in some features.
- ``NA`` could mean not-applicable for this feature/record.
- ``-1`` could mean missing for this feature/record.
and so on.
Parameters:
X: dataset
values:
inplace:
True: mutate X, return X
False: do not change X, return X.copy()
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""
values = _must_be_list_tuple_int(values)
if values == []:
return X
y = X.replace(to_replace=values, value=np.nan, inplace=inplace)
if verbose:
logger.info("Values_to_nan {}".format(str(values)))
if inplace:
return X
else:
return y # .replace accomplishes the inplace
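# Hedged usage sketch (the sentinel values below are hypothetical):
#
# df = pd.DataFrame({"age": [25, 999, 31], "code": ["A", "NA", "B"]})
# df = values_to_nan(df, values=[999, "NA"], verbose=False)
# # both sentinels are now np.nan and can be imputed or dropped downstream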
#
########## 2
@register_DataFrame_method
def paso_calculate_NaN_ratio(*args, **kwargs) -> pd.DataFrame:
return calculate_NaN_ratio(*args, **kwargs)
def calculate_NaN_ratio(
X: pd.DataFrame, axis=1, inplace: bool = True, verbose: bool = True
) -> [pd.DataFrame, pd.DataFrame]:
"""
A row with a large ratio of missing values (an observation)
is statistically irrelevant. However, the row is not removed;
instead nulls/total_feature_count is calculated for each row and
a new feature ``NaN_ratio`` is added to the returned **pandas** DataFrame.
Note:
Detecting and correcting for missing and outlier (good or bad)
values is an evolving area of research.
Parameters:
X: dataset
Keywords:
axis : {0 or 'rows', 1 or 'columns'}
0: compute the per-row ratio and add it as the ``NaN_ratio`` column;
1: compute the per-feature ratio and return it as a one-row DataFrame.
inplace:
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
Returns:
axis = 0 pd.DataFrame n_row
axis = 1 pd.DataFrame n_column
"""
total_row_count = X.shape[0]
total_feature_count = X.shape[1]
if verbose:
logger.info("Calculate_NA_ratio")
if axis == 1:
features = X.columns
column_mvr = pd.DataFrame(columns=features)
for feature in features:
r = 1 - X[feature].count() / total_row_count
column_mvr.at[0, feature] = r
if inplace:
if axis == 0:
X[Cleaners.col_name_NaN_ratio] = 1 - X.count(axis=1) / total_feature_count
return X
elif axis == 1:
return column_mvr
else:
if axis == 0:
y = X.copy()
y[Cleaners.col_name_NaN_ratio] = 1 - y.count(axis=1) / total_feature_count
return y
if axis == 1:
return column_mvr
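# Hedged usage sketch:
#
# df = calculate_NaN_ratio(df, axis=0) # adds the per-row 'NaN_ratio' column
# per_feature = calculate_NaN_ratio(df, axis=1) # one-row frame of per-feature NaN ratios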
############## 3
@register_DataFrame_method
def paso_delete_NA_Features(*args, **kwargs) -> pd.DataFrame:
return delete_NA_Features(*args, **kwargs)
def delete_NA_Features(
X: pd.DataFrame,
threshold: List = [],
axis: int = 0,
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
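"""
Drop observations (axis=0) or features (axis=1) whose ratio of missing
values is at or above ``threshold`` (a float between 0 and 1), reusing
calculate_NaN_ratio() for the per-row / per-feature ratios.
Parameters:
X: dataset
Keywords:
threshold: maximum tolerated NaN ratio
axis: 0 drops rows, 1 drops features
inplace:
True: mutate X, return X
False: operate on a copy
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""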
if inplace:
y = X
else:
y = X.copy()
if axis == 0: # r_mvr, drop rows
y = calculate_NaN_ratio(y, inplace=inplace, verbose=verbose, axis=axis)
return y[y[Cleaners.col_name_NaN_ratio] < threshold]
elif axis == 1: # drop features/columns
column_mvr = calculate_NaN_ratio(y, inplace=inplace, verbose=verbose, axis=axis)
for feature in y.columns:
if column_mvr.at[0, feature] >= threshold:
y.drop([feature], inplace=True, axis=1)
return y
else:
raise_PasoError(
"{} axis can be 0 ot 1 was:{}".format("column_missing_value_ratio", axis)
)
########## 4
@register_DataFrame_method
def paso_delete_Duplicate_Features(*args, **kwargs) -> pd.DataFrame:
return delete_Duplicate_Features(*args, **kwargs)
def delete_Duplicate_Features(
X: pd.DataFrame,
features: List[str] = [],
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
"""
If a feature has the same values by index as another feature
then one of those features should be deleted. The duplicate
feature is redundant and will have no predictive power.
Duplicate features are quite common as an enterprise's
database or data lake ages and different data sources are added.
Parameters:
X: dataset
Keywords:
features: (default: None)
None: do all NOT features (columns) in X
[a,b....z] do only features listed
inplace:
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""
_Check_No_NA_Values(X)
equal = {}
for nth, f in enumerate(X.columns[0:-1]):
if f in features:
break
# the twisted logic below is for speed: most values are not equal, so test that first
for mth, g in enumerate(X.columns[nth + 1 :]):
if (X[f].values != X[g].values).any():
pass
elif g not in equal:
equal[g] = 1
drop_list = list(equal.keys())
if len(drop_list) > 0:
y = X.drop(columns=drop_list, inplace=inplace)
if verbose:
logger.info("Duplicate_Features_Removed: {}".format(str(drop_list)))
if inplace:
return X
else:
return y
########## 5
@register_DataFrame_method
def paso_delete_Features_with_Single_Unique_Value(*args, **kwargs) -> pd.DataFrame:
return delete_Features_with_Single_Unique_Value(*args, **kwargs)
def delete_Features_with_Single_Unique_Value(
X: pd.DataFrame,
features: List[str] = [],
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
"""
This method finds all the features which have only one unique value.
The variation between values is zero. All these features are removed from
the DataFrame as they have no predictive ability.
Parameters:
X: dataset
Keywords:
features: (default: None)
None: do all NOT features (columns) in X
[a,b....z] do only features listed
inplace: ,
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
"""
_Check_No_NA_Values(X)
efs = []
n = 0
for f in X.columns:
if f in features:
pass
# weirdness where df has
# 2 or more features with same name
# len(df[f].squeeze().shape) == 1) and
elif X[f].nunique() == 1:
efs.append(f)
n += 1
if n > 0:
if verbose:
logger.info("Eliminate_Single_Unique_Value_Features {}".format(str(efs)))
for f in efs:
y = X.drop(f, inplace=inplace, axis=1)
if inplace:
return X
else:
return y
# 5nb.
@register_DataFrame_method
def paso_delete_Features_with_All_Unique_Values(*args, **kwargs) -> pd.DataFrame:
return delete_Features_with_All_Unique_Values(*args, **kwargs)
def delete_Features_with_All_Unique_Values(
X: pd.DataFrame,
features: List[str] = [],
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
"""
This method finds all the features where every value is s
unique value, whose unique count equals value count.
All these features are removed from
the DataFrame as they have no predictive ability.
Parameters:
X: dataset
Keywords:
features: (default: None)
None: do all NOT features (columns) in X
[a,b....z] do only features listed
inplace:
True: mutate X, return X
False: do no change X, return X.copy()
type: any to be laced with np.NaN
verbose:
True: output
False: silent
Returns: X or X.copy - see inplace
Note: All NaN imputed or removed.
"""
_Check_No_NA_Values(X)
efs = []
n = 0
for f in X.columns:
if f in features:
pass
# weirdness where df has
# 2 or more features with same name
# len(df[f].squeeze().shape) == 1) and
elif X[f].nunique() == X[f].count():
efs.append(f)
n += 1
if n > 0:
if verbose:
logger.info("delete_Features_with_All_Unique_Values {}".format(str(efs)))
for f in efs:
y = X.drop(f, inplace=inplace, axis=1)
if inplace:
return X
else:
return y
########## 6.a
@register_DataFrame_method
def paso_statistics(*args, **kwargs) -> pd.DataFrame:
return statistics(*args, **kwargs)
def statistics() -> List[str]:
"""
List of all statistic terms available. The term "all" triggers
calculation of all statistics.
Returns: List[str]
"""
return [k for k in Cleaners._statistics]
############ 6.b
@register_DataFrame_method
def paso_feature_Statistics(*args, **kwargs) -> pd.DataFrame:
return feature_Statistics(*args, **kwargs)
def feature_Statistics(
X: pd.DataFrame,
statistics: str = "all",
concat: bool = True,
axis: int = 0,
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
# todo row statistics
"""
Calculate the statistics of each feature and return a DataFrame
where each row is that statistic.
This method can be used as a diagnostic tool (concat=False)
to decide whether the sd or any other statistic is too small
and thus has low predictive power. It can also be added onto the dataset
(concat=True).
When the target feature is removed, each statistic helps describe
the distribution of the feature (or row).
This will only make sense for those features
whose values are numeric.
Parameters:
X: dataset
Keywords:
statistics: Must be 'all'or a list of symbols in Cleaners._statistics
concat:
if concat is True, ten the statistics is concated to right (or bottom)
of the DataFrame is returned
axis : int {0 or ‘rows’, 1 or ‘columns’)
Whether to calculate statistics for the rows (0)
or columns (1).
inplace:
True: mutate X, return X
False: do no change X, return df-stats
verbose:
True: output
False: silent
Returns:
axis = 0 pd.DataFrame n_row (concat on right)
axis = 1 pd.DataFrame n_column (concat on bottom)
Note:
All NaN values should be imputed or removed.
Including target will cause leakage of ground truth into test.
Statistics only make sense when all vales are scaled.
"""
# work around automated review complaint
if inplace:
y = X
else:
y = X.copy()
_Check_No_NA_Values(X)
_must_be_list_tuple_int(statistics)
if "all" in statistics:
statistics = Cleaners._statistics[:-1]
tmp_statistics = []
for stat in statistics:
if stat in Cleaners._statistics[:-1]:
tmp_statistics.append(stat)
else:
raise_PasoError(
"\n One of {}\n is unknown statistic from the list of accepted statistics\n{}".format(
statistics, Cleaners._statistics[:-1]
)
)
statistics = tmp_statistics
if verbose:
logger.info("Features_Statistics {}".format(statistics))
# by column stats
if axis == 1:
column_stats = pd.DataFrame()
for stat in statistics:
m = getattr(X, stat)(axis=1)  # equivalent to X.<stat>(axis=1) without eval
column_stats = pd.concat([column_stats, m], axis=1)
column_stats.columns = statistics
if concat:
return pd.concat([y, column_stats], axis=1)
else:
return column_stats
# by row stats
elif axis == 0:
s = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 11:13:15 2019
@author: jkern
"""
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def hydro(sim_years):
#########################################################################
# The purpose of this script is to use synthetic streamflows at major California
# reservoir sites to simulate daily hydropower production for the PG&E and SCE
# zones of the California electricity market (CAISO), using parameters optimized
# via a differential evolution algorithm.
#########################################################################
# load California storage reservoir (ORCA) sites
df_sites = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name = 'ORCA',header=0)
ORCA_sites = list(df_sites)
# load upper generation amounts for each predicted hydropower dam (PG&E and SCE)
upper_gen = pd.read_excel('CA_hydropower/upper.xlsx',header =0)
# month-day calender
calender = pd.read_excel('CA_hydropower/calender.xlsx',header=0)
# load simulated full natural flows at each California storage reservoir (ORCA site)
df_sim = pd.read_csv('Synthetic_streamflows/synthetic_streamflows_CA.csv',header=0,index_col=0)
df_sim = df_sim.loc[0:(sim_years+3)*365,:]
# load simulated outflows calculated by ORCA
df_ORCA = pd.read_csv('ORCA_output.csv')
outflow_sites = ['SHA_otf','ORO_otf','YRS_otf','FOL_otf','NML_otf','DNP_otf','EXC_otf','MIL_otf','ISB_otf','SUC_otf','KWH_otf','PFT_otf']
for i in range(0,len(df_ORCA)):
for s in outflow_sites:
df_sim.loc[i,s] = df_ORCA.loc[i,s]
sim_years = sim_years+3
#Add month and day columns to the dataframe
Month = []
Day = []
count = 0
for i in range(0,len(df_sim)):
if count < 365:
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
else:
count = 0
Month = np.append(Month,calender.loc[count,'Month'])
Day = np.append(Day,calender.loc[count,'Day'])
count = count + 1
df_sim['Month']=Month
df_sim['Day']=Day
# calculate simulated totals
Sim_totals = []
for i in range(0,sim_years):
sample = df_sim.loc[i*365:i*365+365,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Sim_totals = np.append(Sim_totals,total)
# load historical full natural flows for 2001, 2005, 2010 and 2011
df_hist = pd.read_excel('CA_hydropower/hist_reservoir_inflows.xlsx',header=0)
Hist_totals = []
Hist_years = [2001,2005,2010,2011]
for i in Hist_years:
sample = df_hist[df_hist['year'] == i]
sample = sample.loc[:,'ORO_fnf':'ISB_fnf']
total = np.sum(np.sum(sample))
Hist_totals = np.append(Hist_totals,total)
# find most similar historical year for each simulated year
Rule_list=[]
for i in range(0,sim_years):
Difference=abs(Sim_totals[i]- Hist_totals)
#Select which rule to use
for n in range(0,len(Hist_years)):
if Difference[n]==np.min(Difference):
Rule=n
Rule_list.append(Rule)
# PGE hydro projects
PGE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='PGE',header=0)
PGE_dams = list(PGE_names.loc[:,'Balch 1':])
PGE_Storage=[PGE_dams[3],PGE_dams[7],PGE_dams[8],PGE_dams[9]]
PGE_No_Data_Dams=[PGE_dams[2],PGE_dams[4],PGE_dams[10],PGE_dams[11],PGE_dams[15],PGE_dams[16],PGE_dams[17],PGE_dams[26],PGE_dams[30],PGE_dams[38],PGE_dams[39],PGE_dams[55],PGE_dams[60],PGE_dams[65]]
## SCE hydro projects
SCE_names = pd.read_excel('CA_hydropower/sites.xlsx',sheet_name ='SCE',header=0)
SCE_dams = list(SCE_names.loc[:,'Big_Creek_1 ':])
SCE_No_Data_Dams=[SCE_dams[7],SCE_dams[8],SCE_dams[12]]
#Simulate all the PGE inflow dams
check_unused = []
PGE_name_list = []
SCE_name_list = []
for name in PGE_dams:
est_power = []
for year in range(0,sim_years):
if name in PGE_No_Data_Dams:
pass
elif name in PGE_Storage:
# which operating rule to use?
Rule=Rule_list[year]
File_name='CA_hydropower/A1.0_FNF_Storage_Rule_' + str(name) +'.txt'
Temp_Rule= | pd.read_csv(File_name,delimiter=' ',header=None) | pandas.read_csv |
import gsum as gm
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from stats_utils import *
from matter import *
import seaborn as sns
import time
from os import path
mpl.rcParams['text.usetex'] = True
mpl.rcParams['figure.dpi'] = 150
mpl.rcParams['font.family'] = 'serif'
mpl.rc('savefig', transparent=False, bbox='tight', pad_inches=0.05, format='pdf')
df = pd.read_csv('../data/all_matter_data.csv')
# Convert differences to total prediction at each MBPT order
mbpt_orders = ['Kin', 'MBPT_HF', 'MBPT_2', 'MBPT_3', 'MBPT_4']
df[mbpt_orders] = df[mbpt_orders].apply(np.cumsum, axis=1)
# 'total' is now unnecessary. Remove it.
df.pop('total')
orders = np.array([0, 2, 3, 4])
# body = 'NN-only'
body = 'NN+3N'
Lambda = 450
info_list = []
from itertools import product
for body, Lambda in product(['NN+3N', 'NN-only'], [450, 500]):
fits = {450: [1, 7], 500: [4, 10]}
train1 = slice(None, None, 5)
valid1 = slice(2, None, 5)
# valid1 = np.array([i % 5 != 0 for i in range(len())])
[fit_n2lo, fit_n3lo] = fits[Lambda]
savefigs = True
Lb = 600
breakdown_min = 300
breakdown_max = 1000
breakdown_num = 100
Lb_vals = np.linspace(breakdown_min, breakdown_max, breakdown_num)
Lb_logprior_vals = Lb_logprior(Lb_vals)
ls_min = 0.1
ls_max = 1.5
ls_num = 50
ls_vals = np.linspace(ls_min, ls_max, ls_num)
# ls_min = ls_max = ls_num = ls_vals = None
nugget = 1e-4
kernel1 = RBF(length_scale=1, length_scale_bounds=(5e-2, 4)) + \
WhiteKernel(noise_level=nugget, noise_level_bounds='fixed')
# kernel1 = RBF(length_scale=1, length_scale_bounds=(1e-2, 100)) + \
# WhiteKernel(noise_level=nugget, noise_level_bounds='fixed')
kernel1_theta = kernel1.theta
ref1 = 16
hyperparams = dict(
center=0,
disp=0,
df=1,
scale=1
)
mask_fit = np.isin(df['fit'], fits[Lambda]) | np.isnan(df['fit'])
mask1 = \
(df['Body'] == body) & \
mask_fit & \
(df['Lambda'] == Lambda)
# df_fit = df[mask_fit]
df_n = df[mask1 & (df['x'] == 0)]
df_s = df[mask1 & (df['x'] == 0.5)]
kf_n = df_n[df_n['OrderEFT'] == 'LO']['kf'].values
kf_s = df_s[df_s['OrderEFT'] == 'LO']['kf'].values
density = df_n[df_n['OrderEFT'] == 'LO']['n'].values
kf_d = kf_n.copy()
# valid1 = np.arange(len(kf_n)) % 5 != 0
Kf_n = kf_n[:, None]
Kf_s = kf_s[:, None]
Kf_d = kf_d[:, None]
y1_n = np.array([df_n[df_n['OrderEFT'] == order]['MBPT_4'].values for order in df_n['OrderEFT'].unique()]).T
y1_s = np.array([df_s[df_s['OrderEFT'] == order]['MBPT_4'].values for order in df_s['OrderEFT'].unique()]).T
y1_d = y1_n - y1_s
fig_path = 'new_figures'
analysis_n = MatterConvergenceAnalysis(
X=Kf_n, y=y1_n, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='neutron', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
analysis_s = MatterConvergenceAnalysis(
X=Kf_s, y=y1_s, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='symmetric', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
analysis_d = MatterConvergenceAnalysis(
X=Kf_d, y=y1_d, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='difference', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
t_start = time.time()
analysis_n.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
analysis_s.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
analysis_d.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
print(time.time() - t_start)
df_Lb_pdf_all = analysis_n.df_breakdown.copy()
df_Lb_pdf_all['pdf'] = analysis_n.df_breakdown['pdf'] * analysis_s.df_breakdown['pdf'] * analysis_d.df_breakdown['pdf']
df_Lb_pdf_all['system'] = 'All'
def dict_to_str(d):
s = ''
for key, value in d.items():
s += f'{key}-{value}_'
s = s.replace('.', 'p')
return s[:-1]
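# Illustrative example (not in the original script): dict_to_str builds filename-safe tags
# from hyperparameter dicts, replacing '.' with 'p', e.g.
# >>> dict_to_str({'center': 0, 'scale': 1.5})
# 'center-0_scale-1p5'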
fig, ax = plt.subplots(figsize=(3.4, 4.4))
df_Lb_pdf = pd.concat([analysis_n.df_breakdown, analysis_s.df_breakdown, analysis_d.df_breakdown, df_Lb_pdf_all])
ax = pdfplot(
x=r'$\Lambda_b$ (MeV)', y='system', pdf='pdf', data=df_Lb_pdf, hue='Order',
order=[r'$E/N$', r'$E/A$', r'$S_2$', 'All'], hue_order=[r'N$^2$LO', r'N$^3$LO'], cut=1e-2, linewidth=1,
palette="coolwarm", saturation=1., ax=ax, margin=0.3,
)
ax.set_xlim(0, 1200)
ax.set_xticks([0, 300, 600, 900, 1200])
ax.grid(axis='x')
ax.set_axisbelow(True)
if savefigs:
name = analysis_n.figure_name(
'Lb_pdfs_', breakdown=(breakdown_min, breakdown_max, breakdown_num), include_system=False,
ls=(ls_min, ls_max, ls_num),
)
fig.savefig(
name
)
info = analysis_n.model_info()
name = path.relpath(name, analysis_n.fig_path)
info['name'] = name
info_list.append(info)
df_ls_pdf = | pd.concat([analysis_n.df_ls, analysis_s.df_ls, analysis_d.df_ls]) | pandas.concat |
#!/usr/bin/env python
#
# Script for 5' assignment of 5'P-Seq data
# input is BAM file must contain NH tag
# reads with the tag NH:i:1 only included
# output 1: raw counts in *_iv.h5 - single indexed
# output 2: normalised RPM in _idx_iv.h5 - double indexed
#
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__version__ = "0.1.2"
__email__ = "<EMAIL>"
__status__ = "beta"
import re
import sys
import pysam
import argparse
import pandas as pd
from collections import Counter
parser = argparse.ArgumentParser(description="five prime assignment of 5'P-Seq data")
parser.add_argument('-i', type=str, help='aligned_sorted.bam')
args = parser.parse_args()
sys.stderr.write("\n\
-i input : {}\n\n".format(args.i))
usage = 'python fivePassignment.py -i aligned_sorted.bam'
if args.i==None:
sys.exit("\n usage:\n\t{}\n".format(usage))
raw_out = False # bool
# output file name from infilename
f = re.sub(r'_sorted.bam', '', re.sub(r'.*\/', '', args.i))
outf_raw_hdf = "{}_raw_iv.h5".format(f)
outf_rpm_hdf = "{}_rpm_iv.h5".format(f)
outf_idx_hdf = "{}_idx_iv.h5".format(f)
def yeastChr():
# Ordered yeast Chr list short names from ensembl
return ['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','Mito']
def update_df(df, Chr, strand):
df.fillna(0, inplace=True)
columns = list(df.columns)
columns = ["Chr", "Position", "Strand"] + columns
df["Chr"] = Chr
df["Strand"] = strand
df["Position"] = df.index
return df[columns]
def restructurate_hd5(infile, outfile, close_outfile=True):
""" infile.h5 keys - "/For_raw", "/Rev_raw", ...
    outfile.h5 keys - "/For_raw/I", "/For_raw/II", ... etc
"Position" is set to index
:param infile:
:param outfile:
:return: reindexed 2 level hdf
"""
# open inp_HDF
inp__h5 = pd.HDFStore(infile, "r")
outp_h5 = | pd.HDFStore(outfile, complevel=5, complib="zlib", mode="w") | pandas.HDFStore |
import os
import requests
from time import sleep, time
import pandas as pd
from polygon import RESTClient
from dotenv import load_dotenv, find_dotenv
from FileOps import FileReader, FileWriter
from TimeMachine import TimeTraveller
from Constants import PathFinder
import Constants as C
class MarketData:
def __init__(self):
load_dotenv(find_dotenv('config.env'))
self.writer = FileWriter()
self.reader = FileReader()
self.finder = PathFinder()
self.traveller = TimeTraveller()
self.provider = 'iexcloud'
def get_indexer(self, s1, s2):
return list(s1.intersection(s2))
def try_again(self, func, **kwargs):
retries = (kwargs['retries']
if 'retries' in kwargs
else C.DEFAULT_RETRIES)
delay = (kwargs['delay']
if 'delay' in kwargs
else C.DEFAULT_DELAY)
func_args = {k: v for k, v in kwargs.items() if k not in {
'retries', 'delay'}}
for retry in range(retries):
try:
return func(**func_args)
except Exception as e:
if retry == retries - 1:
raise e
else:
sleep(delay)
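    # Illustrative usage (assumed, not in the original): retry a flaky network call a few
    # times before giving up, forwarding any keyword arguments the wrapped function needs.
    # >>> md = MarketData()
    # >>> md.try_again(func=requests.get, url='https://example.com', retries=3, delay=1)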
def get_symbols(self):
# get cached list of symbols
symbols_path = self.finder.get_symbols_path()
return list(self.reader.load_csv(symbols_path)[C.SYMBOL])
def get_dividends(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_dividends_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize(self, df, full_mapping,
filename, columns, default):
mapping = {k: v for k, v in full_mapping.items() if k in df}
df = df[list(mapping)].rename(columns=mapping)
time_col, val_cols = columns[0], columns[1:]
if time_col in df and set(val_cols).issubset(df.columns):
df = self.reader.update_df(
filename, df, time_col).sort_values(by=[time_col])
# since time col is pd.datetime,
# consider converting to YYYY-MM-DD str format
for val_col in val_cols:
df[val_col] = df[val_col].apply(
lambda val: float(val) if val else default)
return df
def standardize_dividends(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'amount'],
[C.EX, C.PAY, C.DEC, C.DIV]
)
)
filename = self.finder.get_dividends_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.DIV],
0
)
def save_dividends(self, **kwargs):
# given a symbol, save its dividend history
symbol = kwargs['symbol']
filename = self.finder.get_dividends_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_dividends(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_splits(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_splits_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize_splits(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'ratio'],
[C.EX, C.PAY, C.DEC, C.RATIO]
)
)
filename = self.finder.get_splits_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.RATIO],
1
)
def save_splits(self, **kwargs):
# given a symbol, save its splits history
symbol = kwargs['symbol']
filename = self.finder.get_splits_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_splits(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_ohlc(self, symbol, df, filename=None):
full_mapping = dict(
zip(
['date', 'open', 'high', 'low', 'close',
'volume', 'average', 'trades'],
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE,
C.VOL, C.AVG, C.TRADES]
)
)
filename = filename or self.finder.get_ohlc_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE],
0
)
for col in [C.VOL, C.TRADES]:
if col in df:
df[col] = df[col].apply(
lambda val: 0 if pd.isnull(val) else int(val))
return df
def get_ohlc(self, symbol, timeframe='max'):
df = self.reader.load_csv(
self.finder.get_ohlc_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def save_ohlc(self, **kwargs):
symbol = kwargs['symbol']
filename = self.finder.get_ohlc_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_ohlc(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_social_sentiment(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.POS, C.NEG]]
return filtered
def get_social_volume(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.VOL, C.DELTA]]
return filtered
def save_social_sentiment(self, **kwargs):
# # given a symbol, save its sentiment data
symbol = kwargs['symbol']
filename = self.finder.get_sentiment_path(symbol)
if os.path.exists(filename):
os.remove(filename)
sen_df = self.reader.update_df(
filename, self.get_social_sentiment(**kwargs), C.TIME)
sen_df = sen_df[self.get_indexer(
{C.TIME, C.POS, C.NEG}, sen_df.columns)]
vol_df = self.reader.update_df(
filename, self.get_social_volume(**kwargs), C.TIME)
vol_df = vol_df[self.get_indexer(
{C.TIME, C.VOL, C.DELTA}, vol_df.columns)]
if sen_df.empty and not vol_df.empty:
df = vol_df
elif not sen_df.empty and vol_df.empty:
df = sen_df
elif not sen_df.empty and not vol_df.empty:
df = sen_df.merge(vol_df, how="outer", on=C.TIME)
else:
return
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sentiment(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'bullish', 'bearish'],
[C.TIME, C.POS, C.NEG]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.POS, C.NEG],
0
)
return df[self.get_indexer({C.TIME, C.POS, C.NEG}, df.columns)]
def standardize_volume(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'volume_score', 'volume_change'],
[C.TIME, C.VOL, C.DELTA]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.VOL, C.DELTA],
0
)
return df[self.get_indexer({C.TIME, C.VOL, C.DELTA}, df.columns)]
def get_intraday(self, symbol, min=1, timeframe='max', extra_hrs=False):
# implement way to transform 1 min dataset to 5 min data
# or 30 or 60 should be flexible soln
# implement way to only get market hours
# given a symbol, return a cached dataframe
dates = self.traveller.dates_in_range(timeframe)
for date in dates:
df = self.reader.load_csv(
self.finder.get_intraday_path(symbol, date, self.provider))
yield self.reader.data_in_timeframe(df, C.TIME, timeframe)
def save_intraday(self, **kwargs):
symbol = kwargs['symbol']
dfs = self.get_intraday(**kwargs)
filenames = []
for df in dfs:
date = df[C.TIME].iloc[0].strftime(C.DATE_FMT)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
if os.path.exists(filename):
os.remove(filename)
save_fmt = f'{C.DATE_FMT} {C.TIME_FMT}'
df = self.reader.update_df(
filename, df, C.TIME, save_fmt)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
filenames.append(filename)
return filenames
def get_unemployment_rate(self, timeframe='max'):
# given a timeframe, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_unemployment_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def standardize_unemployment(self, df):
full_mapping = dict(
zip(
['time', 'value'],
[C.TIME, C.UN_RATE]
)
)
filename = self.finder.get_unemployment_path()
return self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.UN_RATE],
0
)
def save_unemployment_rate(self, **kwargs):
# given a symbol, save its dividend history
filename = self.finder.get_unemployment_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_unemployment_rate(**kwargs), C.TIME, '%Y-%m')
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_s2f_ratio(self, df):
full_mapping = dict(
zip(
['t', 'o.daysTillHalving', 'o.ratio'],
[C.TIME, C.HALVING, C.RATIO]
)
)
filename = self.finder.get_s2f_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.HALVING, C.RATIO],
0
)
return df[self.get_indexer({C.TIME, C.HALVING, C.RATIO}, df.columns)]
def get_s2f_ratio(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_s2f_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.HALVING, C.RATIO]]
return filtered
def save_s2f_ratio(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_s2f_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_s2f_ratio(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_diff_ribbon(self, df):
full_mapping = dict(
zip(
['t', 'o.ma9', 'o.ma14', 'o.ma25', 'o.ma40',
'o.ma60', 'o.ma90', 'o.ma128', 'o.ma200'],
[C.TIME] + C.MAs
)
)
filename = self.finder.get_diff_ribbon_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME] + C.MAs,
0
)
return df[self.get_indexer(set([C.TIME] + C.MAs), df.columns)]
def get_diff_ribbon(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_diff_ribbon_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME] + C.MAs]
return filtered
def save_diff_ribbon(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_diff_ribbon_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_diff_ribbon(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sopr(self, df):
full_mapping = dict(
zip(
['t', 'v'],
[C.TIME, C.SOPR]
)
)
filename = self.finder.get_diff_ribbon_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.SOPR],
1
)
return df[self.get_indexer({C.TIME, C.SOPR}, df.columns)]
def get_sopr(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sopr_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.SOPR]]
return filtered
def save_sopr(self, **kwargs):
# # given a symbol, save its s2f data
filename = self.finder.get_sopr_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_sopr(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
# def handle_request(self, url, err_msg):
class IEXCloud(MarketData):
def __init__(self, test=False):
super().__init__()
self.version = 'v1'
self.provider = 'iexcloud'
if test:
self.base = 'https://sandbox.iexapis.com'
self.token = os.environ['IEXCLOUD_SANDBOX']
else:
self.base = 'https://cloud.iexapis.com'
self.token = os.environ['IEXCLOUD']
def get_dividends(self, **kwargs):
# given a symbol, return the dividend history
def _get_dividends(symbol, timeframe='3m'):
category = 'stock'
dataset = 'dividends'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = [datum for datum in response.json() if datum['flag']
== 'Cash' and datum['currency'] == 'USD']
else:
raise Exception(
f'Invalid response from IEX for {symbol} dividends.')
if data == []:
return empty
df = self.standardize_dividends(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_dividends, **kwargs)
def get_splits(self, **kwargs):
# given a symbol, return the stock splits
def _get_splits(symbol, timeframe='3m'):
category = 'stock'
dataset = 'splits'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} splits.')
if data == []:
return empty
df = self.standardize_splits(symbol, | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc
sns.set()
datasets = {'cub200': 'CUB200-2011',
'mini-imagenet': 'mini-ImageNet',
'cifar-fs': 'CIFAR-FS'}
line_styles = [':', '--', '-', '-.']
class ApproximateAccuracy(object):
def __init__(self, data_file_path: str):
self.data_file_path = data_file_path
def at_radii(self, radii: np.ndarray) -> np.ndarray:
df = | pd.read_csv(self.data_file_path, delimiter="\t") | pandas.read_csv |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from application import model_builder
def test_validate_types_numeric_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = new_expect["Some Feature"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [3, 4, 5]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = ["3", "4", "5"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_numeric_string_converts_throws_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["3d", "4d", "5d"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Numeric"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s c", "0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Percentage"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_percentage_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [30.0, 40.0, 50.0]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [0.3, 0.4, 0.5]
df["Some Feature 2"] = ["0.3%", "0.4 %", " 0.5 %"]
df["Some Feature 3"] = ["30", "40", " 50"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Percentage"],
["Some Feature 2", "Percentage"],
["Some Feature 3", "Percentage"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_money_converts_throws_value_error():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = ["0.3s$", "$0.4", "0.5"]
df["Answer"] = [1, 2, 3]
fields = [["Some Feature", "Money"],
["Answer", "Response Variable"]]
# Act and Assert
with pytest.raises(ValueError):
model_builder.validate_types(df, fields)
def test_validate_types_money_converts_success():
# Arrange
df = pd.DataFrame()
new_expect = pd.DataFrame()
new_expect["Some Feature"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 2"] = [30.0, 40.0, 50.0]
new_expect["Some Feature 3"] = [50000, 40000.0, 50000]
new_expect["Answer"] = [1, 2, 3]
df["Some Feature"] = [30, 40, 50]
df["Some Feature 2"] = ["$30", "$ 40 ", " $50 "]
df["Some Feature 3"] = ["$50,000", "40000", " 50,000"]
df["Answer"] = new_expect["Answer"]
fields = [["Some Feature", "Money"],
["Some Feature 2", "Money"],
["Some Feature 3", "Money"],
["Answer", "Response Variable"]]
# Act
x = model_builder.validate_types(df, fields)
# Assert
assert_frame_equal(x, new_expect, check_dtype=False)
def test_validate_types_value_set_success():
# Arrange
df = pd.DataFrame()
new_expect = | pd.DataFrame() | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import partial
from typing import Any, Callable, Dict, Tuple, Union
import numpy as np
import pandas as pd
from flask_babel import gettext as _
from pandas import DataFrame, NamedAgg, Timestamp
from superset.exceptions import InvalidPostProcessingError
NUMPY_FUNCTIONS = {
"average": np.average,
"argmin": np.argmin,
"argmax": np.argmax,
"count": np.ma.count,
"count_nonzero": np.count_nonzero,
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"max": np.max,
"mean": np.mean,
"median": np.median,
"nansum": np.nansum,
"nanmin": np.nanmin,
"nanmax": np.nanmax,
"nanmean": np.nanmean,
"nanmedian": np.nanmedian,
"nanpercentile": np.nanpercentile,
"min": np.min,
"percentile": np.percentile,
"prod": np.prod,
"product": np.product,
"std": np.std,
"sum": np.sum,
"var": np.var,
}
DENYLIST_ROLLING_FUNCTIONS = (
"count",
"corr",
"cov",
"kurt",
"max",
"mean",
"median",
"min",
"std",
"skew",
"sum",
"var",
"quantile",
)
ALLOWLIST_CUMULATIVE_FUNCTIONS = (
"cummax",
"cummin",
"cumprod",
"cumsum",
)
PROPHET_TIME_GRAIN_MAP = {
"PT1S": "S",
"PT1M": "min",
"PT5M": "5min",
"PT10M": "10min",
"PT15M": "15min",
"PT30M": "30min",
"PT1H": "H",
"P1D": "D",
"P1W": "W",
"P1M": "M",
"P3M": "Q",
"P1Y": "A",
"1969-12-28T00:00:00Z/P1W": "W-SUN",
"1969-12-29T00:00:00Z/P1W": "W-MON",
"P1W/1970-01-03T00:00:00Z": "W-SAT",
"P1W/1970-01-04T00:00:00Z": "W-SUN",
}
RESAMPLE_METHOD = ("asfreq", "bfill", "ffill", "linear", "median", "mean", "sum")
FLAT_COLUMN_SEPARATOR = ", "
def _flatten_column_after_pivot(
column: Union[float, Timestamp, str, Tuple[str, ...]],
aggregates: Dict[str, Dict[str, Any]],
) -> str:
"""
Function for flattening column names into a single string. This step is necessary
to be able to properly serialize a DataFrame. If the column is a string, return
element unchanged. For multi-element columns, join column elements with a comma,
with the exception of pivots made with a single aggregate, in which case the
aggregate column name is omitted.
:param column: single element from `DataFrame.columns`
:param aggregates: aggregates
:return:
"""
if not isinstance(column, tuple):
column = (column,)
if len(aggregates) == 1 and len(column) > 1:
# drop aggregate for single aggregate pivots with multiple groupings
# from column name (aggregates always come first in column name)
column = column[1:]
return FLAT_COLUMN_SEPARATOR.join([str(col) for col in column])
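# Illustrative behaviour (assumed metric/column names, not part of the original module):
# >>> _flatten_column_after_pivot(("sum__num", "CA"), aggregates={"sum__num": {}})
# 'CA'
# >>> _flatten_column_after_pivot(("sum__num", "CA"),
# ...                             aggregates={"sum__num": {}, "avg__num": {}})
# 'sum__num, CA'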
def _is_multi_index_on_columns(df: DataFrame) -> bool:
return isinstance(df.columns, pd.MultiIndex)
def validate_column_args(*argnames: str) -> Callable[..., Any]:
def wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
def wrapped(df: DataFrame, **options: Any) -> Any:
if _is_multi_index_on_columns(df):
# MultiIndex column validate first level
columns = df.columns.get_level_values(0)
else:
columns = df.columns.tolist()
for name in argnames:
if name in options and not all(
elem in columns for elem in options.get(name) or []
):
raise InvalidPostProcessingError(
_("Referenced columns not available in DataFrame.")
)
return func(df, **options)
return wrapped
return wrapper
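# Illustrative usage (assumed, not part of the original module): the decorator rejects
# operations that reference columns missing from the DataFrame before they run.
# >>> @validate_column_args("columns")
# ... def my_operation(df, columns=None):
# ...     return df[columns]
# >>> my_operation(DataFrame({"a": [1]}), columns=["b"])  # raises InvalidPostProcessingError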
def _get_aggregate_funcs(
df: DataFrame,
aggregates: Dict[str, Dict[str, Any]],
) -> Dict[str, NamedAgg]:
"""
Converts a set of aggregate config objects into functions that pandas can use as
aggregators. Currently only numpy aggregators are supported.
:param df: DataFrame on which to perform aggregate operation.
:param aggregates: Mapping from column name to aggregate config.
:return: Mapping from metric name to function that takes a single input argument.
"""
agg_funcs: Dict[str, NamedAgg] = {}
for name, agg_obj in aggregates.items():
column = agg_obj.get("column", name)
if column not in df:
raise InvalidPostProcessingError(
_(
"Column referenced by aggregate is undefined: %(column)s",
column=column,
)
)
if "operator" not in agg_obj:
raise InvalidPostProcessingError(
_(
"Operator undefined for aggregator: %(name)s",
name=name,
)
)
operator = agg_obj["operator"]
if callable(operator):
aggfunc = operator
else:
func = NUMPY_FUNCTIONS.get(operator)
if not func:
raise InvalidPostProcessingError(
_(
"Invalid numpy function: %(operator)s",
operator=operator,
)
)
options = agg_obj.get("options", {})
aggfunc = partial(func, **options)
agg_funcs[name] = NamedAgg(column=column, aggfunc=aggfunc)
return agg_funcs
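# Illustrative aggregate config (assumed names, not part of the original module): each
# metric maps a source column to a numpy operator plus optional keyword options.
# >>> _get_aggregate_funcs(DataFrame({"num": [1, 2, 3, 4]}), {
# ...     "q1": {"column": "num", "operator": "percentile", "options": {"q": 25}},
# ...     "total": {"column": "num", "operator": "sum"},
# ... })
# yields {"q1": NamedAgg(column="num", aggfunc=partial(np.percentile, q=25)),
#         "total": NamedAgg(column="num", aggfunc=partial(np.sum))}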
def _append_columns(
base_df: DataFrame, append_df: DataFrame, columns: Dict[str, str]
) -> DataFrame:
"""
Function for adding columns from one DataFrame to another DataFrame. Calls the
assign method, which overwrites the original column in `base_df` if the column
already exists, and appends the column if the name is not defined.
    Note that this is a memory-intensive operation.
    :param base_df: DataFrame to use as the base
:param append_df: DataFrame from which to select data.
:param columns: columns on which to append, mapping source column to
target column. For instance, `{'y': 'y'}` will replace the values in
column `y` in `base_df` with the values in `y` in `append_df`,
while `{'y': 'y2'}` will add a column `y2` to `base_df` based
on values in column `y` in `append_df`, leaving the original column `y`
in `base_df` unchanged.
:return: new DataFrame with combined data from `base_df` and `append_df`
"""
if all(key == value for key, value in columns.items()):
# make sure to return a new DataFrame instead of changing the `base_df`.
_base_df = base_df.copy()
_base_df.loc[:, columns.keys()] = append_df
return _base_df
append_df = append_df.rename(columns=columns)
return | pd.concat([base_df, append_df], axis="columns") | pandas.concat |
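# Illustrative usage (assumed column names, not part of the original module): appending
# under a new name keeps the original column, while a same-name mapping overwrites it.
# >>> base = DataFrame({"y": [1, 2, 3]})
# >>> extra = DataFrame({"y": [10, 20, 30]})
# >>> _append_columns(base, extra, {"y": "y2"}).columns.tolist()
# ['y', 'y2']
# >>> _append_columns(base, extra, {"y": "y"})["y"].tolist()
# [10, 20, 30]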
# -*- coding: utf-8 -*-
import pandas as pd
from collections import defaultdict
import operator
def makeGen(row):
    # order the two adjacent allele columns (al, al+1) into a single genotype string;
    # note: relies on the loop variable `al` set by the genotype loops further below
    return min(row[al], row[al+1]) + max(row[al], row[al+1])
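# Illustrative example (assumed allele layout, not in the original script): with the loop
# variable al == 6, a row whose columns 6 and 7 hold 'B' and 'A' is ordered alphabetically:
# >>> makeGen(pd.Series({6: 'B', 7: 'A'}))
# 'AB'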
#################################################################################
# kappa casein A
##################################################################################
AB1 = pd.read_table('/home/jana/Genotipi/Genotipi_DATA/Genotipi_latest/Rjava/IDBv03/BetaLGB1.ped', header=None, sep=" ")
BetaLGB = pd.DataFrame({'ID': list(AB1[1]) })
cols = len(AB1.columns)
AB1 = AB1[[1] + range(6,cols)]
# combine each pair of adjacent allele columns into a genotype column
genoCol = []
for al in [i for i in range(6,cols) if i % 2 == 0]:
    AB1.loc[:,str(al)+'g'] = AB1.apply(makeGen, axis=1)
    genoCol.append(str(al) + 'g')
# helper: the most frequent non-missing genotype across the duplicate genotyping runs
def countOcc_max(alleles):
    Count = defaultdict(int)
    for geno in set(alleles):
        if geno != '00':
            Count[geno] = alleles.count(geno)
    return max(Count.items(), key=operator.itemgetter(1))[0]
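# Illustrative example (not in the original script): missing calls ('00') are ignored and
# the most frequent remaining genotype is returned.
# >>> countOcc_max(['AB', 'AB', '00', 'AA'])
# 'AB'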
AB1.loc[:, 'finalGeno'] = ""
for row in AB1.index:
    # take the first non-missing ('00') genotype call across the duplicate runs
    AB1.loc[row, 'finalGeno'] = next(x for x in list(AB1.loc[row, genoCol]) if x != '00')
AB1.rename(columns={1:'ID', 'finalGeno':'AB1'}, inplace=True)
BetaLGB = pd.merge(BetaLGB, AB1[['ID','AB1']], on='ID')
##########################################################################
#Kappa-CSN AB SNP2
##########################################################################
AB2 = pd.read_table('/home/jana/Genotipi/Genotipi_DATA/Genotipi_latest/Rjava/IDBv03/BetaLGB2.ped', header=None, sep=" ")
cols = len(AB2.columns)
AB2 = AB2[[1] + range(6,cols)]
# combine each pair of adjacent allele columns into a genotype column
genoCol = []
for al in [i for i in range(6,cols) if i % 2 == 0]:
AB2.loc[:,str(al)+'g'] = AB2.apply(makeGen, axis=1)
genoCol.append(str(al) + 'g')
# pick the final genotype: the first non-missing ('00') call across the duplicate runs
AB2.loc[:, 'finalGeno'] = ""
for row in AB2.index:
try:
        AB2.loc[row, 'finalGeno'] = next(x for x in list(AB2.loc[row, genoCol]) if x != '00')
    except:
        AB2.loc[row, 'finalGeno'] = '00'
AB2.rename(columns={1:'ID', 'finalGeno':'AB2'}, inplace=True)
BetaLGB = | pd.merge(BetaLGB, AB2[['ID','AB2']], on='ID') | pandas.merge |
from __future__ import print_function
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.metrics import mean_squared_error
import math
import sys
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions-targets)**2))
train_interval = sys.argv[1].split(',')
train_begin = int(train_interval[0])
train_end = int(train_interval[1])
test_interval = sys.argv[2].split(',')
test_begin = int(test_interval[0])
test_end = int(test_interval[1])
REGRESSION_PLOTS_FOLDER = "plots/regression/"
with open("results/regression/stdout_" + str(train_begin) + "_" + str(test_end) + ".txt", 'w') as f:
sys.stdout = f
print("Train = [", train_begin, ",", train_end, "]")
print("Test = [", test_begin, ",", test_end, "]")
train = {
"params": [],
"targets": []
}
test = {
"params": [],
"targets": []
}
with open("data/btc_dataset_sample.csv", "r") as btc_dataset:
header = btc_dataset.readline()
count = 0
for line in btc_dataset.readlines():
line = line.replace('\n', '')
values = line.split(';')
if count >= train_begin and count <= train_end:
train["params"].append(values[:-1])
train["targets"].append(values[-1])
elif count >= test_begin and count <= test_end:
test["params"].append(values[:-1])
test["targets"].append(values[-1])
count += 1
params = np.array(train["params"], dtype=np.float32)
targets = np.array(train["targets"], dtype=np.float32)
res = sm.OLS(targets, params).fit()
print(res.summary())
# Training plot
# fig, ax = plt.subplots(figsize=(8,6))
# ax.plot(targets, 'o', label="data")
# ax.plot(res.fittedvalues, 'r--.', label="OLS")
# ax.legend(loc='best')
# fig.suptitle("training " + str(train_begin) + "-" + str(train_end))
# fig.savefig(REGRESSION_PLOTS_FOLDER + "train" + str(train_begin) + "_" + str(train_end) + "_" + str(test_begin) + "_" + str(train_end))
params = np.array(test["params"], dtype=np.float32)
targets = np.array(test["targets"], dtype=np.float32)
predicted = res.predict(params)
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(targets, 'o', label="data")
ax.plot(predicted, 'r--.', label="OLS")
ax.legend(loc='best')
fig.suptitle("OLS Regression Test " + str(test_begin) + "-" + str(test_end))
fig.savefig(REGRESSION_PLOTS_FOLDER + "test_" + str(train_begin) + "_" + str(test_end))
# Errors
errs = []
var_errs = []
for i in range(len(targets)):
errs.append(abs(predicted[i] - targets[i])/ targets[i])
var_errs.append((predicted[i] - targets[i - 1]) - (targets[i] - targets[i - 1]) / (targets[i] - targets[i - 1]))
print("RMSE = ", rmse(predicted, targets))
print("Abs Error: ", sum(errs) / len(errs))
err_df = pd.DataFrame(errs)
err_df.plot(title="Percentage error " + str(test_begin) + "-" + str(test_end))
plt.savefig(REGRESSION_PLOTS_FOLDER + "error_" + str(train_begin) + "_" + str(test_end))
var_err_df = | pd.DataFrame(var_errs) | pandas.DataFrame |
from datetime import datetime
import pytest
from pytz import utc
import pandas._testing as tm
from pandas.tseries.holiday import (
MO,
SA,
AbstractHolidayCalendar,
DateOffset,
EasterMonday,
GoodFriday,
Holiday,
HolidayCalendarFactory,
Timestamp,
USColumbusDay,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
get_calendar,
next_monday,
)
def _check_holiday_results(holiday, start, end, expected):
"""
Check that the dates for a given holiday match in date and timezone.
Parameters
----------
holiday : Holiday
The holiday to check.
start : datetime-like
The start date of range in which to collect dates for a given holiday.
end : datetime-like
The end date of range in which to collect dates for a given holiday.
expected : list
The list of dates we expect to get.
"""
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert list(
holiday.dates(utc.localize(Timestamp(start)), utc.localize(Timestamp(end)))
) == [utc.localize(dt) for dt in expected]
@pytest.mark.parametrize(
"holiday,start_date,end_date,expected",
[
(
USMemorialDay,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
],
),
(
Holiday("July 4th Eve", month=7, day=3),
"2001-01-01",
"2003-03-03",
[Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")],
),
(
Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)),
"2001-01-01",
"2008-03-03",
[
Timestamp("2001-07-03 00:00:00"),
Timestamp("2002-07-03 00:00:00"),
Timestamp("2003-07-03 00:00:00"),
Timestamp("2006-07-03 00:00:00"),
Timestamp("2007-07-03 00:00:00"),
],
),
(
EasterMonday,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
Timestamp("2011-04-25 00:00:00"),
Timestamp("2012-04-09 00:00:00"),
Timestamp("2013-04-01 00:00:00"),
Timestamp("2014-04-21 00:00:00"),
Timestamp("2015-04-06 00:00:00"),
Timestamp("2016-03-28 00:00:00"),
Timestamp("2017-04-17 00:00:00"),
Timestamp("2018-04-02 00:00:00"),
Timestamp("2019-04-22 00:00:00"),
Timestamp("2020-04-13 00:00:00"),
],
),
(
GoodFriday,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
Timestamp("2011-04-22 00:00:00"),
Timestamp("2012-04-06 00:00:00"),
Timestamp("2013-03-29 00:00:00"),
Timestamp("2014-04-18 00:00:00"),
Timestamp("2015-04-03 00:00:00"),
| Timestamp("2016-03-25 00:00:00") | pandas.tseries.holiday.Timestamp |