# File System
import os
import json
from pathlib import Path
from zipfile import ZipFile
import pickle
import gc
import numpy as np
import pandas as pd
from sympy.geometry import *
DATA_PATH = '../data/' # Point this constant to the location of your data archive files
EXPECTED_DATASETS = {'Colorado': [
'county_total_population.Colorado.zip',
'covid_county.Colorado.zip',
'neon_2d_wind.Colorado.zip',
'neon_barometric_pressure.Colorado.zip',
'neon_single_asp_air_temperature.Colorado.zip'
]}
counties = {'Colorado': ['Boulder', 'Grand', 'Larimer', 'Logan', 'Weld', 'Yuma']}
def get_datasets(state='Colorado'):
# Returns dataframes in order: control, covidWind, covidPressure, covidTemperature
extract(state)
create_pickles(state)
return pd.read_pickle(f'../data/control.{state}.pkl'), pd.read_pickle(f'../data/covidWind.{state}.pkl'), \
pd.read_pickle(f'../data/covidPressure.{state}.pkl'), pd.read_pickle(f'../data/covidTemperature.{state}.pkl')
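if __name__ == '__main__':
    # Minimal usage sketch (hypothetical): assumes extract() and create_pickles()
    # are defined elsewhere in this module and that the archives listed in
    # EXPECTED_DATASETS are present under DATA_PATH.
    control, covid_wind, covid_pressure, covid_temp = get_datasets('Colorado')
    print(control.shape, covid_wind.shape, covid_pressure.shape, covid_temp.shape)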
"""
Functions to process model inputs and outputs.
This module provides functions that classify 5-minute PPG time series or PPG images
as Reliable or Unreliable for each HR-HRV feature.
Copyright 2020, <NAME>
Licence: MIT, see LICENCE for more details.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
import time, timeit
import heartpy as hp
import pandas as pd
import numpy as np
import os, sys, glob, pickle, tempfile
from PIL import Image
from sklearn.metrics import classification_report, confusion_matrix,roc_curve, auc
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras import layers, models
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import SGD, Adam
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Dense, Input
from keras.models import Model, load_model
from keras.utils import to_categorical
from keras.initializers import glorot_uniform
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras import applications
import keras
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (12, 10)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
BATCH_SIZE = 1024
EPOCHS = 15
def rgba2rgb():
frames_dir = './data/frames/'
frames = sorted(glob.glob(frames_dir+'/*'))
X_train = np.zeros((len(frames),224,224,3), dtype='uint8')
for ind in range(len(frames)):
png = Image.open(frames[ind])
png.load() # required for png.split()
background = Image.new("RGB", png.size, (255, 255, 255))
background.paste(png, mask=png.split()[3]) # 3 is the alpha channel
img = np.asarray(background)
X_train[ind] = img
np.save('frames.npy', X_train)
return
# # Dataset Preparation
def load_1Ddataset(feature):
train_data = pd.read_csv('data/train.csv', header=None).to_numpy()
train_label = pd.read_csv('data/{}_label.csv'.format(feature), header=None).to_numpy()
train_data = train_data[:,:,np.newaxis]
neg, pos = np.bincount(train_label.ravel())
total = neg + pos
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
initial_bias = np.log([pos/neg])
trX, valX, trY, valY = train_test_split(train_data, train_label, test_size=0.2, random_state=42)
# one hot encode y
trY = to_categorical(trY)
valY = to_categorical(valY)
return trX, valX, trY, valY, initial_bias, class_weight
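# Toy illustration of the re-weighting above (made-up counts, not from the data):
# with 900 negatives and 100 positives the minority class receives the larger
# weight, and the output-layer bias is initialised at log(pos/neg).
if __name__ == '__main__':
    _neg, _pos = 900, 100
    _total = _neg + _pos
    print({0: (1 / _neg) * _total / 2.0, 1: (1 / _pos) * _total / 2.0})  # {0: ~0.56, 1: 5.0}
    print(np.log([_pos / _neg]))  # ~[-2.197], starts the sigmoid near the class base rate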
# # Dataset Preparation
def load_2Ddataset(feature):
# train_data = rgba2rgb()
train_data = np.load('frames.npy')
train_data = train_data / 255.0
train_label = pd.read_csv('data/{}_label.csv'.format(feature), header=None).to_numpy()
neg, pos = np.bincount(train_label.ravel())
total = neg + pos
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
initial_bias = np.log([pos/neg])
trX, valX, trY, valY = train_test_split(train_data, train_label, test_size=0.2, random_state=42)
# one hot encode y
trY = to_categorical(trY)
valY = to_categorical(valY)
return trX, valX, trY, valY, initial_bias, class_weight
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def create_1Dcnn(n_filters=32, output_bias=None):
# with TensorFlow 2 you can substitute the following built-in metrics for the custom ones used below
# METRICS = [
# keras.metrics.TruePositives(name='tp'),
# keras.metrics.FalsePositives(name='fp'),
# keras.metrics.TrueNegatives(name='tn'),
# keras.metrics.FalseNegatives(name='fn'),
# keras.metrics.BinaryAccuracy(name='accuracy'),
# keras.metrics.Precision(name='precision'),
# keras.metrics.Recall(name='recall'),
# keras.metrics.AUC(name='auc'),
# ]
if output_bias is not None:
output_bias = tf.keras.initializers.Constant(output_bias)
model = models.Sequential()
model.add(layers.Conv1D(filters=n_filters, kernel_initializer='he_normal', kernel_size=3, activation='relu', input_shape=(60*20,1)))
model.add(layers.BatchNormalization())
model.add(layers.Conv1D(filters=n_filters*2, kernel_initializer='he_normal', kernel_size=3, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling1D(pool_size=2))
model.add(layers.Flatten())
model.add(layers.Dense(348, kernel_initializer='he_normal', activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(2,activation='sigmoid', bias_initializer=output_bias))
# myAdam = tf.keras.optimizers.Adam(lr=0.00001)
myAdam = Adam(lr=0.00001)
model.compile(optimizer=myAdam,
loss='binary_crossentropy',
metrics=['accuracy', recall_m, precision_m, f1_m])
return model
def create_2Dcnn(model_name, output_bias=None):
# with TensorFlow 2 you can substitute the following built-in metrics for the custom ones used below
# METRICS = [
# keras.metrics.TruePositives(name='tp'),
# keras.metrics.FalsePositives(name='fp'),
# keras.metrics.TrueNegatives(name='tn'),
# keras.metrics.FalseNegatives(name='fn'),
# keras.metrics.BinaryAccuracy(name='accuracy'),
# keras.metrics.Precision(name='precision'),
# keras.metrics.Recall(name='recall'),
# keras.metrics.AUC(name='auc'),
# ]
if output_bias is not None:
output_bias = tf.keras.initializers.Constant(output_bias)
if model_name=='VGG16':
base_model = applications.vgg16.VGG16(weights='imagenet', include_top=False)
elif model_name=='ResNet50':
base_model = keras.applications.keras_applications.resnet.ResNet50(weights='imagenet', include_top=False, backend=keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
elif model_name=='MobileNetV2':
base_model = keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet', include_top=False, backend=keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
for layer in base_model.layers:
layer.trainable = False
input = Input(shape=(224,224,3))
# x = base_model.output
x = base_model(input)
x = Flatten()(x)
# x = GlobalMaxPooling2D()(x)
x = Dense(128, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(64, activation='relu')(x)
predictions = Dense(2, activation='sigmoid', bias_initializer=output_bias)(x)
model = Model(inputs=input, outputs=predictions)
myAdam = Adam(lr=0.00005)
model.compile(optimizer=myAdam,
loss='binary_crossentropy',
metrics=['accuracy', recall_m, precision_m, f1_m])
return model
# # Fit and Evaluate Model
def evaluate_1Dmodel(trX, valX, trY, valY, initial_bias, class_weight, feat, model_name, load=False):
verbose, epochs, batch_size = 2, 15, 1024
# Prepare callbacks for model saving and for learning rate adjustment.
filepath = save_model(feat, model_name=model_name)
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_auc',
verbose=1,
save_best_only=True)
callbacks = [checkpoint]
model = create_1Dcnn(n_filters=32, output_bias=initial_bias)
if load:
filepath = load_model(feat, model_name)
model.load_weights(filepath)
# fit network
history = model.fit(trX, trY, epochs=epochs, validation_data=(valX,valY), batch_size=batch_size, verbose=verbose, callbacks=callbacks, class_weight=class_weight)
with open('./saved_results/{}_{}_trainHistoryDict'.format(feat, model_name), 'wb') as file_pi:
pickle.dump(history.history, file_pi)
else:
# fit network
history = model.fit(trX, trY, epochs=epochs, validation_data=(valX,valY), batch_size=batch_size, verbose=verbose, callbacks=callbacks, class_weight=class_weight)
with open('./saved_results/{}_{}_trainHistoryDict'.format(feat, model_name), 'wb') as file_pi:
pickle.dump(history.history, file_pi)
train_pred = model.predict(trX, batch_size=1024)
test_pred = model.predict(valX, batch_size=1024)
# evaluate model
results = model.evaluate(valX, valY, batch_size=batch_size, verbose=0)
return model.metrics_names, history, results, train_pred, test_pred
def evaluate_2Dmodel(trX, valX, trY, valY, initial_bias, class_weight, feat, model_name, load=False):
verbose, epochs, batch_size = 2, 15, 1024
# Prepare callbacks for model saving and for learning rate adjustment.
filepath = save_model(feat, model_name)
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_auc',
verbose=2,
save_best_only=True)
callbacks = [checkpoint]
model = create_2Dcnn(model_name, output_bias=initial_bias)
if load:
filepath = load_model(feat, model_name)
model.load_weights(filepath)
# fit network
history = model.fit(trX, trY, epochs=epochs, validation_data=(valX,valY), batch_size=batch_size, verbose=verbose, callbacks=callbacks, class_weight=class_weight)
with open('./saved_results/{}_{}_trainHistoryDict'.format(feat, model_name), 'wb') as file_pi:
pickle.dump(history.history, file_pi)
else:
# fit network
history = model.fit(trX, trY, epochs=epochs, validation_data=(valX,valY), batch_size=batch_size, verbose=verbose, callbacks=callbacks, class_weight=class_weight)
with open('./saved_results/{}_{}_trainHistoryDict'.format(feat, model_name), 'wb') as file_pi:
pickle.dump(history.history, file_pi)
train_pred = model.predict(trX, batch_size=1024)
test_pred = model.predict(valX, batch_size=1024)
# evaluate model
results = model.evaluate(valX, valY, batch_size=batch_size, verbose=0)
return model.metrics_names, history, results, train_pred, test_pred
def summarize_results(names, scores, feature, model_names):
df = pd.DataFrame(scores, index=model_names, columns=names)
"""
Tests for live trading.
"""
from unittest import TestCase
from datetime import time
from collections import defaultdict
import pandas as pd
import numpy as np
# fix to allow zip_longest on Python 2.X and 3.X
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
from functools import partial
import os
from math import fabs
from mock import patch, sentinel, Mock, MagicMock
from testfixtures import tempdir
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.Execution import Execution
from ib.ext.OrderState import OrderState
import alpaca_trade_api.rest as apca
from zipline.algorithm import TradingAlgorithm
from zipline.algorithm_live import LiveTradingAlgorithm, LiveAlgorithmExecutor
from zipline.data.data_portal_live import DataPortalLive
from zipline.gens.realtimeclock import (RealtimeClock,
SESSION_START,
BEFORE_TRADING_START_BAR)
from zipline.finance.order import Order as ZPOrder
from zipline.finance.blotter_live import BlotterLive
from zipline.gens.sim_engine import MinuteSimulationClock
from zipline.gens.brokers.broker import Broker
from zipline.gens.brokers.ib_broker import IBBroker, TWSConnection
from zipline.gens.brokers.alpaca_broker import ALPACABroker
from zipline.testing.fixtures import WithSimParams
from zipline.finance.execution import (StopLimitOrder,
MarketOrder,
StopOrder,
LimitOrder)
from zipline.finance.order import ORDER_STATUS
from zipline.finance.transaction import Transaction
from zipline.utils.calendars import get_calendar
from zipline.utils.calendars.trading_calendar import days_at_time
from zipline.utils.serialization_utils import load_context, store_context
from zipline.testing.fixtures import (ZiplineTestCase,
WithTradingEnvironment,
WithDataPortal)
from zipline.errors import CannotOrderDelistedAsset
class TestRealtimeClock(TestCase):
@classmethod
def setUpClass(cls):
cls.nyse_calendar = get_calendar("NYSE")
cls.sessions = cls.nyse_calendar.sessions_in_range(
pd.Timestamp("2017-04-20"),
pd.Timestamp("2017-04-20")
)
trading_o_and_c = cls.nyse_calendar.schedule.ix[cls.sessions]
cls.opens = trading_o_and_c['market_open']
cls.closes = trading_o_and_c['market_close']
def setUp(self):
self.internal_clock = None
self.events = defaultdict(list)
def advance_clock(self, x):
"""Mock function for sleep. Advances the internal clock by 1 min"""
# The internal clock advance time must be 1 minute to match
# MinutesSimulationClock's update frequency
self.internal_clock += pd.Timedelta('1 min')
def get_clock(self, arg, *args, **kwargs):
"""Mock function for pandas.to_datetime which is used to query the
current time in RealtimeClock"""
assert arg == "now"
return self.internal_clock
def test_crosscheck_realtimeclock_with_minutesimulationclock(self):
"""Tests that RealtimeClock behaves like MinuteSimulationClock"""
for minute_emission in (False, True):
# MinuteSimulationClock also relies on to_datetime, shall not be
# created in the patch block
msc = MinuteSimulationClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(8, 45), "US/Eastern"),
minute_emission
)
msc_events = list(msc)
with patch('zipline.gens.realtimeclock.pd.to_datetime') as to_dt, \
patch('zipline.gens.realtimeclock.sleep') as sleep:
rtc = iter(RealtimeClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(8, 45), "US/Eastern"),
minute_emission
))
self.internal_clock = \
pd.Timestamp("2017-04-20 00:00", tz='UTC')
to_dt.side_effect = self.get_clock
sleep.side_effect = self.advance_clock
rtc_events = list(rtc)
for rtc_event, msc_event in zip_longest(rtc_events, msc_events):
self.assertEquals(rtc_event, msc_event)
self.assertEquals(len(rtc_events), len(msc_events))
def test_time_skew(self):
"""Tests that RealtimeClock's time_skew parameter behaves as
expected"""
for time_skew in (pd.Timedelta("2 hour"), pd.Timedelta("-120 sec")):
with patch('zipline.gens.realtimeclock.pd.to_datetime') as to_dt, \
patch('zipline.gens.realtimeclock.sleep') as sleep:
clock = RealtimeClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(11, 31), "US/Eastern"),
False,
time_skew
)
to_dt.side_effect = self.get_clock
sleep.side_effect = self.advance_clock
start_time = pd.Timestamp("2017-04-20 15:31", tz='UTC')
self.internal_clock = start_time
events = list(clock)
# Event 0 is SESSION_START which always happens at 00:00.
ts, event_type = events[1]
self.assertEquals(ts, start_time + time_skew)
def test_midday_start(self):
"""Tests that RealtimeClock is able to execute if started mid-day"""
msc = MinuteSimulationClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(8, 45), "US/Eastern"),
False
)
msc_events = list(msc)
with patch('zipline.gens.realtimeclock.pd.to_datetime') as to_dt, \
patch('zipline.gens.realtimeclock.sleep') as sleep:
rtc = RealtimeClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(8, 45), "US/Eastern"),
False
)
to_dt.side_effect = self.get_clock
sleep.side_effect = self.advance_clock
self.internal_clock = pd.Timestamp("2017-04-20 15:00", tz='UTC')
rtc_events = list(rtc)
# Count the mid-day position in the MinuteSimulationClock's events:
# Simulation Tick: 2017-04-20 00:00:00+00:00 - 1 (SESSION_START)
# Simulation Tick: 2017-04-20 12:45:00+00:00 - 4 (BEFORE_TRADING_START)
# Simulation Tick: 2017-04-20 13:31:00+00:00 - 0 (BAR)
msc_midday_position = 2 + 90
self.assertEquals(rtc_events[0], msc_events[0]) # Session start bar
# before_trading_start is fired immediately if we're after 8:45 EDT
event_time, event_type = rtc_events[1]
self.assertEquals(event_time,
pd.Timestamp("2017-04-20 15:00", tz='UTC'))
self.assertEquals(event_type, BEFORE_TRADING_START_BAR)
self.assertEquals(rtc_events[2:], msc_events[msc_midday_position:])
def test_afterhours_start(self):
"""Tests that RealtimeClock returns immediately if started after RTH"""
with patch('zipline.gens.realtimeclock.pd.to_datetime') as to_dt, \
patch('zipline.gens.realtimeclock.sleep') as sleep:
rtc = RealtimeClock(
self.sessions,
self.opens,
self.closes,
days_at_time(self.sessions, time(8, 45), "US/Eastern"),
False
)
to_dt.side_effect = self.get_clock
sleep.side_effect = self.advance_clock
self.internal_clock = pd.Timestamp("2017-04-20 20:05", tz='UTC')
events = list(rtc)
self.assertEquals(len(events), 2)
# SESSION_START & BEFORE_TRADING_START_BAR, which are always triggered.
_, event_type = events[0]
self.assertEquals(event_type, SESSION_START)
event_time, event_type = events[1]
self.assertEquals(event_time,
pd.Timestamp("2017-04-20 20:05", tz='UTC'))
self.assertEquals(event_type, BEFORE_TRADING_START_BAR)
class TestPersistence(WithSimParams, WithTradingEnvironment, ZiplineTestCase):
def noop(*args, **kwargs):
pass
def make_trading_algo(self, state_filename, algo_filename=None,
initialize=noop, handle_data=noop):
return LiveTradingAlgorithm(
namespace={},
env=self.make_trading_environment(),
get_pipeline_loader=self.make_load_function(),
sim_params=self.make_simparams(),
state_filename=state_filename,
algo_filename=algo_filename,
initialize=initialize,
handle_data=handle_data,
script=None)
@tempdir()
def test_live_trading_algorithm_creates_state_file(self, tmpdir):
algo_text = b"""
def initialize(context):
pass
def handle_data(context, data):
pass
"""
algo_filename = "algo.py"
algo_path = tmpdir.write(algo_filename, algo_text)
state_filename = os.path.join(tmpdir.path, "state_file")
algo = self.make_trading_algo(state_filename, algo_path)
assert not os.path.exists(state_filename)
algo.initialize()
assert os.path.getsize(state_filename) > 0
@tempdir()
def test_live_trading_algorithm_loads_state_file(self, tmpdir):
state_filename = os.path.join(tmpdir.path, "state_file")
def initialize_1(context):
context.state_from_initialize = 7
def handle_data_1(context, data):
context.state_from_handle_data = 11
algo_1 = self.make_trading_algo(state_filename,
initialize=initialize_1,
handle_data=handle_data_1)
algo_1.initialize()
algo_1.handle_data(data=sentinel.data)
def initialize_2(context):
assert False, "initialize shouldn't be called if state is loaded"
def handle_data_2(context, data):
assert False, "handle_data shouldn't be called"
algo_2 = self.make_trading_algo(state_filename,
initialize=initialize_2,
handle_data=handle_data_2)
algo_2.initialize()
assert algo_2.state_from_initialize == 7
assert algo_2.state_from_handle_data == 11
@tempdir()
def test_state_load_with_corrupt_state(self, tmpdir):
state_filename = os.path.join(tmpdir.path, "state_file")
algo_1 = self.make_trading_algo(state_filename,
initialize=TestPersistence.noop,
handle_data=TestPersistence.noop)
tmpdir.write("state_file", b"roken")
with self.assertRaises(ValueError) as e:
algo_1.initialize()
assert "state file" in str(e.exception)
@tempdir()
def test_context_persistence_checksum(self, tmpdir):
algo_text_1 = b"""
def initialize(context):
context.state_from_initialize = 11
def handle_data(context, data):
context.state_from_handle_data = 13
"""
algo_filename_1 = "algo_1.py"
algo_path_1 = tmpdir.write(algo_filename_1, algo_text_1)
state_filename_1 = os.path.join(tmpdir.path, "state_file_1")
algo_1 = self.make_trading_algo(state_filename_1,
algo_filename=algo_path_1)
algo_1.initialize()
algo_1.handle_data(data=sentinel.data)
algo_text_2 = b"""
def initialize(context):
context.state_from_initialize = 7
def handle_data(context, data):
context.state_from_handle_data = 5
"""
algo_filename_2 = "algo_2.py"
algo_path_2 = tmpdir.write(algo_filename_2, algo_text_2)
state_filename_2 = os.path.join(tmpdir.path, "state_file_2")
algo_2 = self.make_trading_algo(state_filename_2,
algo_filename=algo_path_2)
algo_2.initialize()
algo_2.handle_data(data=sentinel.data)
algo_1_wrong_state = self.make_trading_algo(state_filename_2,
algo_filename=algo_path_1)
algo_2_wrong_state = self.make_trading_algo(state_filename_1,
algo_filename=algo_path_2)
with self.assertRaises(TypeError) as e1:
algo_1_wrong_state.initialize()
assert "state file" in str(e1.exception)
with self.assertRaises(TypeError) as e2:
algo_2_wrong_state.initialize()
assert "state file" in str(e2.exception)
@tempdir()
def test_context_persistence_exclude_list(self, tmpdir):
class Context(object):
def __init__(self, rsi=None, sma=None,
trading_client=None, event_manager=None):
self.rsi = rsi
self.sma = sma
self.trading_client = trading_client
self.event_manager = event_manager
context = Context(rsi=17.2, sma=40.4, trading_client=lambda x: x + 3,
event_manager=[None, False])
exclude_list = ['trading_client', 'event_manager']
checksum = 'robocop'
state_file_path = os.path.join(tmpdir.path, "state_file")
store_context(state_file_path, context, checksum, exclude_list)
restored_context = Context()
load_context(state_file_path, restored_context, checksum)
assert restored_context.__dict__.keys() == context.__dict__.keys()
assert restored_context.rsi == context.rsi
assert restored_context.sma == context.sma
assert restored_context.trading_client is None
assert restored_context.event_manager is None
class TestLiveTradingAlgorithm(WithSimParams,
WithDataPortal,
WithTradingEnvironment,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
ASSET_FINDER_EQUITY_SYMBOLS = ("SPY", "XIV")
START_DATE = pd.to_datetime('2017-01-03', utc=True)
END_DATE = pd.to_datetime('2017-04-26', utc=True)
SIM_PARAMS_DATA_FREQUENCY = 'minute'
SIM_PARAMS_EMISSION_RATE = 'minute'
def test_live_trading_supports_orders_outside_ingested_period(self):
def create_initialized_algo(trading_algorithm_class, current_dt):
def initialize(context):
pass
def handle_data(context, data):
context.order_value(context.symbol("SPY"), 100)
algo = trading_algorithm_class(
namespace={},
env=self.make_trading_environment(),
get_pipeline_loader=self.make_load_function(),
sim_params=self.make_simparams(),
state_filename='blah',
algo_filename='foo',
initialize=initialize,
handle_data=handle_data,
script=None)
algo.initialize()
algo.initialized = True # Normally this is set through algo.run()
algo.datetime = current_dt
return algo
current_dt = self.END_DATE + pd.Timedelta("1 day")
backtest_algo = create_initialized_algo(TradingAlgorithm, current_dt)
with self.assertRaises(CannotOrderDelistedAsset):
backtest_algo.handle_data(data=sentinel.data)
broker = MagicMock(spec=Broker)
live_algo = create_initialized_algo(
partial(LiveTradingAlgorithm, broker=broker), current_dt)
live_algo.trading_client = MagicMock(spec=LiveAlgorithmExecutor)
live_algo.trading_client.current_data = Mock()
live_algo.trading_client.current_data.current.return_value = 12
live_algo.handle_data(data=sentinel.data)
assert live_algo.broker.order.called
assert live_algo.trading_client.current_data.current.called
def test_data_portal_live_extends_ingested_data(self):
assets = [self.asset_finder.retrieve_asset(1), ]
rt_bars = pd.DataFrame(
index=pd.date_range(start='2017-09-28 10:11:00',
end='2017-09-28 10:45:00',
freq='1 Min', tz='utc'),
columns=pd.MultiIndex.from_product(
[assets,
['open', 'high', 'low', 'close', 'volume']]),
data=np.random.randn(35, 5)
)
broker = MagicMock(Broker)
broker.get_realtime_bars.return_value = rt_bars
data_portal_live = DataPortalLive(
broker,
asset_finder=self.data_portal.asset_finder,
trading_calendar=self.data_portal.trading_calendar,
first_trading_day=self.data_portal._first_available_session,
equity_daily_reader=(
self.bcolz_equity_daily_bar_reader
if self.DATA_PORTAL_USE_DAILY_DATA else
None
),
equity_minute_reader=(
self.bcolz_equity_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
adjustment_reader=(
self.adjustment_reader
if self.DATA_PORTAL_USE_ADJUSTMENTS else
None
),
)
# Test with overall bar count > available realtime bar count
end_dt = pd.to_datetime('2017-03-03 10:00:00', utc=True)
bar_count = 1000
combined_data = data_portal_live.get_history_window(
assets, end_dt, bar_count=bar_count, frequency='1m',
field='price', data_frequency='1m')
expected_bars = rt_bars[-bar_count:].swaplevel(0, 1, axis=1)['close']
assert len(combined_data) == bar_count
assert expected_bars.isin(combined_data).all().all()
# Test with overall bar count < available realtime bar count
end_dt = pd.to_datetime('2017-03-03 10:00:00', utc=True)
bar_count = 10
combined_data = data_portal_live.get_history_window(
assets, end_dt, bar_count=bar_count, frequency='1m',
field='price', data_frequency='1m')
expected_bars = rt_bars[-bar_count:].swaplevel(0, 1, axis=1)['close']
assert len(combined_data) == bar_count
assert expected_bars.isin(combined_data).all().all()
class TestIBBroker(WithSimParams, ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
ASSET_FINDER_EQUITY_SYMBOLS = ("SPY", "XIV")
@staticmethod
def _tws_bars():
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
tws = TWSConnection("localhost:9999:1111")
tws._add_bar('SPY', 12.4, 10,
pd.to_datetime('2017-09-27 10:30:00', utc=True),
10, 12.401, False)
tws._add_bar('SPY', 12.41, 10,
pd.to_datetime('2017-09-27 10:30:40', utc=True),
20, 12.411, False)
tws._add_bar('SPY', 12.44, 20,
pd.to_datetime('2017-09-27 10:31:10', utc=True),
40, 12.441, False)
tws._add_bar('SPY', 12.74, 5,
pd.to_datetime('2017-09-27 10:37:10', utc=True),
45, 12.741, True)
tws._add_bar('SPY', 12.99, 15,
pd.to_datetime('2017-09-27 12:10:00', utc=True),
60, 12.991, False)
tws._add_bar('XIV', 100.4, 100,
pd.to_datetime('2017-09-27 9:32:00', utc=True),
100, 100.401, False)
tws._add_bar('XIV', 100.41, 100,
pd.to_datetime('2017-09-27 9:32:20', utc=True),
200, 100.411, True)
tws._add_bar('XIV', 100.44, 200,
pd.to_datetime('2017-09-27 9:41:10', utc=True),
400, 100.441, False)
tws._add_bar('XIV', 100.74, 50,
pd.to_datetime('2017-09-27 11:42:10', utc=True),
450, 100.741, False)
return tws.bars
@staticmethod
def _create_contract(symbol):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = 'STK'
return contract
@staticmethod
def _create_order(action, qty, order_type, limit_price, stop_price):
order = Order()
order.m_action = action
order.m_totalQuantity = qty
order.m_auxPrice = stop_price
order.m_lmtPrice = limit_price
order.m_orderType = order_type
return order
@staticmethod
def _create_order_state(status_):
status = OrderState()
status.m_status = status_
return status
@staticmethod
def _create_exec_detail(order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id):
exec_detail = Execution()
exec_detail.m_orderId = order_id
exec_detail.m_shares = shares
exec_detail.m_cumQty = cum_qty
exec_detail.m_price = price
exec_detail.m_avgPrice = avg_price
exec_detail.m_time = exec_time
exec_detail.m_execId = exec_id
return exec_detail
@patch('zipline.gens.brokers.ib_broker.TWSConnection')
def test_get_spot_value(self, tws):
dt = None # dt is not used in real broker
data_freq = 'minute'
asset = self.env.asset_finder.retrieve_asset(1)
bars = {'last_trade_price': [12, 10, 11, 14],
'last_trade_size': [1, 2, 3, 4],
'total_volume': [10, 10, 10, 10],
'vwap': [12.1, 10.1, 11.1, 14.1],
'single_trade_flag': [0, 1, 0, 1]}
last_trade_times = [pd.to_datetime('2017-06-16 10:30:00', utc=True),
pd.to_datetime('2017-06-16 10:30:11', utc=True),
pd.to_datetime('2017-06-16 10:30:30', utc=True),
pd.to_datetime('2017-06-17 10:31:9', utc=True)]
index = pd.DatetimeIndex(last_trade_times)
broker = IBBroker(sentinel.tws_uri)
tws.return_value.bars = {asset.symbol: pd.DataFrame(
index=index, data=bars)}
price = broker.get_spot_value(asset, 'price', dt, data_freq)
last_trade = broker.get_spot_value(asset, 'last_traded', dt, data_freq)
open_ = broker.get_spot_value(asset, 'open', dt, data_freq)
high = broker.get_spot_value(asset, 'high', dt, data_freq)
low = broker.get_spot_value(asset, 'low', dt, data_freq)
close = broker.get_spot_value(asset, 'close', dt, data_freq)
volume = broker.get_spot_value(asset, 'volume', dt, data_freq)
# Only the last minute is taken into account, therefore
# the first bar is ignored
assert price == bars['last_trade_price'][-1]
assert last_trade == last_trade_times[-1]
assert open_ == bars['last_trade_price'][1]
assert high == max(bars['last_trade_price'][1:])
assert low == min(bars['last_trade_price'][1:])
assert close == bars['last_trade_price'][-1]
assert volume == sum(bars['last_trade_size'][1:])
def test_get_realtime_bars_produces_correct_df(self):
bars = self._tws_bars()
with patch('zipline.gens.brokers.ib_broker.TWSConnection'):
broker = IBBroker(sentinel.tws_uri)
broker._tws.bars = bars
assets = (self.env.asset_finder.retrieve_asset(1),
self.env.asset_finder.retrieve_asset(2))
realtime_history = broker.get_realtime_bars(assets, '1m')
asset_spy = self.env.asset_finder.retrieve_asset(1)
asset_xiv = self.env.asset_finder.retrieve_asset(2)
assert asset_spy in realtime_history
assert asset_xiv in realtime_history
spy = realtime_history[asset_spy]
xiv = realtime_history[asset_xiv]
assert list(spy.columns) == ['open', 'high', 'low', 'close', 'volume']
assert list(xiv.columns) == ['open', 'high', 'low', 'close', 'volume']
# There are 159 minutes between the first (XIV @ 2017-09-27 9:32:00)
# and the last bar (SPY @ 2017-09-27 12:10:00)
assert len(realtime_history) == 159
spy_non_na = spy.dropna()
xiv_non_na = xiv.dropna()
assert len(spy_non_na) == 4
assert len(xiv_non_na) == 3
assert spy_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 10:30:00', utc=True)
assert spy_non_na.iloc[0].open == 12.40
assert spy_non_na.iloc[0].high == 12.41
assert spy_non_na.iloc[0].low == 12.40
assert spy_non_na.iloc[0].close == 12.41
assert spy_non_na.iloc[0].volume == 20
assert spy_non_na.iloc[1].name == pd.to_datetime(
'2017-09-27 10:31:00', utc=True)
assert spy_non_na.iloc[1].open == 12.44
assert spy_non_na.iloc[1].high == 12.44
assert spy_non_na.iloc[1].low == 12.44
assert spy_non_na.iloc[1].close == 12.44
assert spy_non_na.iloc[1].volume == 20
assert spy_non_na.iloc[-1].name == pd.to_datetime(
'2017-09-27 12:10:00', utc=True)
assert spy_non_na.iloc[-1].open == 12.99
assert spy_non_na.iloc[-1].high == 12.99
assert spy_non_na.iloc[-1].low == 12.99
assert spy_non_na.iloc[-1].close == 12.99
assert spy_non_na.iloc[-1].volume == 15
assert xiv_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 9:32:00', utc=True)
assert xiv_non_na.iloc[0].open == 100.4
assert xiv_non_na.iloc[0].high == 100.41
assert xiv_non_na.iloc[0].low == 100.4
assert xiv_non_na.iloc[0].close == 100.41
assert xiv_non_na.iloc[0].volume == 200
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_new_order_appears_in_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
broker._tws.nextValidId(0)
asset = self.env.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert len(broker.orders) == 1
assert broker.orders[order.id] == order
assert order.open
assert order.asset == asset
assert order.amount == amount
assert order.limit == limit_price
assert order.stop == stop_price
assert (order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_open_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
asset = self.env.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
ib_order_id = 3
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 40, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.HELD
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_exec_details(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
asset = self.env.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
(req_id, ib_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id) = (7, 3, 12, 40,
12.43, 12.50,
'20160101 14:20', 4)
ib_contract = self._create_contract(str(asset.symbol))
exec_detail = self._create_exec_detail(
ib_order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id)
broker._tws.execDetails(req_id, ib_contract, exec_detail)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_updated_from_order_status(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
broker._tws.nextValidId(0)
# orderStatus calls only work if a respective order has been created
asset = self.env.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
ib_order_id = order.broker_order_id
status = 'Filled'
filled = 14
remaining = 9
avg_fill_price = 12.4
perm_id = 99
parent_id = 88
last_fill_price = 12.3
client_id = 1111
why_held = ''
broker._tws.orderStatus(ib_order_id,
status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id,
why_held)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.FILLED
assert not zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == amount
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_multiple_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.env.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
order_count += 1
assert order_count == len(broker.orders)
assert broker.orders[order.id] == order
is_buy = amount > 0
assert order.stop == order_style.get_stop_price(is_buy)
assert order.limit == order_style.get_limit_price(is_buy)
def test_order_ref_serdes(self):
# Even though _create_order_ref and _parse_order_ref are private,
# it is helpful to test them as they play a key role in re-creating orders
order = self._create_order("BUY", 66, "STP LMT", 13.4, 44.2)
serialized = IBBroker._create_order_ref(order)
deserialized = IBBroker._parse_order_ref(serialized)
assert deserialized['action'] == order.m_action
assert deserialized['qty'] == order.m_totalQuantity
assert deserialized['order_type'] == order.m_orderType
assert deserialized['limit_price'] == order.m_lmtPrice
assert deserialized['stop_price'] == order.m_auxPrice
assert (deserialized['dt'] - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
import glob
import os
import pandas
WHICH_IMAGING = "CQ1-ctf011-t24"
DO_I_HAVE_TO_MERGE_FILES_FIRST = True
NAME_OF_COMPOUND_WHICH_IS_CONTROL = "DMSO"
def gather_csv_data_into_one_file(path_to_csv_files, output_filename = "output"):
filenames = glob.glob(f"{path_to_csv_files}/*Stats*.csv")
print(filenames)
filenames = list([os.path.basename(f) for f in filenames])
print(filenames)
keys_of_files = [i[:-4] for i in filenames]
## check for titles longer than 31 characters -- some applications may not be able to read the file
keys_of_files_shortened = list(key[:31] for key in keys_of_files)
if len(set(keys_of_files_shortened)) < len(keys_of_files):
raise Exception
df_collect_all = None
for i, (filename_basename, filename_shortened) in enumerate(zip(keys_of_files, keys_of_files_shortened), start=1):
filename = filename_basename + ".csv"
print(f"Acting on file {i} of {len(keys_of_files)} ({filename})...")
df = pandas.read_csv(os.path.join(path_to_csv_files, filename))
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING = '] Count'
column_names_which_contain_the_word_count = [col for col in df.columns if
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING in col]
assert len(column_names_which_contain_the_word_count) == 1
#print(column_names_which_contain_the_word_count)
WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN = "Cell_Count_"
new_name_of_relevant_column = f"{WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN}{filename_shortened}"
df_renamed = df.rename(columns={ column_names_which_contain_the_word_count[0]: new_name_of_relevant_column })
#print(df_renamed)
MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES = [
# "ID" is not the same in all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description",
]
KEEP_THOSE_COLUMNS_INITIALLY = [
# "ID" is not the same in all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description"
]
if df_collect_all is None:
df_collect_all = df_renamed[KEEP_THOSE_COLUMNS_INITIALLY]
df_collect_all["well name"] = df_renamed["WellName"].str.replace("-","")
for col in MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES:
for x, y in zip(df_collect_all[col].values, df_renamed[col].values):
if pandas.isna(x) and pandas.isna(y):
from sklearn.base import TransformerMixin
from suricate.preutils import concatixnames
import pandas as pd
class ConnectorMixin(TransformerMixin):
def __init__(self, ixname='ix', source_suffix='source', target_suffix='target'):
"""
Args:
ixname: 'ix'
source_suffix: 'source'
target_suffix: 'target'
"""
TransformerMixin.__init__(self)
self.ixname = ixname
self.source_suffix = source_suffix
self.target_suffix = target_suffix
self.ixnamesource, self.ixnametarget, self.ixnamepairs = concatixnames(
ixname=self.ixname,
source_suffix=self.source_suffix,
target_suffix=self.target_suffix
)
def fit(self, X, y=None):
return self
def transform(self, X):
"""
Args:
X:
Returns:
pd.DataFrame: with index
"""
Xt = pd.DataFrame()
return Xt
def getsbs(self, X, on_ix=None):
"""
Args:
X: input data
on_ix (pd.MultiIndex): Optional, specify the index on which you want the side-by-side view
Returns:
pd.DataFrame
"""
Xt = pd.DataFrame()
return Xt
def fetch_source(self, X, ix):
"""
Args:
X:
ix (pd.Index):
Returns:
pd.DataFrame
"""
return pd.DataFrame()
def fetch_target(self, X, ix):
"""
Args:
X:
ix (pd.Index):
Returns:
pd.DataFrame
"""
return pd.DataFrame()
def fit_transform(self, X, y=None, **fit_params):
"""
Will send back the similarity matrix of the connector with the index as DataFrame
Args:
X: input data
y:
**fit_params:
Returns:
pd.DataFrame
"""
self.fit(X=X, y=y, **fit_params)
return self.transform(X=X)
def multiindex21column(self, on_ix, sep='-'):
"""
Args:
on_ix (pd.MultiIndex): two level multi index (ix_source, ix_target)
sep: separator
Returns:
pd.Index: name 'ix', concatenation of ix_source, sep, on ix_target
"""
df = pd.DataFrame(index=on_ix)
df.reset_index(inplace=True, drop=False)
df[self.ixname] = df[[self.ixnamesource, self.ixnametarget]].astype(str).agg(str(sep).join, axis=1)
df.set_index(self.ixname, inplace=True, drop=True)
return df.index
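# Minimal sketch of the index-flattening trick used in multiindex21column
# (illustrative values only): a two-level pair index becomes a single string
# index of the form "<ix_source>-<ix_target>".
if __name__ == '__main__':
    _pairs = pd.MultiIndex.from_tuples([(1, 10), (2, 20)],
                                       names=['ix_source', 'ix_target'])
    _df = pd.DataFrame(index=_pairs).reset_index(drop=False)
    _df['ix'] = _df[['ix_source', 'ix_target']].astype(str).agg('-'.join, axis=1)
    print(_df.set_index('ix').index.tolist())  # ['1-10', '2-20']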
def multiindex21column(on_ix, sep='-', ixname='ix', ixnamesource='ix_source', ixnametarget='ix_target'):
"""
Args:
on_ix (pd.MultiIndex): two level multi index (ix_source, ix_target)
sep: separator
Returns:
pd.Index: name 'ix', concatenation of ix_source, sep, ix_target
"""
df = pd.DataFrame(index=on_ix)
import functools
from threading import Thread
from contextlib import contextmanager
import signal
from scipy.stats._continuous_distns import _distn_names
import scipy
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy
wqData = waterQuality.DataModelWQ('rbWN5')
siteNoLst = wqData.siteNoLst
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'All')
dirOut = os.path.join(dirWRTDS, 'output')
dirPar = os.path.join(dirWRTDS, 'params')
# read one site's output file as a template for the date index
saveFile = os.path.join(dirOut, siteNoLst[0])
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
t = dfP.index
nt = len(dfP.index)
nc = len(usgs.newC)
ns = len(siteNoLst)
matR = np.ndarray([ns, nt, nc])
matC = np.ndarray([ns, nt, nc])
# calculate residual
t0 = time.time()
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
saveFile = os.path.join(dirOut, siteNo)
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
dfP.index = pd.to_datetime(dfP.index)
"""
inspiration from R Package - PerformanceAnalytics
"""
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.analysis.util import per_series
PER_YEAR_MAP = {
'BA': 1.,
'BAS': 1.,
'A': 1.,
'AS': 1.,
'BQ': 4.,
'BQS': 4.,
'Q': 4.,
'QS': 4.,
'D': 365.,
'B': 252.,
'BMS': 12.,
'BM': 12.,
'MS': 12.,
'M': 12.,
'W': 52.,
}
def guess_freq(index):
# admittedly weak way of doing this...This needs to be abolished
if isinstance(index, (pd.Series, pd.DataFrame)):
index = index.index
if hasattr(index, 'freqstr') and index.freqstr:
return index.freqstr[0]
elif len(index) < 3:
raise Exception('cannot guess frequency with less than 3 items')
else:
lb = min(7, len(index))
idx_zip = lambda: list(zip(index[-lb:-1], index[-(lb-1):]))
diff = min([t2 - t1 for t1, t2, in idx_zip()])
if diff.days <= 1:
if 5 in index.dayofweek or 6 in index.dayofweek:
return 'D'
else:
return 'B'
elif diff.days == 7:
return 'W'
else:
diff = min([t2.month - t1.month for t1, t2, in idx_zip()])
if diff == 1:
return 'M'
diff = min([t2.year - t1.year for t1, t2, in idx_zip()])
if diff == 1:
return 'A'
strs = ','.join([i.strftime('%Y-%m-%d') for i in index[-lb:]])
raise Exception('unable to determine frequency, last %s dates %s' % (lb, strs))
def periodicity(freq_or_frame):
"""
resolve the number of periods per year
"""
if hasattr(freq_or_frame, 'rule_code'):
rc = freq_or_frame.rule_code
rc = rc.split('-')[0]
factor = PER_YEAR_MAP.get(rc, None)
if factor is not None:
return factor / abs(freq_or_frame.n)
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, str):
factor = PER_YEAR_MAP.get(freq_or_frame, None)
if factor is not None:
return factor
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
freq = freq_or_frame.index.freq
if not freq:
freq = pd.infer_freq(freq_or_frame.index)
if freq:
return periodicity(freq)
else:
# Attempt to resolve it
import warnings
freq = guess_freq(freq_or_frame.index)
warnings.warn('frequency not set. guessed it to be %s' % freq)
return periodicity(freq)
else:
return periodicity(freq)
else:
raise ValueError("periodicity expects DataFrame, Series, or rule_code property")
periods_in_year = periodicity
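# Quick sanity sketch (string rule codes only, resolved through PER_YEAR_MAP):
if __name__ == '__main__':
    print(periodicity('M'))       # 12.0 periods per year
    print(periodicity('B'))       # 252.0 periods per year
    print(periods_in_year('W'))   # 52.0, same function under its alias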
def _resolve_periods_in_year(scale, frame):
""" Convert the scale to an annualzation factor. If scale is None then attempt to resolve from frame. If scale is a scalar then
use it. If scale is a string then use it to lookup the annual factor
"""
if scale is None:
return periodicity(frame)
elif isinstance(scale, str):
return periodicity(scale)
elif np.isscalar(scale):
return scale
else:
raise ValueError("scale must be None, scalar, or string, not %s" % type(scale))
def excess_returns(returns, bm=0):
"""
Return the excess amount of returns above the given benchmark bm
"""
return returns - bm
def returns(prices, method='simple', periods=1, fill_method='pad', limit=None, freq=None):
"""
compute the returns for the specified prices.
method: [simple,compound,log], compound is log
"""
if method not in ('simple', 'compound', 'log'):
raise ValueError("Invalid method type. Valid values are ('simple', 'compound')")
if method == 'simple':
return prices.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)
else:
if freq is not None:
raise NotImplementedError("TODO: implement this logic if needed")
if isinstance(prices, pd.Series):
if fill_method is None:
data = prices
else:
data = prices.fillna(method=fill_method, limit=limit)
data = np.log(data / data.shift(periods=periods))
mask = pd.isnull(prices.values)
np.putmask(data.values, mask, np.nan)
return data
else:
return pd.DataFrame(
{name: returns(col, method, periods, fill_method, limit, freq) for name, col in prices.items()},
columns=prices.columns,
index=prices.index)
def returns_cumulative(returns, geometric=True, expanding=False):
""" return the cumulative return
Parameters
----------
returns : DataFrame or Series
geometric : bool, default is True
If True, geometrically link returns
expanding : bool default is False
If True, return expanding series/frame of returns
If False, return the final value(s)
"""
if expanding:
if geometric:
return (1. + returns).cumprod() - 1.
else:
return returns.cumsum()
else:
if geometric:
return (1. + returns).prod() - 1.
else:
return returns.sum()
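# Minimal sketch (made-up period returns): geometric linking compounds the
# periods, arithmetic linking simply sums them.
if __name__ == '__main__':
    _r = pd.Series([0.10, -0.05, 0.02])
    print(returns_cumulative(_r, geometric=True))    # (1.10 * 0.95 * 1.02) - 1 ~= 0.0659
    print(returns_cumulative(_r, geometric=False))   # 0.10 - 0.05 + 0.02 = 0.07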
def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
""" return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically
"""
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods)
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
""" return the annualized cumulative returns
Parameters
----------
returns : DataFrame or Series
geometric : link the returns geometrically
scale: None or scalar or string (ie 12 for months in year),
If None, attempt to resolve from returns
If scalar, then use this as the annualization factor
If string, then pass this to periodicity function to resolve annualization factor
expanding: bool, default is False
If True, return expanding series/frames.
If False, return final result.
"""
scale = _resolve_periods_in_year(scale, returns)
if expanding:
if geometric:
n = pd.expanding_count(returns)
return ((1. + returns).cumprod() ** (scale / n)) - 1.
else:
return pd.expanding_mean(returns) * scale
else:
if geometric:
n = returns.count()
return ((1. + returns).prod() ** (scale / n)) - 1.
else:
return returns.mean() * scale
def drawdowns(returns, geometric=True):
"""
compute the drawdown series for the period return series
return: periodic return Series or DataFrame
"""
wealth = 1. + returns_cumulative(returns, geometric=geometric, expanding=True)
values = wealth.values
if values.ndim == 2:
ncols = values.shape[-1]
values = np.vstack(([1.] * ncols, values))
maxwealth = pd.expanding_max(values)[1:]
dds = wealth / maxwealth - 1.
dds[dds > 0] = 0 # Can happen if first returns are positive
return dds
elif values.ndim == 1:
values = np.hstack(([1.], values))
maxwealth = pd.expanding_max(values)[1:]
dds = wealth / maxwealth - 1.
dds[dds > 0] = 0 # Can happen if first returns are positive
return dds
else:
raise ValueError('unable to process array with %s dimensions' % values.ndim)
def max_drawdown(returns=None, geometric=True, dd=None, inc_date=False):
"""
compute the max draw down.
returns: period return Series or DataFrame
dd: drawdown Series or DataFrame (mutually exclusive with returns)
"""
if (returns is None and dd is None) or (returns is not None and dd is not None):
raise ValueError('returns and drawdowns are mutually exclusive')
if returns is not None:
dd = drawdowns(returns, geometric=geometric)
if isinstance(dd, pd.DataFrame):
vals = [max_drawdown(dd=dd[c], inc_date=inc_date) for c in dd.columns]
cols = ['maxxdd'] + (inc_date and ['maxdd_dt'] or [])
res = pd.DataFrame(vals, columns=cols, index=dd.columns)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
KAGGLE = False
if KAGGLE:
get_ipython().system('cp ../input/gdcm-conda-install/gdcm.tar .')
get_ipython().system('tar -xvzf gdcm.tar')
get_ipython().system('conda install --offline ./gdcm/gdcm-2.8.9-py37h71b2a6d_0.tar.bz2')
get_ipython().system('pip install ../input/efficientnet-pytorch063/efficientnet_pytorch-0.6.3-py3-none-any.whl')
get_ipython().system('pip install -q ../input/noamior/monai-0.3.0-202010042353-py3-none-any.whl')
import gdcm
# In[ ]:
from efficientnet_pytorch import EfficientNet
import efficientnet_pytorch
import torch
print(efficientnet_pytorch.__version__)
# In[ ]:
def load_cnn(filepath,name):
cnn5 = EfficientNet.from_name(name).cuda()
if name == 'efficientnet-b5':
cnn5._fc = torch.nn.Linear(in_features=2048, out_features=1, bias=True)
elif name == 'efficientnet-b4':
cnn5._fc = torch.nn.Linear(in_features=1536+256, out_features=1, bias=True)
elif name == 'efficientnet-b3':
cnn5._fc = torch.nn.Linear(in_features=1536, out_features=1, bias=True)
elif name == 'efficientnet-b2':
cnn5._fc = torch.nn.Linear(in_features=1408, out_features=1, bias=True)
cnn5 = torch.nn.DataParallel(cnn5)
cnn5.load_state_dict(torch.load(filepath))
cnn5.eval()
return cnn5
# In[ ]:
import os
if not KAGGLE:
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import json
with open('settings.json') as json_file:
settings = json.load(json_file)
# In[ ]:
if KAGGLE:
cnnb3 = [load_cnn(f'../input/rsnalstm2/efficientnet-b3_cnn_{row}_best.pth','efficientnet-b3') for row in [2,3]]
cnnb4 = [load_cnn(f'../input/rsnalstm2/efficientnet-b4_cnn_{row}_best.pth','efficientnet-b4') for row in [2,4]]
cnnb5 = [load_cnn(f'../input/rsnalstm2/efficientnet-b5_cnn_{row}_best.pth','efficientnet-b5') for row in [1,3]]
else:
model_path = settings['MODEL_PATH2D']
cnnb3 = [load_cnn(f'./{model_path}/efficientnet-b3_cnn_{row}_best.pth','efficientnet-b3') for row in [2,3]]
cnnb4 = [load_cnn(f'./{model_path}/efficientnet-b4_cnn_{row}_best.pth','efficientnet-b4') for row in [2,4]]
cnnb5 = [load_cnn(f'./{model_path}/efficientnet-b5_cnn_{row}_best.pth','efficientnet-b5') for row in [1,3]]
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
import cv2
import os
from matplotlib import pyplot as plt
import os
import cv2
import numpy as np
import pandas as pd
from torch.utils.data import TensorDataset, DataLoader,Dataset
import albumentations as albu
import functools
import torch
from tqdm.auto import tqdm
from joblib import Parallel, delayed
from joblib import parallel_backend
# In[ ]:
if KAGGLE:
test_csv_path = '../input/rsna-str-pulmonary-embolism-detection/test.csv'
else:
test_csv_path = settings['test_csv_path']#'./Datasets/RSNA/dicom/test.csv'
# In[ ]:
df = pd.read_csv(test_csv_path)
# -*- coding: utf-8 -*-
"""Console script for pyvirchow."""
import os
import six
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
from pyvirchow.io.operations import get_annotation_bounding_boxes
from pyvirchow.io.operations import get_annotation_polygons
from pyvirchow.io.operations import path_leaf
from pyvirchow.io.operations import read_as_rgb
from pyvirchow.io.operations import WSIReader
from pyvirchow.io.tiling import get_all_patches_from_slide
from pyvirchow.io.tiling import save_images_and_mask, generate_tiles, generate_tiles_fast
from pyvirchow.normalization import VahadaneNormalization
from pyvirchow.morphology.patch_extractor import TissuePatch
from pyvirchow.morphology.mask import get_common_interior_polygons
from tqdm import tqdm
import warnings
from multiprocessing import Pool
from pyvirchow.segmentation import label_nuclei, summarize_region_properties
from pyvirchow.misc.parallel import ParallelExecutor
# from pyvirchow.deep_model.model import slide_level_map
# from pyvirchow.deep_model.random_forest import random_forest
from pyvirchow.misc import xmltojson
from scipy.misc import imsave
from skimage.color import rgb2hsv
from collections import defaultdict
import joblib
from joblib import delayed
from joblib import parallel_backend
import numpy as np
from six import iteritems
import click
from shapely.geometry import Polygon as shapelyPolygon
from click_help_colors import HelpColorsGroup
import glob
from PIL import Image
click.disable_unicode_literals_warning = True
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
import pandas as pd
warnings.filterwarnings("ignore", category=RuntimeWarning)
np.warnings.filterwarnings("ignore")
COLUMNS = [
"area",
"bbox_area",
"compactness",
"convex_area",
"eccentricity",
"equivalent_diameter",
"extent",
"fractal_dimension",
"inertia_tensor_eigvals_1",
"inertia_tensor_eigvals_2",
"major_axis_length",
"max_intensity",
"mean_intensity",
"mean_intensity_entire_image",
"minor_axis_length",
"moments_central_1",
"moments_central_10",
"moments_central_11",
"moments_central_12",
"moments_central_13",
"moments_central_14",
"moments_central_15",
"moments_central_16",
"moments_central_2",
"moments_central_3",
"moments_central_4",
"moments_central_5",
"moments_central_6",
"moments_central_7",
"moments_central_8",
"moments_central_9",
"moments_hu_1",
"moments_hu_2",
"moments_hu_3",
"moments_hu_4",
"moments_hu_5",
"moments_hu_6",
"moments_hu_7",
"nuclei",
"nuclei_intensity_over_entire_image",
"orientation",
"perimeter",
"solidity",
"texture",
"total_nuclei_area",
"total_nuclei_area_ratio",
]
@click.group(
cls=HelpColorsGroup, help_headers_color="yellow", help_options_color="green"
)
def cli():
"""pyvirchow: tool for processing WSIs"""
pass
@cli.command(
"create-tissue-masks",
context_settings=CONTEXT_SETTINGS,
help="Extract tissue masks",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_tissue_masks_cmd(indir, level, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for tumor_wsi in tqdm(tumor_wsis):
wsi = WSIReader(tumor_wsi, 40)
tissue_patch = TissuePatch(wsi, level=level)
uid = wsi.uid.replace(".tif", "")
out_file = os.path.join(
savedir, "level_{}".format(level), uid + "_TissuePatch.npy"
)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
np.save(out_file, tissue_patch.otsu_thresholded)
@cli.command(
"create-annotation-masks",
context_settings=CONTEXT_SETTINGS,
help="Extract annotation masks",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_annotation_masks_cmd(indir, jsondir, level, savedir):
"""Extract annotation patches
We assume the masks have already been generated at level say x.
    We also assume the files are arranged in the following hierarchy:
raw data (indir): tumor_wsis/tumor001.tif
json data (jsondir): tumor_jsons/tumor001.json
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for tumor_wsi in tqdm(tumor_wsis):
wsi = WSIReader(tumor_wsi, 40)
uid = wsi.uid.replace(".tif", "")
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.exists(json_filepath):
print("Skipping {} as annotation json not found".format(uid))
continue
out_dir = os.path.join(savedir, "level_{}".format(level))
wsi.annotation_masked(json_filepath=json_filepath, level=level, savedir=out_dir)
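# Hedged usage sketch (illustrative paths only). Annotation jsons are expected
# to be named <uid>.json to match the <uid>.tif slides:
#   pyvirchow create-annotation-masks --indir /data/tumor_wsis \
#       --jsondir /data/tumor_jsons --level 5 --savedir /data/annotation_masks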
@cli.command(
"extract-tumor-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract tumor patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--annmaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option(
"--threshold", help="Threshold for a cell to be called tumor", default=0, type=int
)
def extract_tumor_patches_cmd(
indir, annmaskdir, tismaskdir, level, patchsize, stride, savedir, threshold
):
"""Extract tumor only patches from tumor WSIs.
We assume the masks have already been generated at level say x.
    We also assume the files are arranged in the following hierarchy:
raw data (indir): tumor_wsis/tumor001.tif
masks (maskdir): tumor_masks/level_x/tumor001_AnnotationTumorMask.npy';
tumor_masks/level_x/tumor001_AnnotationNormalMask.npy';
We create the output in a similar fashion:
output (outdir): patches/tumor/level_x/tumor001_xcenter_ycenter.png
Strategy:
1. Load tumor annotated masks
2. Load normal annotated masks
3. Do subtraction tumor-normal to ensure only tumor remains.
Truth table:
tumor_mask normal_mask tumour_for_sure
    1           0            1
    1           1            0
    0           1            0
    0           0            0
"""
tumor_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
# Assume that we want to generate these patches at level 0
# So in order to ensure stride at a lower level
# this needs to be discounted
# stride = int(patchsize / (2**level))
stride = min(int(patchsize / (2 ** level)), 4)
for tumor_wsi in tqdm(tumor_wsis):
last_used_x = None
last_used_y = None
wsi = WSIReader(tumor_wsi, 40)
uid = wsi.uid.replace(".tif", "")
filepath = os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
if not os.path.exists(filepath):
print("Skipping {} as mask not found".format(uid))
continue
normal_mask = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationNormalMask.npy"
)
)
tumor_mask = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationTumorMask.npy"
)
)
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
colored_patch = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
)
subtracted_mask = tumor_mask * 1 - normal_mask * 1
subtracted_mask[np.where(subtracted_mask < 0)] = 0
subtracted_mask = np.logical_and(subtracted_mask, tissue_mask)
x_ids, y_ids = np.where(subtracted_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
# print((x_topleft, x_topright, y_topleft, y_bottomright))
mask = subtracted_mask[x_topleft:x_topright, y_topleft:y_bottomright]
# Feed only complete cancer cells
            # Feed if more than 50% of cells are cancerous!
if threshold <= 0:
threshold = 0.5 * (patchsize * patchsize)
if np.sum(mask) > threshold:
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride and diff_y >= stride:
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
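# Hedged usage sketch (placeholder paths; option values are examples only):
#   pyvirchow extract-tumor-patches --indir /data/tumor_wsis \
#       --annmaskdir /data/annotation_masks --tismaskdir /data/tissue_masks \
#       --level 5 --patchsize 128 --stride 128 --savedir /data/patches/tumor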
@cli.command(
"extract-normal-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract normal patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option(
"--annmaskdir", help="Root directory with all annotation mask WSIs", required=False
)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_normal_patches_cmd(
indir, annmaskdir, tismaskdir, level, patchsize, stride, savedir
):
"""Extract tumor only patches from tumor WSIs.
We assume the masks have already been generated at level say x.
    We also assume the files are arranged in the following hierarchy:
raw data (indir): tumor_wsis/tumor001.tif
masks (maskdir): tumor_masks/level_x/tumor001_AnnotationTumorMask.npy';
tumor_masks/level_x/tumor001_AnnotationNormalMask.npy';
We create the output in a similar fashion:
output (outdir): patches/tumor/level_x/tumor001_xcenter_ycenter.png
Strategy:
1. Load tumor annotated masks
2. Load normal annotated masks
    3. Do subtraction normal-tumor to ensure only normal tissue remains.
    Truth table:
    normal_mask  tumor_mask   normal_for_sure
    1            0            1
    1            1            0
    0            1            0
    0            0            0
"""
all_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=True)
# Assume that we want to generate these patches at level 0
# So in order to ensure stride at a lower level
# this needs to be discounted
stride = min(int(patchsize / (2 ** level)), 4)
for wsi in tqdm(all_wsis):
last_used_x = None
last_used_y = None
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
if "normal" in uid:
# Just extract based on tissue patches
x_ids, y_ids = np.where(tissue_mask)
subtracted_mask = tissue_mask
colored_patch = wsi.get_patch_by_level(0, 0, level)
elif "tumor" in uid or "test" in uid:
if not os.path.isfile(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationNormalMask.npy",
)
):
print("Skipping {}".format(uid))
continue
normal_mask = np.load(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationNormalMask.npy",
)
)
tumor_mask = np.load(
os.path.join(
annmaskdir,
"level_{}".format(level),
uid + "_AnnotationTumorMask.npy",
)
)
colored_patch = np.load(
os.path.join(
annmaskdir, "level_{}".format(level), uid + "_AnnotationColored.npy"
)
)
subtracted_mask = normal_mask * 1 - tumor_mask * 1
subtracted_mask[np.where(subtracted_mask < 0)] = 0
subtracted_mask = np.logical_and(subtracted_mask, tissue_mask)
x_ids, y_ids = np.where(subtracted_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
mask = subtracted_mask[x_topleft:x_topright, y_topleft:y_bottomright]
            # Feed if more than 50% of mask pixels are positive
if np.sum(mask) > 0.5 * (patchsize * patchsize):
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride and diff_y >= stride:
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
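# Hedged usage sketch (placeholder paths). Normal slides only require tissue
# masks; tumor/test slides additionally require annotation masks:
#   pyvirchow extract-normal-patches --indir /data/wsis \
#       --annmaskdir /data/annotation_masks --tismaskdir /data/tissue_masks \
#       --level 5 --patchsize 128 --stride 128 --savedir /data/patches/normal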
@cli.command(
"patches-from-coords",
context_settings=CONTEXT_SETTINGS,
help="Extract patches from coordinates file",
)
@click.option("--indir", help="Root directory with all WSIs", required=True)
@click.option("--csv", help="Path to csv with coordinates", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_patches_from_coords_cmd(indir, csv, level, patchsize, savedir):
"""Extract patches from coordinates file at a particular level.
Assumption: Coordinates are assumed to be provided at level 0.
"""
patches_to_extract = defaultdict(list)
with open(csv) as fh:
for line in fh:
try:
filename, x0, y0 = line.split(",")
except:
splitted = line.split("_")
# test files have name like test_001
if len(splitted) == 5:
fileprefix, fileid, x0, y0, _ = splitted
filename = "{}_{}".format(fileprefix, fileid)
elif len(splitted) == 4:
filename, x0, y0, _ = splitted
else:
raise RuntimeError(
"Unable to find parsable format. Mustbe filename,x0,y-"
)
# other files have name like normal001
filename = filename.lower()
x0 = int(x0)
y0 = int(y0)
patches_to_extract[filename].append((x0, y0))
for filename, coordinates in tqdm(list(patches_to_extract.items())):
if "normal" in filename:
filepath = os.path.join(indir, "normal", filename + ".tif")
elif "tumor" in filename:
filepath = os.path.join(indir, "tumor", filename + ".tif")
elif "test" in filename:
filepath = os.path.join(indir, filename + ".tif")
else:
raise RuntimeError("Malformed filename?: {}".format(filename))
wsi = WSIReader(filepath, 40)
uid = wsi.uid.replace(".tif", "")
for x0, y0 in coordinates:
patch = wsi.get_patch_by_level(x0, y0, level, patchsize)
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x0, y0, patchsize
)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
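# Hedged usage sketch. The csv may contain "filename,x0,y0" rows or
# underscore-separated patch names as parsed above (paths are placeholders):
#   pyvirchow patches-from-coords --indir /data/wsis --csv coords.csv \
#       --level 0 --patchsize 256 --savedir /data/patches/from_coords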
@cli.command(
"extract-test-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract patches from testing dataset",
)
@click.option("--indir", help="Root directory with all WSIs", required=True)
@click.option(
"--tismaskdir", help="Root directory with all annotation mask WSIs", required=True
)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option(
"--stride",
default=64,
help="Slide windows by this much to get the next [atj]",
required=True,
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_test_patches_cmd(indir, tismaskdir, level, patchsize, stride, savedir):
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
for wsi in tqdm(wsis):
last_used_y = None
last_used_x = None
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
tissue_mask = np.load(
os.path.join(tismaskdir, "level_{}".format(level), uid + "_TissuePatch.npy")
)
x_ids, y_ids = np.where(tissue_mask)
for x_center, y_center in zip(x_ids, y_ids):
out_file = "{}/level_{}/{}_{}_{}_{}.png".format(
savedir, level, uid, x_center, y_center, patchsize
)
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_topright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
mask = tissue_mask[x_topleft:x_topright, y_topleft:y_bottomright]
if np.sum(mask) > 0.5 * (patchsize * patchsize):
if last_used_x is None:
last_used_x = x_center
last_used_y = y_center
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_center - last_used_x)
diff_y = np.abs(y_center - last_used_y)
if diff_x >= stride or diff_y >= stride:
colored_patch = wsi.get_patch_by_level(0, 0, level)
patch = colored_patch[
x_topleft:x_topright, y_topleft:y_bottomright, :
]
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
last_used_x = x_center
last_used_y = y_center
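# Hedged usage sketch (placeholder paths):
#   pyvirchow extract-test-patches --indir /data/test_wsis \
#       --tismaskdir /data/tissue_masks --level 5 --patchsize 128 \
#       --stride 64 --savedir /data/patches/test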
@cli.command(
"estimate-patches",
context_settings=CONTEXT_SETTINGS,
help="Estimate number of extractable tumor patches from tumor WSIs",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def estimate_patches_cmd(indir, jsondir, level, patchsize, stride, savedir):
all_wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
out_dir = os.path.join(savedir, "level_{}".format(level))
os.makedirs(out_dir, exist_ok=True)
for wsi in tqdm(all_wsis):
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.exists(json_filepath):
print("Skipping {} as annotation json not found".format(uid))
continue
bounding_boxes = get_annotation_bounding_boxes(json_filepath)
polygons = get_annotation_polygons(json_filepath)
tumor_bb = bounding_boxes["tumor"]
normal_bb = bounding_boxes["normal"]
normal_polygons = polygons["normal"]
tumor_polygons = polygons["tumor"]
polygons_dict = {"normal": normal_polygons, "tumor": tumor_polygons}
rectangles_dict = {"normal": normal_bb, "tumor": tumor_bb}
for polygon_key, polygons in iteritems(polygons_dict):
bb = rectangles_dict[polygon_key]
to_write = ""
            with open(os.path.join(savedir, "{}.txt".format(polygon_key)), "w") as fh:
for rectangle, polygon in zip(bb, polygons):
"""
Sample points from rectangle. We will assume we are sampling the
centers of our patch. So if we sample x_center, y_center
from this rectangle, we need to ensure (x_center +/- patchsize/2, y_center +- patchsize/2)
lie inside the polygon
"""
xmin, ymax = rectangle["top_left"]
xmax, ymin = rectangle["bottom_right"]
path = polygon.get_path()
for x_center in np.arange(xmin, xmax, patchsize):
for y_center in np.arange(ymin, ymax, patchsize):
x_topleft = int(x_center - patchsize / 2)
y_topleft = int(y_center - patchsize / 2)
x_bottomright = x_topleft + patchsize
y_bottomright = y_topleft + patchsize
if path.contains_points(
[(x_topleft, y_topleft), (x_bottomright, y_bottomright)]
).all():
to_write = "{}_{}_{}_{}\n".format(
uid, x_center, y_center, patchsize
)
fh.write(to_write)
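# Hedged usage sketch (placeholder paths). Writes candidate patch identifiers
# per annotation type into text files under savedir:
#   pyvirchow estimate-patches --indir /data/tumor_wsis \
#       --jsondir /data/tumor_jsons --level 5 --patchsize 128 --stride 128 \
#       --savedir /data/patch_estimates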
def process_wsi(data):
wsi, jsondir, patchsize, stride, level, dirs, write_image = data
wsi = WSIReader(wsi, 40)
uid = wsi.uid.replace(".tif", "")
scale_factor = wsi.get_level_scale_factor(level)
json_filepath = os.path.join(jsondir, uid + ".json")
if not os.path.isfile(json_filepath):
return
boxes = get_annotation_bounding_boxes(json_filepath)
polygons = get_annotation_polygons(json_filepath)
polygons_to_exclude = {"tumor": [], "normal": []}
for polygon in polygons["tumor"]:
# Does this have any of the normal polygons inside it?
polygons_to_exclude["tumor"].append(
get_common_interior_polygons(polygon, polygons["normal"])
)
for polygon in polygons["normal"]:
# Does this have any of the tumor polygons inside it?
polygons_to_exclude["normal"].append(
get_common_interior_polygons(polygon, polygons["tumor"])
)
for polygon_key in list(polygons.keys()):
last_used_x = None
last_used_y = None
annotated_polygons = polygons[polygon_key]
annotated_boxes = boxes[polygon_key]
# iterate through coordinates in the bounding rectangle
        # and check if they overlap with any other annotations and
# if not fetch a patch at that coordinate from the wsi
annotation_index = 0
for annotated_polygon, annotated_box in zip(
annotated_polygons, annotated_boxes
):
annotation_index += 1
minx, miny = annotated_box["top_left"]
maxx, miny = annotated_box["top_right"]
maxx, maxy = annotated_box["bottom_right"]
minx, maxy = annotated_box["bottom_left"]
width = int(maxx) - int(minx)
height = int(maxy) - int(miny)
# (minx, miny), width, height = annotated_box['top_left'], annotated_box['top'].get_xy()
# Should scale?
# No. Do not scale here as the patch is always
# fetched from things at level0
minx = int(minx) # * scale_factor)
miny = int(miny) # * scale_factor)
maxx = int(maxx) # * scale_factor)
maxy = int(maxy) # * scale_factor)
width = int(width * scale_factor)
height = int(height * scale_factor)
annotated_polygon = np.array(annotated_polygon.get_xy())
annotated_polygon = annotated_polygon * scale_factor
# buffer ensures the resulting polygon is clean
# http://toblerity.org/shapely/manual.html#object.buffer
try:
annotated_polygon_scaled = shapelyPolygon(
np.round(annotated_polygon).astype(int)
).buffer(0)
except:
warnings.warn(
"Skipping creating annotation index {} for {}".format(
annotation_index, uid
)
)
continue
assert (
annotated_polygon_scaled.is_valid
), "Found invalid annotated polygon: {} {}".format(
uid, shapelyPolygon(annotated_polygon).is_valid
)
for x_left in np.arange(minx, maxx, 1):
for y_top in np.arange(miny, maxy, 1):
x_right = x_left + patchsize
y_bottom = y_top + patchsize
if last_used_x is None:
last_used_x = x_left
last_used_y = y_top
diff_x = stride
diff_y = stride
else:
diff_x = np.abs(x_left - last_used_x)
diff_y = np.abs(y_top - last_used_y)
# print(last_used_x, last_used_y, x_left, y_top, diff_x, diff_y)
if diff_x <= stride or diff_y <= stride:
continue
else:
last_used_x = x_left
last_used_y = y_top
patch_polygon = shapelyPolygon(
[
(x_left, y_top),
(x_right, y_top),
(x_right, y_bottom),
(x_left, y_bottom),
]
).buffer(0)
assert (
patch_polygon.is_valid
), "Found invalid polygon: {}_{}_{}".format(uid, x_left, y_top)
try:
is_inside = annotated_polygon_scaled.contains(patch_polygon)
except:
# Might raise an exception when the two polygons
# are the same
                        warnings.warn(
                            "Skipping: {}_{}_{}_{}.png | Equals: {} | Almost equals: {}".format(
                                uid,
                                x_left,
                                y_top,
                                patchsize,
                                annotated_polygon_scaled.equals(patch_polygon),
                                annotated_polygon_scaled.almost_equals(patch_polygon),
                            )
                        )
                        continue
                    if not is_inside:
                        # Skip patches that are not fully contained in the annotated polygon
                        continue
if write_image:
out_file = os.path.join(
dirs[polygon_key],
"{}_{}_{}_{}.png".format(uid, x_left, y_top, patchsize),
)
patch = wsi.get_patch_by_level(x_left, y_top, level, patchsize)
os.makedirs(os.path.dirname(out_file), exist_ok=True)
img = Image.fromarray(patch)
img.save(out_file)
else:
# Just write the coordinates
to_write = "{}_{}_{}_{}\n".format(uid, x_left, y_top, patchsize)
out_file = os.path.join(
dirs[polygon_key], "{}.txt".format(polygon_key)
)
with open(out_file, "a") as fh:
fh.write(to_write)
@cli.command(
"extract-test-both-patches",
context_settings=CONTEXT_SETTINGS,
help="Extract both normal and tumor patches from tissue masks",
)
@click.option("--indir", help="Root directory with all test WSIs", required=True)
@click.option(
"--patchsize", type=int, default=128, help="Patch size which to extract patches"
)
@click.option("--stride", type=int, default=128, help="Stride to generate next patch")
@click.option("--jsondir", help="Root directory with all jsons", required=True)
@click.option(
"--level", type=int, help="Level at which to extract patches", required=True
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--write_image", help="Should output images", is_flag=True)
def extract_test_both_cmd(
indir, patchsize, stride, jsondir, level, savedir, write_image
):
"""Extract tissue only patches from tumor WSIs.
"""
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
out_dir = os.path.join(savedir, "level_{}".format(level))
normal_dir = os.path.join(out_dir, "normal")
tumor_dir = os.path.join(out_dir, "tumor")
os.makedirs(out_dir, exist_ok=True)
os.makedirs(normal_dir, exist_ok=True)
os.makedirs(tumor_dir, exist_ok=True)
dirs = {"normal": normal_dir, "tumor": tumor_dir}
total_wsi = len(wsis)
data = [(wsi, jsondir, patchsize, stride, level, dirs, write_image) for wsi in wsis]
with tqdm(total=total_wsi) as pbar:
with Pool(processes=16) as p:
for i, _ in enumerate(p.imap_unordered(process_wsi, data)):
# print(i / total_wsi * 100)
pbar.update()
# for i, wsi in tqdm(enumerate(list(wsis))):
# process_wsi(wsi)
# pbar.update()
def process_segmentation(data):
"""
Parameters
----------
data: tuple
(png_location, tsv_outpath)
"""
png, saveto = data
patch = read_as_rgb(png)
region_properties, _ = label_nuclei(patch, draw=False)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
df.to_csv(saveto, index=False, header=True, sep="\t")
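# Minimal sketch of calling process_segmentation directly (hypothetical file
# names; it is normally driven by the `segment` command defined below):
#   process_segmentation(("/data/patches/tumor_001_100_200_128.png",
#                         "/data/segmented/tumor_001_100_200_128.tsv"))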
@cli.command(
"segment",
context_settings=CONTEXT_SETTINGS,
help="Performs segmentation and extract-features",
)
@click.option("--indir", help="Root directory with all pngs", required=True)
@click.option("--outdir", help="Output directory to out tsv", required=True)
def segementation_cmd(indir, outdir):
"""Perform segmentation and store the tsvs
"""
list_of_pngs = list(glob.glob(indir + "/*.png"))
data = []
for f in list_of_pngs:
tsv = f.replace(os.path.dirname(f), outdir).replace(".png", ".tsv")
if not os.path.isfile(tsv):
data.append((f, tsv))
elif os.stat(tsv).st_size == 0:
            data.append((f, tsv))
os.makedirs(outdir, exist_ok=True)
with tqdm(total=len(data)) as pbar:
with Pool(processes=16) as p:
for i, _ in enumerate(p.imap_unordered(process_segmentation, data)):
pbar.update()
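# Hedged usage sketch (placeholder directories):
#   pyvirchow segment --indir /data/patches/tumor --outdir /data/segmented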
def _process_patches_df(data):
slide_path, json_filepath, patch_size, saveto = data
df = get_all_patches_from_slide(
slide_path,
json_filepath=json_filepath,
filter_non_tissue=True,
patch_size=patch_size,
saveto=saveto,
)
return df
@cli.command(
"patches-df",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches summarized as dataframes",
)
@click.option("--indir", help="Root directory with all tumor WSIs", required=True)
@click.option("--jsondir", help="Root directory with all jsons")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_mask_df_cmd(indir, jsondir, patchsize, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
wsis = glob.glob(os.path.join(indir, "*.tif"), recursive=False)
data = []
df = pd.DataFrame()
for wsi in wsis:
basename = path_leaf(wsi).replace(".tif", "")
if jsondir:
json_filepath = os.path.join(jsondir, basename + ".json")
else:
json_filepath = None
if not os.path.isfile(json_filepath):
json_filepath = None
saveto = os.path.join(savedir, basename + ".tsv")
data.append((wsi, json_filepath, patchsize, saveto))
os.makedirs(savedir, exist_ok=True)
with tqdm(total=len(wsis)) as pbar:
with Pool(processes=16) as p:
for i, temp_df in enumerate(p.imap_unordered(_process_patches_df, data)):
df = pd.concat([df, temp_df])
pbar.update()
if "is_tumor" in df.columns:
df = df.sort_values(by=["uid", "is_tumor"])
else:
df["is_tumor"] = False
df = df.sort_values(by=["uid"])
df.to_csv(
os.path.join(savedir, "master_df.tsv"), sep="\t", index=False, header=True
)
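# Hedged usage sketch. Produces one <uid>.tsv per slide plus a combined
# master_df.tsv in savedir (paths are placeholders):
#   pyvirchow patches-df --indir /data/tumor_wsis --jsondir /data/tumor_jsons \
#       --patchsize 256 --savedir /data/patch_df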
@cli.command(
"tif-to-df",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches summarized as dataframes from one WSI",
)
@click.option("--tif", help="Tif", required=True)
@click.option("--jsondir", help="Root directory with all jsons")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def extract_df_from_tif_cmd(tif, jsondir, patchsize, savedir):
"""Extract tissue only patches from tumor WSIs.
"""
basename = path_leaf(tif).replace(".tif", "")
if jsondir:
json_filepath = os.path.abspath(os.path.join(jsondir, basename + ".json"))
else:
json_filepath = None
if not os.path.isfile(json_filepath):
json_filepath = None
saveto = os.path.join(savedir, basename + ".tsv")
df = get_all_patches_from_slide(
tif,
json_filepath=json_filepath,
filter_non_tissue=False,
patch_size=patchsize,
saveto=saveto,
)
@cli.command(
"patch-and-mask",
context_settings=CONTEXT_SETTINGS,
help="Extract all patches and their mask from patches dataframes",
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--savedf", help="Save edited dataframe to", required=True)
def extract_patch_mask_cmd(df, patchsize, savedir, savedf):
"""Extract tissue only patches from tumor WSIs.
"""
assert not os.path.isfile(savedf)
df = pd.read_table(df)
df_copy = df.copy()
df_copy["img_path"] = None
df_copy["mask_path"] = None
df["savedir"] = savedir
df["patch_size"] = patchsize
records = df.reset_index().to_dict("records")
aprun = ParallelExecutor(n_jobs=100)
total = len(records)
returned_data = aprun(total=total)(
delayed(save_images_and_mask)(f) for f in records
)
"""
with tqdm(total=len(df.index)) as pbar:
with Pool(processes=8) as p:
for idx, img_path, mask_path in p.imap_unordered(
save_images_and_mask, records):
df_copy.loc[idx, 'img_path'] = img_path
df_copy.loc[idx, 'mask_path'] = mask_path
pbar.update()
"""
# df_copy.to_csv(savedf, sep='\t', index=False, header=True)
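# Hedged usage sketch (placeholder paths):
#   pyvirchow patch-and-mask --df /data/patch_df/master_df.tsv \
#       --patchsize 256 --savedir /data/patch_arrays \
#       --savedf /data/patch_df/master_df.with_paths.tsv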
def process_segmentation_both(data):
"""
Parameters
----------
data: tuple
(png_location, tsv_outpath)
"""
is_tissue, is_tumor, pickle_file, savetopng, savetodf = data
if not is_tissue:
df = pd.DataFrame()
df["is_tumor"] = is_tumor
df["is_tissue"] = is_tissue
return df
patch = joblib.load(pickle_file)
region_properties, _ = label_nuclei(patch, draw=False) # savetopng=savetopng)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
df["is_tumor"] = is_tumor
df.to_csv(savetodf, index=False, header=True, sep="\t")
return df
@cli.command(
"segment-from-df", context_settings=CONTEXT_SETTINGS, help="Segment from df"
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option("--finaldf", help="Path to dataframe", required=True)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
def process_df_cmd(df, finaldf, savedir):
df = pd.read_table(df)
df["img_path"] = None
df["mask_path"] = None
modified_df = pd.DataFrame()
os.makedirs(savedir, exist_ok=True)
tile_loc = df.tile_loc.astype(str)
tile_loc = tile_loc.str.replace(" ", "").str.replace(")", "").str.replace("(", "")
df[["row", "col"]] = tile_loc.str.split(",", expand=True)
df["segmented_png"] = (
savedir
+ "/"
+ df[["uid", "row", "col"]].apply(lambda x: "_".join(x.values.tolist()), axis=1)
+ ".segmented.png"
)
df["segmented_tsv"] = (
savedir
+ "/"
+ df[["uid", "row", "col"]].apply(lambda x: "_".join(x.values.tolist()), axis=1)
+ ".segmented.tsv"
)
with tqdm(total=len(df.index)) as pbar:
with Pool(processes=8) as p:
for i, temp_df in enumerate(
p.imap_unordered(
process_segmentation_both,
df[
[
"is_tissue",
"is_tumor",
"img_path",
"segmented_png",
"segmented_tsv",
]
].values.tolist(),
)
):
modified_df = pd.concat([modified_df, temp_df])
pbar.update()
modified_df.to_csv(finaldf, sep="\t", index=False, header=True)
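# Hedged usage sketch (placeholder paths):
#   pyvirchow segment-from-df --df /data/patch_df/master_df.tsv \
#       --finaldf /data/patch_df/master_df.segmented.tsv \
#       --savedir /data/segmented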
def process_segmentation_fixed(batch_sample):
patch_size = batch_sample["patch_size"]
savedir = os.path.abspath(batch_sample["savedir"])
tile_loc = batch_sample["tile_loc"] # [::-1]
segmentedmethod = batch_sample["segmented_method"]
if isinstance(tile_loc, six.string_types):
tile_row, tile_col = eval(tile_loc)
else:
tile_row, tile_col = tile_loc
segmented_img_path = os.path.join(
savedir, batch_sample["uid"] + "_{}_{}.segmented.png".format(tile_row, tile_col)
)
segmented_tsv_path = os.path.join(
savedir,
batch_sample["uid"] + "_{}_{}.segmented_summary.tsv".format(tile_row, tile_col),
)
if os.path.isfile(segmented_img_path) and os.path.isfile(segmented_tsv_path):
df = pd.read_table(segmented_tsv_path)
return batch_sample["index"], segmented_img_path, segmented_tsv_path, df
# the get_tile tuple required is (col, row)
if not os.path.isfile(batch_sample["img_path"]):
save_images_and_mask(batch_sample)
patch = joblib.load(batch_sample["img_path"])
region_properties, _ = label_nuclei(
patch, draw=False, normalization=segmentedmethod
) # , savetopng=segmented_img_path)
summary = summarize_region_properties(region_properties, patch)
df = pd.DataFrame([summary])
try:
df["is_tumor"] = batch_sample["is_tumor"]
except KeyError:
# Must be from a normal sample
df["is_tumor"] = False
df["is_tissue"] = batch_sample["is_tissue"]
df.to_csv(segmented_tsv_path, index=False, header=True, sep="\t")
return batch_sample["index"], segmented_img_path, segmented_tsv_path, df
@cli.command(
"segment-from-df-fast", context_settings=CONTEXT_SETTINGS, help="Segment from df"
)
@click.option("--df", help="Path to dataframe", required=True)
@click.option("--finaldf", help="Path to dataframe", required=True)
@click.option(
"--segmethod",
help="Path to dataframe",
default=None,
type=click.Choice(["None", "vahadane", "macenko", "xu"]),
)
@click.option(
"--savedir", help="Root directory to save extract images to", required=True
)
@click.option("--ncpu", type=int, default=1, help="Patch size which to extract patches")
@click.option(
"--patchsize", type=int, default=256, help="Patch size which to extract patches"
)
def process_df_cmd_fast(df, finaldf, segmethod, savedir, ncpu, patchsize):
savedir = os.path.abspath(savedir)
df_main = pd.read_table(df)
df = df_main.copy()
df["savedir"] = savedir
df["patch_size"] = patchsize
df["segmented_png"] = None
df["segmented_tsv"] = None
df["segmented_method"] = segmethod
modified_df = pd.DataFrame()
os.makedirs(savedir, exist_ok=True)
df_reset_index = df.reset_index()
df_subset = df_reset_index[df_reset_index.is_tissue == True]
records = df_subset.to_dict("records")
with tqdm(total=len(df_subset.index)) as pbar:
if ncpu > 1:
with Pool(processes=ncpu) as p:
for idx, segmented_png, segmented_tsv, summary_df in p.imap_unordered(
process_segmentation_fixed, records
):
df.loc[idx, "segmented_png"] = segmented_png
df.loc[idx, "segmented_tsv"] = segmented_tsv
                    modified_df = pd.concat([modified_df, summary_df])
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
number_cells = 5
df = pd.DataFrame()
"""Module providing various functions for processing more complex structured data (e.g., collected during a study)."""
import warnings
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from scipy import interpolate
from biopsykit.utils._datatype_validation_helper import (
_assert_dataframes_same_length,
_assert_has_index_levels,
_assert_has_multiindex,
_assert_is_dtype,
)
from biopsykit.utils.array_handling import sanitize_input_1d
from biopsykit.utils.datatype_helper import (
MeanSeDataFrame,
MergedStudyDataDict,
StudyDataDict,
SubjectConditionDataFrame,
SubjectConditionDict,
SubjectDataDict,
_MeanSeDataFrame,
is_mean_se_dataframe,
is_merged_study_data_dict,
is_study_data_dict,
is_subject_condition_dataframe,
is_subject_condition_dict,
is_subject_data_dict,
)
from biopsykit.utils.functions import se
def _split_data_series(data: pd.DataFrame, time_intervals: pd.Series, include_start: bool) -> Dict[str, pd.DataFrame]:
if time_intervals.index.nlevels > 1:
# multi-index series => second level contains start/end times of phases
time_intervals = time_intervals.unstack().T
time_intervals = {key: tuple(value.values()) for key, value in time_intervals.to_dict().items()}
else:
if include_start:
time_intervals["Start"] = data.index[0].to_pydatetime().time()
# time_intervals.sort_values(inplace=True)
time_intervals = {
name: (start, end)
for name, start, end in zip(time_intervals.index, time_intervals[:-1], time_intervals[1:])
}
return time_intervals
def split_data(
data: pd.DataFrame,
time_intervals: Union[pd.DataFrame, pd.Series, Dict[str, Sequence[str]]],
include_start: Optional[bool] = False,
) -> Dict[str, pd.DataFrame]:
"""Split data into different phases based on time intervals.
The start and end times of the phases are prodivded via the ``time_intervals`` parameter and can either be a
:class:`~pandas.Series`, 1 row of a :class:`~pandas.DataFrame`, or a dictionary with start and end times per phase.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to be split
time_intervals : dict or :class:`~pandas.Series` or :class:`~pandas.DataFrame`
time intervals indicating where the data should be split. This can be:
* :class:`~pandas.Series` object or 1 row of a :class:`~pandas.DataFrame` with `start` times of each phase.
The phase names are then derived from the `index` names in case of a :class:`~pandas.Series` or from the
`columns` names in case of a :class:`~pandas.DataFrame`.
* dictionary with phase names (keys) and tuples with start and end times of the phase (values)
include_start: bool, optional
``True`` to include data from the beginning of the recording to the start of the first phase as the
first phase (this phase will be named "Start"), ``False`` to discard this data. Default: ``False``
Returns
-------
dict
dictionary with data split into different phases
Examples
--------
>>> from biopsykit.utils.data_processing import split_data
>>> # read pandas dataframe from csv file and split data based on time interval dictionary
>>> data = pd.read_csv("path-to-file.csv")
>>> # Example 1: define time intervals (start and end) of the different recording phases as dictionary
>>> time_intervals = {"Part1": ("09:00", "09:30"), "Part2": ("09:30", "09:45"), "Part3": ("09:45", "10:00")}
>>> data_dict = split_data(data=data, time_intervals=time_intervals)
>>> # Example 2: define time intervals as pandas Series. Here, only start times of the are required, it is assumed
>>> # that the phases are back to back
>>> time_intervals = pd.Series(data=["09:00", "09:30", "09:45", "10:00"], index=["Part1", "Part2", "Part3", "End"])
>>> data_dict = split_data(data=data, time_intervals=time_intervals)
>>>
>>> # Example: Get Part 2 of data_dict
>>> print(data_dict['Part2'])
"""
_assert_is_dtype(time_intervals, (pd.DataFrame, pd.Series, dict))
if isinstance(time_intervals, pd.DataFrame):
if len(time_intervals) > 1:
raise ValueError("Only dataframes with 1 row allowed!")
time_intervals = time_intervals.iloc[0]
if isinstance(time_intervals, pd.Series):
time_intervals = _split_data_series(data, time_intervals, include_start)
else:
if include_start:
time_intervals["Start"] = (
data.index[0].to_pydatetime().time(),
list(time_intervals.values())[0][0],
)
data_dict = {name: data.between_time(*start_end) for name, start_end in time_intervals.items()}
data_dict = {name: data for name, data in data_dict.items() if not data.empty}
return data_dict
def exclude_subjects(
excluded_subjects: Union[Sequence[str], Sequence[int]], index_name: Optional[str] = "subject", **kwargs
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Exclude subjects from dataframes.
This function can be used to exclude subject IDs for later analysis from different kinds of dataframes, such as:
* dataframes with subject condition information
(:obj:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame`)
* dataframes with time log information
* dataframes with (processed) data (e.g., :obj:`biopsykit.utils.datatype_helper.SalivaRawDataFrame` or
:obj:`~biopsykit.utils.datatype_helper.MeanSeDataFrame`)
All dataframes can be supplied at once via ``**kwargs``.
Parameters
----------
excluded_subjects : list of str or int
list with subjects IDs to be excluded
index_name : str, optional
name of dataframe index level with subject IDs. Default: "subject"
**kwargs :
data to be cleaned as key-value pairs
Returns
-------
:class:`~pandas.DataFrame` or dict of such
dictionary with cleaned versions of the dataframes passed to the function via ``**kwargs``
or dataframe if function was only called with one single dataframe
"""
cleaned_data: Dict[str, pd.DataFrame] = {}
for key, data in kwargs.items():
_assert_is_dtype(data, pd.DataFrame)
if index_name in data.index.names:
level_values = data.index.get_level_values(index_name)
            if (level_values.dtype == object and all(isinstance(s, str) for s in excluded_subjects)) or (
                level_values.dtype == int and all(isinstance(s, int) for s in excluded_subjects)
):
cleaned_data[key] = _exclude_single_subject(data, excluded_subjects, index_name, key)
else:
raise ValueError("{}: dtypes of index and subject ids to be excluded do not match!".format(key))
else:
raise ValueError("No '{}' level in index!".format(index_name))
if len(cleaned_data) == 1:
cleaned_data = list(cleaned_data.values())[0]
return cleaned_data
def _exclude_single_subject(
data: pd.DataFrame, excluded_subjects: Union[Sequence[str], Sequence[int]], index_name: str, dataset_name: str
) -> pd.DataFrame:
# dataframe index and subjects are both strings or both integers
try:
if isinstance(data.index, pd.MultiIndex):
# MultiIndex => specify index level
return data.drop(index=excluded_subjects, level=index_name)
# Regular Index
return data.drop(index=excluded_subjects)
except KeyError:
warnings.warn("Not all subjects of {} exist in '{}'!".format(excluded_subjects, dataset_name))
return data
def normalize_to_phase(subject_data_dict: SubjectDataDict, phase: Union[str, pd.DataFrame]) -> SubjectDataDict:
"""Normalize time series data per subject to the phase specified by ``normalize_to``.
The result is the relative change (of, for example, heart rate) compared to the mean value in ``phase``.
Parameters
----------
subject_data_dict : :class:`~biopsykit.utils.datatype_helper.SubjectDataDict`
``SubjectDataDict``, i.e., a dictionary with a :class:`~biopsykit.utils.datatype_helper.PhaseDict`
for each subject
phase : str or :class:`~pandas.DataFrame`
phase to normalize all other data to. If ``phase`` is a string then it is interpreted as the name of a phase
present in ``subject_data_dict``. If ``phase`` is a DataFrame then the data will be normalized (per subject)
to the mean value of the DataFrame.
Returns
-------
dict
dictionary with normalized data per subject
"""
_assert_is_dtype(phase, (str, pd.DataFrame))
dict_subjects_norm = {}
for subject_id, data in subject_data_dict.items():
if isinstance(phase, str):
bl_mean = data[phase].mean()
else:
bl_mean = phase.mean()
dict_subjects_norm[subject_id] = {p: (df - bl_mean) / bl_mean * 100 for p, df in data.items()}
return dict_subjects_norm
def resample_sec(data: Union[pd.DataFrame, pd.Series]) -> pd.DataFrame:
"""Resample input data to a frequency of 1 Hz.
.. note::
For resampling the index of ``data`` either be has to be a :class:`~pandas.DatetimeIndex`
or a :class:`~pandas.Index` with time information in seconds.
Parameters
----------
data : :class:`~pandas.DataFrame` or :class:`~pandas.Series`
data to resample. Index of data needs to be a :class:`~pandas.DatetimeIndex`
Returns
-------
:class:`~pandas.DataFrame`
dataframe with data resampled to 1 Hz
Raises
------
ValueError
If ``data`` is not a DataFrame or Series
"""
_assert_is_dtype(data, (pd.DataFrame, pd.Series))
if isinstance(data, pd.DataFrame):
column_name = data.columns
else:
column_name = [data.name]
if isinstance(data.index, pd.DatetimeIndex):
x_old = np.array((data.index - data.index[0]).total_seconds())
else:
x_old = np.array(data.index - data.index[0])
x_new = np.arange(1, np.ceil(x_old[-1]) + 1)
data = sanitize_input_1d(data)
interpol_f = interpolate.interp1d(x=x_old, y=data, fill_value="extrapolate")
return pd.DataFrame(interpol_f(x_new), index=pd.Index(x_new, name="time"), columns=column_name)
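# Example (hypothetical data, not part of the original module): resample an
# irregularly sampled heart-rate series whose index is in seconds to 1 Hz.
#   hr = pd.Series([80.0, 82.0, 81.0], index=[0.0, 0.7, 2.1], name="heart_rate")
#   hr_1hz = resample_sec(hr)  # DataFrame with a 1 Hz "time" index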
def resample_dict_sec(
data_dict: Dict[str, Any],
) -> Dict[str, Any]:
"""Resample all data in the dictionary to 1 Hz data.
This function recursively looks for all dataframes in the dictionary and resamples data to 1 Hz using
:func:`~biopsykit.utils.data_processing.resample_sec`.
Parameters
----------
data_dict : dict
nested dictionary with data to be resampled
Returns
-------
dict
nested dictionary with data resampled to 1 Hz
See Also
--------
:func:`~biopsykit.utils.data_processing.resample_sec`
resample dataframe to 1 Hz
"""
result_dict = {}
for key, value in data_dict.items():
if isinstance(value, (pd.DataFrame, pd.Series)):
result_dict[key] = resample_sec(value)
elif isinstance(value, dict):
result_dict[key] = resample_dict_sec(value)
else:
raise ValueError("Invalid input format!")
return result_dict
def select_dict_phases(subject_data_dict: SubjectDataDict, phases: Union[str, Sequence[str]]) -> SubjectDataDict:
"""Select specific phases from :obj:`~biopsykit.utils.datatype_helper.SubjectDataDict`.
Parameters
----------
subject_data_dict : :obj:`~biopsykit.utils.datatype_helper.SubjectDataDict`
``SubjectDataDict``, i.e. a dictionary with :obj:`~biopsykit.utils.datatype_helper.PhaseDict` for each subject
phases : list of str
list of phases to select
Returns
-------
:obj:`~biopsykit.utils.datatype_helper.SubjectDataDict`
``SubjectDataDict`` containing only the phases of interest
"""
is_subject_data_dict(subject_data_dict)
if isinstance(phases, str):
phases = [phases]
return {
subject: {phase: dict_subject[phase] for phase in phases} for subject, dict_subject in subject_data_dict.items()
}
def rearrange_subject_data_dict(
subject_data_dict: SubjectDataDict,
) -> StudyDataDict:
"""Rearrange :obj:`~biopsykit.utils.datatype_helper.SubjectDataDict` to \
:obj:`~biopsykit.utils.datatype_helper.StudyDataDict`.
A ``StudyDataDict`` is constructed from a ``SubjectDataDict`` by swapping outer (subject IDs) and inner
(phase names) dictionary keys.
The **input** needs to be a :obj:`~biopsykit.utils.datatype_helper.SubjectDataDict`,
a nested dictionary in the following format:
| {
| "subject1" : { "phase_1" : dataframe, "phase_2" : dataframe, ... },
| "subject2" : { "phase_1" : dataframe, "phase_2" : dataframe, ... },
| ...
| }
The **output** format will be the following:
| {
| "phase_1" : { "subject1" : dataframe, "subject2" : dataframe, ... },
| "phase_2" : { "subject1" : dataframe, "subject2" : dataframe, ... },
| ...
| }
Parameters
----------
subject_data_dict : :obj:`~biopsykit.utils.datatype_helper.SubjectDataDict`
``SubjectDataDict``, i.e. a dictionary with data from multiple subjects, each containing data from
multiple phases (in form of a :obj:`~biopsykit.utils.datatype_helper.PhaseDict`)
Returns
-------
:obj:`~biopsykit.utils.datatype_helper.StudyDataDict`
rearranged ``SubjectDataDict``
"""
dict_flipped = {}
    phases = [list(dict_phase.keys()) for dict_phase in subject_data_dict.values()]
if not all(phases[0] == p for p in phases):
raise ValueError(
"Error rearranging the dictionary! Not all 'PhaseDict's have the same phases. "
"To rearrange the 'SubjectDataDict', "
"the dictionaries of all subjects need to have the exact same phases!"
)
for subject, phase_dict in subject_data_dict.items():
for phase, df in phase_dict.items():
dict_flipped.setdefault(phase, dict.fromkeys(subject_data_dict.keys()))
dict_flipped[phase][subject] = df
return dict_flipped
def cut_phases_to_shortest(study_data_dict: StudyDataDict, phases: Optional[Sequence[str]] = None) -> StudyDataDict:
"""Cut time-series data to shortest duration of a subject in each phase.
To overlay time-series data from multiple subjects in an `ensemble plot` it is beneficial if all data have
the same length. For that reason, data can be cut to the same length using this function.
Parameters
----------
study_data_dict : :obj:`~biopsykit.utils.datatype_helper.StudyDataDict`
``StudyDataDict``, i.e. a dictionary with data from multiple phases, each phase containing data from
different subjects.
phases : list of str, optional
list of phases if only a subset of phases should be cut or ``None`` to cut all phases.
Default: ``None``
Returns
-------
:obj:`~biopsykit.utils.datatype_helper.StudyDataDict`
``StudyDataDict`` with data cut to the shortest duration in each phase
"""
is_study_data_dict(study_data_dict)
if phases is None:
phases = study_data_dict.keys()
dict_cut = {}
for phase in phases:
min_dur = min([len(df) for df in study_data_dict[phase].values()])
dict_cut[phase] = {subject: df.iloc[0:min_dur].copy() for subject, df in study_data_dict[phase].items()}
is_study_data_dict(study_data_dict)
return dict_cut
def merge_study_data_dict(study_data_dict: StudyDataDict) -> MergedStudyDataDict:
"""Merge inner dictionary level of :obj:`~biopsykit.utils.datatype_helper.StudyDataDict` into one dataframe.
This function removes the inner level of the nested ``StudyDataDict`` by merging data from all subjects
into one dataframe for each phase.
.. note::
To merge data from different subjects into one dataframe the data are all expected to have the same length!
If this is not the case, all data needs to be cut to equal length first, e.g. using
:func:`~biopsykit.utils.data_processing.cut_phases_to_shortest`.
Parameters
----------
study_data_dict : :obj:`~biopsykit.utils.datatype_helper.StudyDataDict`
``StudyDataDict``, i.e. a dictionary with data from multiple phases, each phase containing data from
different subjects.
Returns
-------
:obj:`~biopsykit.utils.datatype_helper.MergedStudyDataDict`
``MergedStudyDataDict`` with data of all subjects merged into one dataframe for each phase
"""
is_study_data_dict(study_data_dict)
dict_merged = {}
for phase, dict_phase in study_data_dict.items():
_assert_dataframes_same_length(list(dict_phase.values()))
df_merged = pd.concat(dict_phase, names=["subject"], axis=1)
df_merged.columns = df_merged.columns.droplevel(1)
dict_merged[phase] = df_merged
is_merged_study_data_dict(dict_merged)
return dict_merged
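# Sketch (hypothetical two-subject dict; assumes every subject dataframe has a
# single column and the same length, e.g. after cut_phases_to_shortest):
#   study_dict = {"Baseline": {"Vp01": df_vp01, "Vp02": df_vp02}}
#   merged = merge_study_data_dict(study_dict)
#   merged["Baseline"].columns  # -> Index(["Vp01", "Vp02"], name="subject")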
def split_dict_into_subphases(
data_dict: Dict[str, Any],
subphases: Dict[str, int],
) -> Union[Dict[str, Dict[str, Any]]]:
"""Split dataframes in a nested dictionary into subphases.
By further splitting a dataframe into subphases a new dictionary level is created. The new dictionary level
then contains the subphases with their data.
.. note::
If the duration of the last subphase is unknown (e.g., because it has variable length) this can be
indicated by setting the duration of this subphase to 0.
The duration of this subphase will then be inferred from the data.
Parameters
----------
data_dict : dict
dictionary with an arbitrary number of outer level (e.g., conditions, phases, etc.) as keys and
dataframes with data to be split into subphases as values
subphases : dict
dictionary with subphase names (keys) and subphase durations (values) in seconds
Returns
-------
dict
dictionary where each dataframe in the dictionary is split into the subphases specified by ``subphases``
"""
result_dict = {}
for key, value in data_dict.items():
_assert_is_dtype(value, (dict, pd.DataFrame))
if isinstance(value, dict):
# nested dictionary
result_dict[key] = split_dict_into_subphases(value, subphases)
else:
subphase_times = get_subphase_durations(value, subphases)
subphase_dict = {}
for subphase, times in zip(subphases.keys(), subphase_times):
if isinstance(value.index, pd.DatetimeIndex):
# slice the current subphase by dropping the preceding subphases
value_cpy = value.drop(value.first("{}s".format(times[0])).index)
value_cpy = value_cpy.first("{}s".format(times[1] - times[0]))
subphase_dict[subphase] = value_cpy
else:
subphase_dict[subphase] = value.iloc[times[0] : times[1]]
result_dict[key] = subphase_dict
return result_dict
def get_subphase_durations(
data: pd.DataFrame, subphases: Dict[str, Union[int, Tuple[int, int]]]
) -> Sequence[Tuple[int, int]]:
"""Compute subphase durations from dataframe.
The subphases can be specified in two different ways:
* If the dictionary entries in ``subphases`` are integer, it's assumed that subphases are consecutive,
i.e., each subphase begins right after the previous one, and the entries indicate the *durations* of each
subphase. The start and end times of each subphase will then be computed from the subphase durations.
* If the dictionary entries in ``subphases`` are tuples, it's assumed that the start and end times of each
subphase are directly provided.
.. note::
If the duration of the last subphase is unknown (e.g., because it has variable length) this can be
indicated by setting the duration of this subphase to 0.
The duration of this subphase will then be inferred from the data.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe with data from one phase. Used to compute the duration of the last subphase if this subphase
is expected to have variable duration.
subphases : dict
dictionary with subphase names as keys and subphase durations (as integer) or start and end
times (as tuples of integer) as values in seconds
Returns
-------
list
list with start and end times of each subphase in seconds relative to beginning of the phase
Examples
--------
>>> from biopsykit.utils.data_processing import get_subphase_durations
>>> # Option 1: Subphases consecutive, subphase durations provided
>>> get_subphase_durations(data, {"Start": 60, "Middle": 120, "End": 60})
>>> # Option 2: Subphase start and end times provided
>>> get_subphase_durations(data, {"Start": (0, 50), "Middle": (60, 160), "End": (180, 240)})
"""
subphase_durations = np.array(list(subphases.values()))
if subphase_durations.ndim == 1:
# 1d array => subphase values are integer => they are consecutive and each entry is the duration
# of the subphase, so the start and end times of each subphase must be computed
times_cum = np.cumsum(subphase_durations)
if subphase_durations[-1] == 0:
# last subphase has duration 0 => end of last subphase is length of dataframe
times_cum[-1] = len(data)
subphase_times = list(zip([0] + list(times_cum), times_cum))
else:
# 2d array => subphase values are tuples => start end end time of each subphase are already provided and do
# not need to be computed
subphase_times = subphase_durations
return subphase_times
def add_subject_conditions(
data: pd.DataFrame, condition_list: Union[SubjectConditionDict, SubjectConditionDataFrame]
) -> pd.DataFrame:
"""Add subject conditions to dataframe.
This function expects a dataframe with data from multiple subjects and information on which subject
belongs to which condition.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe where new index level ``condition`` with subject conditions should be added to
condition_list : ``SubjectConditionDict`` or ``SubjectConditionDataFrame``
:obj:`~biopsykit.utils.datatype_helper.SubjectConditionDict` or
:obj:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame` with information on which subject belongs to
which condition
Returns
-------
:class:`~pandas.DataFrame`
dataframe with new index level ``condition`` indicating which subject belongs to which condition
"""
if is_subject_condition_dataframe(condition_list, raise_exception=False):
condition_list = condition_list.groupby("condition").groups
is_subject_condition_dict(condition_list)
return pd.concat({cond: data.loc[subjects] for cond, subjects in condition_list.items()}, names=["condition"])
def split_subject_conditions(
data_dict: Dict[str, Any], condition_list: Union[SubjectConditionDict, SubjectConditionDataFrame]
) -> Dict[str, Dict[str, Any]]:
"""Split dictionary with data based on conditions subjects were assigned to.
This function adds a new outer dictionary level with the different conditions as keys and dictionaries
belonging to the conditions as values. For that, it expects a dictionary with data from multiple subjects and
information on which subject belongs to which condition.
Parameters
----------
data_dict : dict
(nested) dictionary with data which should be split based on the conditions subjects belong to
condition_list : ``SubjectConditionDict`` or ``SubjectConditionDataFrame``
:obj:`~biopsykit.utils.datatype_helper.SubjectConditionDict` or
:obj:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame` with information on which subject belongs to
which condition
Returns
-------
dict
dictionary with additional outer level indicating which subject belongs to which condition
"""
if is_subject_condition_dataframe(condition_list, raise_exception=False):
condition_list = condition_list.groupby("condition").groups
is_subject_condition_dict(condition_list)
return {cond: _splits_subject_conditions(data_dict, subjects) for cond, subjects in condition_list.items()}
def _splits_subject_conditions(data_dict: Dict[str, Any], subject_list: Sequence[str]):
_assert_is_dtype(data_dict, (dict, pd.DataFrame))
if isinstance(data_dict, pd.DataFrame):
return data_dict[subject_list]
return {key: _splits_subject_conditions(value, subject_list) for key, value in data_dict.items()}
def mean_per_subject_dict(data: Dict[str, Any], dict_levels: Sequence[str], param_name: str) -> pd.DataFrame:
"""Compute mean values of time-series data from a nested dictionary.
This function computes the mean value of time-series data in a nested dictionary per subject and combines it into
a joint dataframe. The dictionary will be traversed recursively and can thus have arbitrary depth.
The most inner level must contain dataframes with time-series data of which mean values will be computed.
The names of the dictionary levels are specified by ``dict_levels``.
Parameters
----------
data: dict
nested dictionary with data on which mean should be computed. The number of nested levels must match the
number of levels specified in ``dict_levels``.
dict_levels : list of str
list with names of dictionary levels.
param_name : str
type of data of which mean values will be computed from.
This will also be the column name in the resulting dataframe.
Returns
-------
:class:`~pandas.DataFrame`
dataframe with ``dict_levels`` as index levels and mean values of time-series data as column values
"""
result_data = {}
one_col_df = False
for key, value in data.items():
_assert_is_dtype(value, (dict, pd.DataFrame))
if isinstance(value, dict):
if len(dict_levels) <= 1:
raise ValueError("Invalid number of 'dict_levels' specified!")
# nested dictionary
result_data[key] = mean_per_subject_dict(value, dict_levels[1:], param_name)
else:
value.columns.name = "subject"
if len(value.columns) == 1:
one_col_df = True
df = pd.DataFrame(value.mean(axis=0), columns=[param_name])
result_data[key] = df
    ret = pd.concat(result_data, names=[dict_levels[0]])
# -*- coding: utf-8 -*-
"""
Created on Mar 8 2019
@author: <NAME>
email : <EMAIL>
"""
################################################################################
# THIS SCRIPT IS FOR ANALYZING THE RISK FACTORS WITH SVMs
# Tested with Python 2.7 and Python 3.5 on Ubuntu Mate Release 16.04.5 LTS (Xenial Xerus) 64-bit
###############################################################################
'''
IMPORT LIBRARIES
'''
import numpy as np
import pandas as pd
import os
from cvd_ids_in_ukbb_normal_pca import find_cvds_ukbb
#from analyze_plots_ukbb import *
from sklearn.svm import SVC
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from scipy import interp
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
def ROC_curve(X, y,setA,label,clf,name,path_to_save):
cv=StratifiedKFold(n_splits=10)
classifier = clf
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i=0
for train, test in cv.split(X,y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of %s using %s'%(setA[0], label))
plt.legend(loc="lower right")
#plt.show()
title='%s_%s_%s'%(setA[0],label,name)
    title = title.replace(' ', '_')
plt.savefig(path_to_save+'ROC_%s.png'%title)
plt.close()
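# Usage sketch for ROC_curve (illustrative only): synthetic data stands in for the
# radiomics features, and the risk-factor label, classifier settings and output
# directory are assumptions made for this example.
def _demo_roc_curve():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         n_informative=4, random_state=0)
    clf_demo = SVC(kernel='linear', C=1, probability=True)
    ROC_curve(X_demo, y_demo, ['diabetes'], 'radiomics features',
              clf_demo, 'Linear SVM_C1', './')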
def find_min_overlap(overlap_angina):
min_overlap_id = overlap_angina.values.argmin()
min_overlap_name =overlap_angina.columns[min_overlap_id]
return min_overlap_name
def get_conventional_indices (convention, nor_df_training):
'''
Get conventional indices for Normal Training
'''
conventional_indices_training_nor = convention.loc[convention['f.eid'].isin(nor_df_training['patient'])]
conventional_indices_training_nor = conventional_indices_training_nor.set_index('f.eid')
conventional_indices_training_nor = conventional_indices_training_nor.reindex(index = nor_df_training['patient'])
conventional_indices_training_nor=conventional_indices_training_nor.fillna(conventional_indices_training_nor.mean())
conventional_indices_LV_training = conventional_indices_training_nor.filter(regex=( 'LV'))
conventional_indices_LV_training =conventional_indices_LV_training.iloc[:,:-1]
# conventional_indices_LA_training = conventional_indices_training_nor.filter(regex=( 'LA'))
# conventional_indices_LA_training =conventional_indices_LA_training.iloc[:,:-1]
conventional_indices_RV_training = conventional_indices_training_nor.filter(regex=('RV'))
conventional_indices_RV_training =conventional_indices_RV_training.iloc[:,:-1]
conventional_all_training_nor = pd.concat([conventional_indices_LV_training,conventional_indices_RV_training],axis=1)
return conventional_all_training_nor
conditions=[]
with open(".../conditions.txt") as feat:
    for line in feat:
        f = line.strip().split(",")
        for i in range(len(f)):
            conditions.append(str(f[i]))
os.chdir(".../Risk Factors_new conditions_even_cases/Diabetes_ML/")
### Define Risk factors to analyze
risk_factors =[
# ['angina'],
# ['high cholesterol'],
['diabetes'],
# ['asthma'],
# ['hypertension',\
# 'essential hypertension']
]
#### List of CVDs in UK Biobank ##############################################
cvds_classify=[
'stroke', \
'transient ischaemic attack (tia)',\
'peripheral vascular disease',\
'deep venous thrombosis (dvt)', \
'heart valve problem/heart murmur',\
'cardiomyopathy',\
'atrial fibrillation',\
'irregular heart beat', \
'heart/cardiac problem', \
'raynaud\'s phenomenon/disease',\
'heart attack/myocardial infarction',\
'hypertrophic cardiomyopathy (hcm / hocm)',\
'aortic regurgitation',\
'aortic stenosis',\
'aortic aneurysm',\
'vasculitis',\
'varicose veins',\
'mitral valve prolapse',\
'pericardial problem',\
'pericardial effusion',\
'pericarditis',\
'svt / supraventricular tachycardia',\
'wegners granulmatosis',\
'angina',\
'high cholesterol', \
'hypertension','essential hypertension',\
'diabetes',\
'asthma',\
    'type 1 diabetes',\
    'type 2 diabetes',\
    'heart arrhythmia',\
    'gestational diabetes',\
    'mitral valve prolapse',\
    'mitral regurgitation / incompetence',\
    'mitral valve disease',\
    'pulmonary embolism +/- dvt',\
    'gestational hypertension/pre-eclampsia',\
    'heart failure/pulmonary odema',\
    'cardiomyopathy',\
    'aortic stenosis',\
    'atrial flutter',\
    'hypertrophic cardiomyopathy (hcm / hocm)',\
'myocarditis'
]
cvds_samples=[]
cvd_classifier_acc=[]
acc_all=[]
models=[]
#### Take the UKBB files ##########################################################################################
#### these 3 files will be check for the cardiovascular diseases
## to find the samples
main_path_files = '.../UKBB Data Information/Files/'
#1-
conditions=pd.read_csv(main_path_files+'medical_conditions_.csv', low_memory=False)
#2-
history=pd.read_csv(main_path_files+'health_and_medical_history_.csv', low_memory=False)
#3-
outcomes=pd.read_csv(main_path_files+'health_related_outcomes_.csv', low_memory=False)
###
#### Take the conventional clinical indices to make the comparison of the results #####################################
convention =pd.read_csv(main_path_files+'imaging_heart_mri_qmul_oxford_.csv', low_memory=False)
## Get genetics data (if needed)
#genomics = pd.read_csv('genomics_decoded_headings_decoded_data_2017-May-17_1445_r4d.csv', low_memory=False)
#genomics=genomics.fillna(genomics.mean())
#genomics.drop(genomics.select_dtypes(['object']), inplace=True, axis=1)
#### Take the calculated radiomics features
radiomics_ukbb=pd.read_csv('.../cardiac radiomics for Application 2964/1. Radiomics results calculated.csv', low_memory=False)
'''
Define different classifiers
'''
names = [ "Linear SVM_C01", "Linear SVM_C1", "Linear SVM_C10" ,
"RBF SVM_gamma_01_C0.1","RBF SVM_gamma_01_C1","RBF SVM_gamma_01_C10",
"RBF SVM_gamma_1_C0.1","RBF SVM_gamma_1_C1","RBF SVM_gamma_1_C10",
"RBF SVM_gamma_10_C0.1","RBF SVM_gamma_10_C1","RBF SVM_gamma_10_C10",
]
classifiers = [
SVC(kernel="linear", C=0.1, probability =True),
SVC(kernel="linear", C=1, probability =True),
SVC(kernel='linear', C=10, probability=True),
SVC(gamma=0.1, C=0.1, probability =True),
SVC(gamma=0.1, C=1, probability =True),
SVC(gamma=0.1, C=10, probability =True),
SVC(gamma=1, C=0.1, probability =True),
SVC(gamma=1, C=1, probability =True),
SVC(gamma=1, C=10, probability =True),
SVC(gamma=10, C=0.1, probability =True),
SVC(gamma=10, C=1, probability =True),
SVC(gamma=10, C=10, probability =True),
]
cvds_samples_all=[]
cvds_samples_random_selection=[]
cvd_classifier_acc=[]
acc_all=[]
cases=[]
models=[]
models_conv=[]
read_samples_path ='.../Risk Factors_new conditions_even_cases/Results-Single_feat_Last/'
path_to_save_roc='.../Risk Factors_new conditions_even_cases/Diabetes_ML/'
for i in range(len(risk_factors)):
#### Define the set for each risk to analyze
setA= risk_factors[i]
rest_cvds_classify = cvds_classify
rest_risk_factors = list(filter(lambda x: x not in setA,risk_factors ))
'''
Get the radiomics features for Normals and risk factors
'''
# Find CVDs in UK Biobank data and add 'normal' cases as a new instance in the list, cvds_classify
# print('Analyzing %s in UK Biobank...'%(setA))
# [nor_df, setA_df, rest_cvds_classify] =find_cvds_ukbb(conditions,radiomics_ukbb, rest_cvds_classify, setA)
nor_df =pd.read_csv(read_samples_path+'normal_%s.csv'%setA[0])
setA_df = | pd.read_csv(read_samples_path+'setA_df_%s.csv'%setA[0]) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global val
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF['SecuCode'][flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SH')
dataAF['SecuCode'][~flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SZ')
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM =pd.DataFrame(adjFactorM ,index=[str(tradingDay)])
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
suspPre = pickle.load(file2)['suspM']
file2.close()
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[~flag] = 0
suspM = pd.concat([suspPre,endFlag]).fillna(method='pad')
suspM = pd.DataFrame(suspM,index=[str(tradingDay)])
suspM[(listedM==0)] = 1
sql='''
SELECT A.[SpecialTradeTime],A.[SpecialTradeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SpecialTrade] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.[SpecialTradeType]=1 or A.[SpecialTradeType] = 2 or A.[SpecialTradeType] = 5 or A.[SpecialTradeType] = 6)
and A.[SpecialTradeTime] = '%s'
'''% tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SpecialTradeTime] = ','A.[SpecialTradeTime] <= ')
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
dateTotal = np.union1d(stStateM.index.values, [str(tradingDay)])
stStateM = pd.DataFrame(stStateM, index=dateTotal, columns=self.tickerUnivSR)
stStateM = stStateM.fillna(method='pad')
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0)
else:
try:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
dataV = | pd.read_sql_query(sql, conn243) | pandas.read_sql_query |
# coding: utf8
# part of pybacktest package: https://github.com/ematvey/pybacktest
""" Functions for calculating performance statistics and reporting """
import pandas as pd
import numpy as np
start = lambda eqd: eqd.index[0]
end = lambda eqd: eqd.index[-1]
days = lambda eqd: (eqd.index[-1] - eqd.index[0]).days
trades_per_month = lambda eqd: eqd.groupby(
lambda x: (x.year, x.month)
).apply(lambda x: x[x != 0].count()).mean()
profit = lambda eqd: eqd.sum()
average = lambda eqd: eqd[eqd != 0].mean()
average_gain = lambda eqd: eqd[eqd > 0].mean()
average_loss = lambda eqd: eqd[eqd < 0].mean()
winrate = lambda eqd: float(sum(eqd > 0)) / len(eqd)
payoff = lambda eqd: eqd[eqd > 0].mean() / -eqd[eqd < 0].mean()
pf = PF = lambda eqd: abs(eqd[eqd > 0].sum() / eqd[eqd < 0].sum())
maxdd = lambda eqd: (eqd.cumsum().expanding().max() - eqd.cumsum()).max()
rf = RF = lambda eqd: eqd.sum() / maxdd(eqd)
trades = lambda eqd: len(eqd[eqd != 0])
_days = lambda eqd: eqd.resample('D').sum().dropna()
def sharpe(eqd):
''' daily sharpe ratio '''
d = _days(eqd)
return (d.mean() / d.std()) * (252**0.5)
def sortino(eqd):
''' daily sortino ratio '''
d = _days(eqd)
return (d.mean() / d[d < 0].std()) * (252**0.5)
def ulcer(eqd):
eq = eqd.cumsum()
return (((eq - eq.expanding().max()) ** 2).sum() / len(eq)) ** 0.5
def upi(eqd, risk_free=0):
eq = eqd[eqd != 0]
return (eq.mean() - risk_free) / ulcer(eq)
UPI = upi
def mpi(eqd):
""" Modified UPI, with enumerator resampled to months (to be able to
compare short- to medium-term strategies with different trade frequencies. """
return eqd.resample('M').sum().mean() / ulcer(eqd)
MPI = mpi
def mcmdd(eqd, runs=100, quantile=0.99, array=False):
maxdds = [maxdd(eqd.take(np.random.permutation(len(eqd)))) for i in range(runs)]
if not array:
return | pd.Series(maxdds) | pandas.Series |
import numpy as np
import pandas as pd
from pathlib import Path
def imgs_to_df (imgs, fps=None):
imgs = [ img_to_df(img=i, frame_id=frame) for frame,i in enumerate(imgs) ]
df = pd.concat(imgs, ignore_index=True)
if not fps is None:
df['time'] = df['frame'] * (1/fps)
return df
def img_to_df (img, frame_id=None, fps=None):
ult = img.flatten()
xlen = img.shape[1]
ylen = img.shape[0]
xxx = np.array(list(range(xlen))*ylen)
yyy = np.repeat(np.arange(ylen), xlen)
df = pd.DataFrame({'brightness':ult, 'x':xxx, 'y':yyy})
if not frame_id is None:
df['frame'] = frame_id
if not fps is None:
df['time'] = df['frame'] * (1/fps)
return df
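# Illustrative only: flatten a tiny synthetic 2x3 "frame" into the long
# (brightness, x, y) format produced by img_to_df; the fps value is an
# arbitrary assumption.
def _demo_img_to_df():
    img = np.arange(6).reshape(2, 3)  # ylen=2 rows, xlen=3 columns
    print(img_to_df(img, frame_id=0, fps=81.5))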
def integrate_segments ( df, dfphones, dfwords, rmvnoise=True ):
dfphones = alignment_df(dfphones, dfwords)
df['segment'] = ''
df['word'] = ''
for i in dfphones.index:
cst = dfphones.loc[i,'start']
ced = dfphones.loc[i,'end']
csg = dfphones.loc[i,'segments']
cwd = dfphones.loc[i,'word']
df.loc[ (df.time>cst) & (df.time<ced), 'segment'] = csg
df.loc[ (df.time>cst) & (df.time<ced), 'word'] = cwd
if rmvnoise:
df = rmv_noise(df)
return df
def alignment_df (dfphones, dfwords):
dfphones['start'] = dfphones['end'].shift(fill_value=0)
dfwords['start'] = dfwords['end'].shift(fill_value=0)
def word_now (value, wrds):
res = [ k for i,j,k in zip(wrds.start, wrds.end, wrds.words) if (value>i) and (value<=j) ]
if len(res)!=1:
raise ValueError('The provided value corresponds to more than one word.')
return res[0]
dfphones['word'] = [ word_now(i, dfwords) for i in dfphones['end'] ]
return dfphones
def rmv_noise (df):
noise = ['_p:_','<P>','_NOISE_','<NOISE>']
colnames = ['segment','word']
exist_col_pos = [ i in df.columns for i in colnames ]
colnames = np.array(colnames)[exist_col_pos]
for i in colnames:
pos1 = df[i].isin(noise)
pos2 = df[i].isna()
pos3 = | pd.Series([ j=='' for j in df[i] ]) | pandas.Series |
#!/usr/bin/env python
'''
Compare the waveforms taken by the MSO5240 scope.
'''
import os
import pandas as pd
import seaborn as sns
import sys
class ScopeWaveform:
def __init__(self, infpn):
columns = ['info_name', 'value', 'units', 'time', 'waveform_value']
self.df = | pd.read_csv(infpn, names=columns) | pandas.read_csv |
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
| assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:]) | pandas.testing.assert_frame_equal |
import json
import pandas as pd
from scipy.stats.stats import pearsonr, spearmanr
import numpy as np
from scipy import stats
import sys
import matplotlib.pyplot as plt
import os
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder
import argparse
def parse_args(args):
parser = argparse.ArgumentParser(description='Arguments for the evaluation script.')
baseline_metrics = [
# 'Bleu',
# 'Meteor',
# 'Rouge 1',
# 'Rouge 2',
'Rouge L',
'BertScore P Art',
# 'BertScore R Art',
# 'BertScore F1 Art',
# 'FEQA',
'QAGS',
# 'OpenIE',
'Dep Entail',
'FactCC',
]
ablations_cols = [
'Flip_Semantic_Frame_Errors', 'Flip_Discourse_Errors', 'Flip_Content_Verifiability_Errors',
# 'Flip_RelE', 'Flip_EntE', 'Flip_CircE', 'Flip_OutE', 'Flip_GramE', 'Flip_CorefE', 'Flip_LinkE', 'Flip_Other'
]
model_names = [
'bart','pgn', 'bus', 'bert_sum', 's2s',
'TranS2S', 'TConvS2S', 'PtGen', 'BERTS2S'
]
parser.add_argument('--mode', default='hm-correlation', choices=['hm-correlation', 'ablations', 'ablations-plot', 'mm-correlation'], help=(
'This script can calculate correlation with human judgments (hm-correlation),'
' evaluate the performance of the evaluation metrics at capturing different types of factual errors (ablations),'
' output the ablation as a plot (ablations-plot), and compute the Williams test (mm-correlation)'
))
parser.add_argument('--human_eval_path', default='/home/phillab/data/frank/human_annotations.json', help='file containing human annotations expects csv file.')
parser.add_argument('--baseline_metrics_outputs', default='/home/phillab/data/frank/baseline_factuality_metrics_outputs.json', help='file name containing outputs of baseline factuality metrics.')
parser.add_argument('--baseline_metrics', nargs='+', default=baseline_metrics, help='baseline metrics to evaluate on (should match the name in the baseline metrics output file).')
parser.add_argument('--no_baseline_metrics', action='store_true', help='If set, does not evaluate the baseline metrics')
parser.add_argument('--metrics_outputs', default=None, help='names of json files containing metric outputs with key "score"')
parser.add_argument('--metrics_outputs_info', default=None, help='json file describing how to parse metrics output files. This allows to customize the name of the score key and to have several metrics in one json file.')
parser.add_argument('--ablations', nargs='+', default=ablations_cols, help='column names for ablations.')
parser.add_argument('--human', default='Factuality', help='column for human judgements.')
parser.add_argument('--no_partial_correlation', action='store_true')
parser.add_argument('--partial_correlation_variable', default='model_name', help='what column to use as confounding to calculate partial correlations')
parser.add_argument('--store_path', default=None)
parser.add_argument('--dataset', default=None, choices=[None, 'cnndm', 'bbc'], help='if None use all data')
parser.add_argument('--model_name', nargs='+', default=None, help=f'by default use all data, availble model names {model_names}')
args = parser.parse_args(args)
return vars(args)
def williams_test(r12, r13, r23, n):
"""The Williams test (<NAME>. 1959. Regression Analysis, volume 14. Wiley, New York, USA)
A test of whether the population correlation r12 equals the population correlation r13.
Significant: p < 0.05
Arguments:
r12 (float): correlation between x1, x2
r13 (float): correlation between x1, x3
r23 (float): correlation between x2, x3
n (int): size of the population
Returns:
t (float): Williams test result
p (float): p-value of t-dist
"""
if r12 < r13:
print('r12 should be larger than r13')
sys.exit()
elif n <= 3:
print('n should be larger than 3')
sys.exit()
else:
K = 1 - r12**2 - r13**2 - r23**2 + 2*r12*r13*r23
denominator = np.sqrt(2*K*(n-1)/(n-3) + (((r12+r13)**2)/4)*((1-r23)**3))
numerator = (r12-r13) * np.sqrt((n-1)*(1+r23))
t = numerator / denominator
p = 1 - stats.t.cdf(t, df=n-3) # changed to n-3 on 30/11/14
return t, p
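# Usage sketch (synthetic scores, not FRANK data): test whether metric A correlates
# with human judgments significantly better than metric B.
def _demo_williams_test():
    rng = np.random.RandomState(0)
    human = rng.normal(size=200)
    metric_a = human + rng.normal(scale=0.5, size=200)  # strongly related to human scores
    metric_b = human + rng.normal(scale=1.5, size=200)  # weakly related to human scores
    r12, _ = pearsonr(human, metric_a)
    r13, _ = pearsonr(human, metric_b)
    r23, _ = pearsonr(metric_a, metric_b)
    if r12 < r13:
        r12, r13 = r13, r12
    t, p = williams_test(r12, r13, r23, n=200)
    print('Williams t = %.3f, p = %.4f' % (t, p))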
def human_metric_correlation(
data_df,
human_col,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
human_df: pandas dataframe, should only contain one column corresponding to human judgements
metrics_df: pandas dataframe, columns are metrics.
partial_correlation: bool - whether to use partial correlations.
returns a pandas dataframe with pearson and spearman correlation results
"""
correlations = []
named_correlations = dict()
for metric in metrics_cols:
if metric not in data_df:
correlations.append([0, 0, 0, 0])
named_correlations[metric] = [0, 0, 0, 0]
print(f'Warning: {metric} not in dataframe.')
continue
mask = (data_df[metric].isnull() == False) & (data_df[human_col].isnull() == False)
X = data_df[metric][mask]
Y = data_df[human_col][mask]
if partial_correlation:
assert partial_correlation_variable is not None, f'You must specify a column to use as confounding variable for partial correlation calculation'
Q = np.array(data_df[mask][partial_correlation_variable])
enc = OneHotEncoder(handle_unknown='ignore')
Q = enc.fit_transform(Q.reshape(-1, 1))
pred_X = LinearRegression().fit(Q, X).predict(Q)
pred_Y = LinearRegression().fit(Q, Y).predict(Q)
X = X - pred_X
Y = Y - pred_Y
print(f'Info: metric {metric} used {len(X)} summaries to calculate correlation.')
pr, pp = pearsonr(X, Y)
sr, sp = spearmanr(X, Y)
correlations.append([pr, pp, sr, sp])
named_correlations[metric] = [pr, pp, sr, sp]
correlation_df = pd.DataFrame.from_dict(
named_correlations,
orient='index',
columns=['pearson', 'pearson p-value', 'spearman', 'spearman p-value']
)
return correlation_df
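# Sketch with synthetic annotations (the metric and model names mimic the FRANK
# columns, the numbers are made up) showing the partial-correlation path.
def _demo_human_metric_correlation():
    rng = np.random.RandomState(0)
    n = 60
    demo_df = pd.DataFrame({
        'Factuality': rng.uniform(0, 1, n),
        'model_name': np.repeat(['bart', 'pgn', 'bus'], n // 3),
    })
    demo_df['Rouge L'] = 0.7 * demo_df['Factuality'] + rng.normal(0, 0.1, n)
    demo_df['FactCC'] = 0.4 * demo_df['Factuality'] + rng.normal(0, 0.2, n)
    print(human_metric_correlation(
        demo_df, 'Factuality', ['Rouge L', 'FactCC'],
        partial_correlation=True, partial_correlation_variable='model_name'))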
def metric_metric_correlation(
data_df,
human_col,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
metrics_df: pandas dataframe, columns taken as metrics
partial_correlation: bool - whether to use partial correlations.
returns of tuple with two dataframes: (correlation_df, williams_df)
correlation_df is a dataframe that contains metric-metric pearson correlation
williams_df is a dataframe of booleans on weather the two metrics are different in statistically significant terms
"""
correlations = []
williams = []
for i, metric1 in enumerate(metrics_cols):
correlation_metric = []
williams_metric = []
for j, metric2 in enumerate(metrics_cols):
if j == i:
correlation_metric.append(1)
williams_metric.append(False)
continue
mask1 = (data_df[metric1].isnull() == False) & (data_df['model_name'] != 'reference')
mask2 = (data_df[metric2].isnull() == False) & (data_df['model_name'] != 'reference')
mask3 = (data_df[human_col].isnull() == False)
mask = mask1 & mask2 & mask3
X = data_df[metric1][mask]
Y = data_df[metric2][mask]
Z = data_df[human_col][mask]
if partial_correlation_variable is not None:
Q = np.array(data_df[mask][partial_correlation_variable])
enc = OneHotEncoder(handle_unknown='ignore')
Q = enc.fit_transform(Q.reshape(-1, 1))
pred_X = LinearRegression().fit(Q, X).predict(Q)
pred_Y = LinearRegression().fit(Q, Y).predict(Q)
pred_Z = LinearRegression().fit(Q, Z).predict(Q)
X = X - pred_X
Y = Y - pred_Y
Z = Z - pred_Z
r12, _ = pearsonr(X, Z)
r13, _ = pearsonr(Y, Z)
r23, _ = pearsonr(X, Y)
n = min(len(X), len(Y))
if r12 < r13:
r12, r13 = r13, r12
_, p = williams_test(r12, r13, r23, n)
correlation_metric.append(r23)
williams_metric.append(p)
correlations.append(correlation_metric)
williams.append(williams_metric)
correlations_df = pd.DataFrame(correlations, index=metrics_cols, columns=metrics_cols)
williams_df = pd.DataFrame(williams, index=metrics_cols, columns=metrics_cols)
return (correlations_df, williams_df)
def ablation(
data_df,
human_col,
ablations_cols,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
human_df: pandas dataframe, should only contain one column corresponding to human judgements
ablations_df: pandas dataframe, each column corresponds to a different ablation of the human judgements
metrics_df: pandas dataframe, columns are metrics.
partial_correlation: bool - whether to use partial correlations.
returns a dataframe each row corresponding to a different ablation
"""
ablations_dict = dict()
human_df = human_metric_correlation(data_df, human_col, metrics_cols, partial_correlation=partial_correlation, partial_correlation_variable=partial_correlation_variable)
human_correlation = human_df['pearson']
for ablation in ablations_cols:
ablation_df = human_metric_correlation(data_df, ablation, metrics_cols, partial_correlation=partial_correlation, partial_correlation_variable=partial_correlation_variable)
ablation_correlation = ablation_df['pearson']
ablations_dict[ablation] = human_correlation - ablation_correlation
ablations_df = pd.DataFrame(ablations_dict, index=metrics_cols)
return ablations_df
def plot_ablations(ablation_df, save_path):
"""
ablation_df: pandas dataframe, the output of ablation function
save_path: str, where to save the plot
Plots the ablation_df and possibly saves it to the location
"""
ax = ablation_df.plot.bar(figsize=(10, 4), rot=0)
plt.xticks(rotation=45)
if not save_path:
save_path = '.'
fig = ax.get_figure()
fig.savefig(os.path.join(save_path, 'ablations_plot.pdf'), bbox_inches='tight')
def main(args):
"""
Depending on the `mode` used, this script computes correlation between factuality metrics
and human judgments of factuality on the FRANK benchmark data. It can also measure how well
a metric captures certain types of errors.
The code uses baseline metric outputs provided as part of FRANK (in `baseline_facutlaity_metrics_outputs.json`).
The user can specify which metrics among the baseline metrics to use in the computation.
In addition to the baseline metrics, this tool allows users to evaluate their own factuality metric outputs on
FRANK. There are two ways to do so:
1. By providing a FRANK benchmark submission file: a `json` files containing a list of records, each record
having both `hash` and `model_name` fields as well as a `score` field with the metric output.
2. By defining a `json` file with information on how to parse the metric output files.
the schema should look like:
[
{
"path": "PATH_TO_JSON_FILE_WITH_OUTPUS"
"scores": [
{"name": "PRETTY NAME FOR THE METRIC 1", "key": "THE KEY CONTAINING THE METRIC 1 OUTPUT"},
{"name": "PRETTY NAME FOR THE METRIC 2", "key": "THE KEY CONTAINING THE METRIC 2 OUTPUT"},
...
]
},...
]
Note that the output files should still be `json` files with a list of records with `hash` and
`model_name` keys, but they can contain several metrics outputs in each record .
This allows to specify a name for each metric, and allows several metrics for each output file.
"""
# Load the human judgements.
data_df = pd.read_json(args['human_eval_path'])
human_col = args['human']
ablations_cols = args['ablations']
metrics_cols = []
# Load the metric outputs.
if not args['no_baseline_metrics']:
metric_df = pd.read_json(args['baseline_metrics_outputs'])
for baseline_metric in args['baseline_metrics']:
assert baseline_metric in metric_df, baseline_metric + ' not found. Your metrics_output_info file is likely not well defined.'
data_df = data_df.merge(metric_df[['hash', 'model_name'] + args['baseline_metrics']], on=['hash', 'model_name'], validate='one_to_one')
metrics_cols += args['baseline_metrics']
if args['metrics_outputs']:
metric_df = | pd.read_json(args['metrics_outputs']) | pandas.read_json |
import jieba
import jieba.analyse as analyse
import jieba.posseg  # tokenizer variant that also outputs part-of-speech tags
import copy
import wordcloud
import streamlit as st
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib
from wordcloud import WordCloud # 词云包
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import imageio
matplotlib.rcParams['figure.figsize'] = (18.0, 18.0)  # set figure width and height
from wordcloud import WordCloud, ImageColorGenerator
import pylab
data = list(open(r'D:\conda\TF-idf\data\test.txt', encoding='utf-8'))
datawenzi = copy.copy(data[0])
data = jieba.lcut(data[0])  # tokenize without part-of-speech tags
# print(jieba.posseg.lcut(data[0]))  # tokenize with part-of-speech tags
data = pd.DataFrame(data, columns=['ci'])
# Method 1: strip the newline characters from the stopword list with a regex
# stop=list(open(r'D:\conda\TF-idf\data\stopwords.txt',encoding='utf-8'))
# for i in range(len(stop)):
# stop[i]=re.sub('\n','',stop[i])
# stop=pd.DataFrame(stop,columns=['stop'])
# Method 2: load the full stopword list directly with pandas; quoting=3 keeps content
# inside double quotes as literal text, and the txt file is read with encoding='utf-8'
stop = pd.read_csv(r'D:\conda\TF-idf\data\stopwords.txt', encoding='utf-8', index_col=False, sep='\t', names=['stop'],
                   quoting=3)
data = data[~data.ci.isin(stop.stop)]  # drop the stopwords from data by membership matching
def cipin(data1):  # expects the token dataframe (data)
data1gr = data1.groupby('ci')['ci'].agg(np.size)
data1gr.name = 'shu'
data1gr = data1gr.reset_index().sort_values(by=['shu'], ascending=False)
return data1gr
def tf(data2):  # expects the raw text string (datawenzi)
    key = analyse.extract_tags(data2, topK=30, withWeight=True, allowPOS=())  # withWeight=True also returns the TF-IDF weights
keyci = []
keyshu = []
for i in range(len(key)):
keyci.append(key[i][0])
keyshu.append(key[i][1])
keyci1 = | pd.DataFrame(keyci, columns=['ci']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import re
def process_brand(x):
if | pd.isnull(x) | pandas.isnull |
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
from enum import Enum
from typing import List
import pandas as pd
class Prediction:
""" General Prediction class used to capture output from surrogate model .predict() methods
PredictionSchema defines the known universe of .predict() dataframe columns. Column names
will be restricted to the enum values.
"""
class LegalColumnNames(Enum):
""" Enum class standardizing the data columns returned by a surrogate model's predict method
The class defines the "universe" of data returned by a predict() method, but not
all surrogate models must return all columns.
"""
# boolean indicating if predict() feature row could be used to make a prediction
IS_VALID_INPUT = 'is_valid_input'
# given an instance of the independent variable(s), what's the predicted dependent variable's value
PREDICTED_VALUE = 'predicted_value'
""" References:
https://en.wikipedia.org/wiki/Prediction_interval
https://stats.stackexchange.com/questions/16493/difference-between-confidence-intervals-and-prediction-intervals
https://haozhestat.github.io/files/manuscript_RFIntervals_FinalVersion.pdf
https://www.theoj.org/joss-papers/joss.00124/10.21105.joss.00124.pdf
"""
PREDICTED_VALUE_VARIANCE = 'predicted_value_variance'
PREDICTED_VALUE_DEGREES_OF_FREEDOM = 'predicted_value_degrees_of_freedom'
# https://en.wikipedia.org/wiki/Sample_mean_and_covariance#Sample_mean
SAMPLE_MEAN = 'sample_mean'
# https://en.wikipedia.org/wiki/Variance#Sample_variance
SAMPLE_VARIANCE = 'sample_variance'
SAMPLE_SIZE = 'sample_size'
DEGREES_OF_FREEDOM = 'degrees_of_freedom'
@classmethod
def create_prediction_from_dataframe(cls, objective_name: str, dataframe: pd.DataFrame):
assert objective_name is not None
predictor_outputs = [
Prediction.LegalColumnNames(column_name)
for column_name
in dataframe.columns.values
]
return Prediction(
objective_name=objective_name,
predictor_outputs=predictor_outputs,
dataframe=dataframe
)
def __init__(
self,
objective_name: str,
predictor_outputs: List[LegalColumnNames],
dataframe_index: pd.Index = None,
dataframe: pd.DataFrame = None,
num_head_rows_to_print: int = 1,
allow_extra_columns: bool = False
):
self.objective_name = objective_name
self.num_head_rows_to_print = num_head_rows_to_print
# validate passed args
for output_enum in predictor_outputs:
assert output_enum in set(column_name for column_name in Prediction.LegalColumnNames), \
f'PredictionSchema Error: Passed PredictionSchema enum "{output_enum}" not in Prediction.PredictionSchema'
self.predictor_outputs = predictor_outputs
# expect dataframe column names to be values from Enum above
self.expected_column_names = [output_enum.value for output_enum in self.predictor_outputs]
self.allow_extra_columns = allow_extra_columns
self._dataframe = pd.DataFrame(columns=self.expected_column_names, index=dataframe_index)
if dataframe is not None:
self.set_dataframe(dataframe)
def set_dataframe(self, dataframe: pd.DataFrame):
self.validate_dataframe(dataframe)
if self._dataframe.index.empty or (len(self._dataframe.index) == len(dataframe.index) and self._dataframe.index.equals(dataframe.index)):
self._dataframe = dataframe
else:
self._dataframe.loc[dataframe.index, self.expected_column_names] = dataframe[self.expected_column_names]
def validate_dataframe(self, dataframe: pd.DataFrame):
if not self.allow_extra_columns:
# validate passed columns exist in LegalColumnNames enum
for column_name in dataframe.columns.values:
assert column_name in self.expected_column_names, \
f'PredictionSchema Error: Failed to find "{column_name}" in Prediction.PredictionSchema class'
# validate all declared columns (in model's SCHEMA) are present in the dataframe
for expected_column_name in self.expected_column_names:
assert expected_column_name in dataframe.columns.values, \
f'PredictionSchema Error: Failed to find expected column name "{expected_column_name}" in passed dataframe'
mean_variance_col = self.LegalColumnNames.PREDICTED_VALUE_VARIANCE.value
sample_variance_col = self.LegalColumnNames.SAMPLE_VARIANCE.value
        if mean_variance_col in self.expected_column_names:
            if dataframe[mean_variance_col].notnull().any():
                assert (dataframe[mean_variance_col].dropna() >= 0).all()
        if sample_variance_col in self.expected_column_names:
            if dataframe[sample_variance_col].notnull().any():
                assert (dataframe[sample_variance_col].dropna() >= 0).all()
@classmethod
def get_enum_by_column_name(cls, column_name):
return Prediction.LegalColumnNames(column_name)
def get_dataframe(self):
return self._dataframe
@classmethod
def dataframe_from_json(cls, json_string):
return | pd.read_json(json_string, orient='index') | pandas.read_json |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../../../input/mathijs_weather-data-in-new-york-city-2016"))
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import xgboost
print()
# In[ ]:
df = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
df.passenger_count = df.passenger_count.astype(np.uint8)
df.vendor_id = df.vendor_id.astype(np.uint8)
df.trip_duration = df.trip_duration.astype(np.uint32)
for c in [c for c in df.columns if c.endswith('tude')]:
df.loc[:,c] = df[c].astype(np.float32)
print(df.memory_usage().sum()/2**20)
df.pickup_datetime=pd.to_datetime(df.pickup_datetime)
df.dropoff_datetime=pd.to_datetime(df.dropoff_datetime)
df['pu_hour'] = df.pickup_datetime.dt.hour
df['yday'] = df.pickup_datetime.dt.dayofyear
df['wday'] = df.pickup_datetime.dt.dayofweek
df['month'] = df.pickup_datetime.dt.month
# In[ ]:
sns.set_style('white')
sns.set_context("paper",font_scale=2)
corr = df.corr()
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11,9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
print()
# In[ ]:
fig, ax = plt.subplots(ncols=1, nrows=1)
sns.distplot(df['trip_duration']/3600,ax=ax,bins=100,kde=False,hist_kws={'log':True})
# In[ ]:
fig, ax = plt.subplots(ncols=1, nrows=1)
ax.set_xlim(0,30)
sns.distplot(df['trip_duration']/3600,ax=ax,bins=1000,kde=False,hist_kws={'log':True})
# In[ ]:
def haversine(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
miles = km * 0.621371
return miles
# In[ ]:
df['distance'] = haversine(df.pickup_longitude, df.pickup_latitude, df.dropoff_longitude, df.dropoff_latitude)
wdf = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
# In[ ]:
wdf['date']=pd.to_datetime(wdf.date,format='%d-%m-%Y')
wdf['yday'] = wdf.date.dt.dayofyear
# In[ ]:
wdf.head()
# In[ ]:
wdf['snowfall'] = wdf['snow fall'].replace(['T'],0.05).astype(np.float32)
wdf['precipitation'] = wdf['precipitation'].replace(['T'],0.05).astype(np.float32)
wdf['snowdepth'] = wdf['snow depth'].replace(['T'],0.05).astype(np.float32)
# In[ ]:
df = pd.merge(df,wdf,on='yday')
df.head()
# In[ ]:
df = df.drop(['date','maximum temperature','minimum temperature'],axis=1)
# In[ ]:
sns.set_style('white')
sns.set_context("paper",font_scale=2)
corr = df.corr()
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11,9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
print()
# In[ ]:
corr.head()
# In[ ]:
fastest1 = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
fastest2 = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
fastest = pd.concat([fastest1,fastest2],ignore_index=True)
fastest = fastest.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street','end_street'],axis=1)
fastest.head() #
# In[ ]:
df = pd.merge(df,fastest,on='id',how='outer')
df.head()
# In[ ]:
mask = ((df.trip_duration > 60) & (df.distance < 0.05))
df = df[~mask]
mask = (df.trip_duration < 60)
df = df[~mask]
mask = df.trip_duration > 79200
df = df[~mask]
mask = df.distance/(df.trip_duration/3600) > 60
df = df[~mask]
df.trip_duration = df.trip_duration.astype(np.uint16)
df = df[df.passenger_count > 0]
# In[ ]:
m = df.groupby(['wday','vendor_id'])[['trip_duration']].apply(np.median)
m.name = 'trip_duration_median'
df = df.join(m, on=['wday','vendor_id'])
# In[ ]:
sns.lmplot(y='trip_duration_median', x='wday',data=df, fit_reg=False, hue='vendor_id')
# In[ ]:
m = df.groupby(['pu_hour','vendor_id'])[['trip_duration']].apply(np.median)
m.name ='trip_duration_median_hour'
df = df.join(m, on=['pu_hour','vendor_id'])
# In[ ]:
sns.lmplot(y='trip_duration_median_hour', x='pu_hour',data=df, fit_reg=False, hue='vendor_id')
# In[ ]:
jfk_lon = -73.778889
jfk_lat = 40.639722
lga_lon = -73.872611
lga_lat = 40.77725
# In[ ]:
df['jfk_pickup_dist'] = df.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
df['lga_pickup_dist'] = df.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
df['jfk_dropoff_dist'] = df.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
df['lga_dropoff_dist'] = df.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
# In[ ]:
fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True)
ax[0,0].set_xlim(0,50)
sns.distplot(df['jfk_pickup_dist'],ax=ax[0,0],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['jfk_dropoff_dist'],ax=ax[0,1],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['lga_pickup_dist'],ax=ax[1,0],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['lga_dropoff_dist'],ax=ax[1,1],bins=100,kde=False,hist_kws={'log':True})
# In[ ]:
df['jfk'] = ((df['jfk_pickup_dist'] < 2) | (df['jfk_dropoff_dist'] < 2))
df['lga'] = ((df['lga_pickup_dist'] < 2) | (df['lga_dropoff_dist'] < 2))
df = df.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)
df.head()
# In[ ]:
df['workday'] = ((df['pu_hour'] > 8) & (df['pu_hour'] < 18))
df.head()
# In[ ]:
fig, ax = plt.subplots(ncols=1, nrows=1,figsize=(12,10))
plt.ylim(40.6, 40.9)
plt.xlim(-74.1,-73.7)
ax.scatter(df['pickup_longitude'],df['pickup_latitude'], s=0.01, alpha=1)
# ## RMSLE: Evaluation Metric
# In[ ]:
def rmsle(evaluator,X,real):
    predicted = evaluator.predict(X)
    print("Number predicted less than 0: {}".format(np.where(predicted < 0)[0].shape))
    predicted[predicted < 0] = 0
    total = 0.0  # accumulator (avoids shadowing the built-in sum)
    for x in range(len(predicted)):
        p = np.log(predicted[x]+1)
        r = np.log(real[x]+1)
        total = total + (p-r)**2
    return (total/len(predicted))**0.5
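# A minimal vectorized sketch of the same metric, illustrative only
# (np.log1p(v) == np.log(v + 1), so it matches the loop above).
def rmsle_vectorized(evaluator, X, real):
    predicted = np.clip(evaluator.predict(X), 0, None)
    return np.sqrt(np.mean((np.log1p(predicted) - np.log1p(np.asarray(real))) ** 2))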
# ## Load test data
# In[ ]:
tdf = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
tdf.pickup_datetime=pd.to_datetime(tdf.pickup_datetime)
#tdf.dropoff_datetime=pd.to_datetime(tdf.dropoff_datetime)
tdf['pu_hour'] = tdf.pickup_datetime.dt.hour
tdf['yday'] = tdf.pickup_datetime.dt.dayofyear
tdf['wday'] = tdf.pickup_datetime.dt.dayofweek
tdf['month'] = tdf.pickup_datetime.dt.month
tdf['distance'] = haversine(tdf.pickup_longitude, tdf.pickup_latitude, tdf.dropoff_longitude, tdf.dropoff_latitude)
fastest_test = | pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv") | pandas.read_csv |
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import xgboost as xgb
import operator
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
from sklearn.preprocessing import Imputer
from sklearn.ensemble import RandomForestRegressor
plt.rcParams['font.sans-serif'] = ['SimHei']  # specify the default font (SimHei renders CJK glyphs)
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures
def check_nan():
"""
    Missing-value analysis and removal of heavily missing features
"""
train = pd.read_csv("./data/house/train.csv")
na_count = train.isnull().sum().sort_values(ascending=False)
na_rate = na_count / len(train)
na_data = pd.concat([na_count, na_rate], axis=1, keys=['count', 'rate'])
na_data.head(20)
print(na_data)
    # Drop features with many missing values (features with more than 20% missing are removed)
train = train.drop(na_data[na_data['rate'] > 0.20].index, axis=1)
d_count = train.isnull().sum().sort_values(ascending=False)
print(d_count)
return train
def load_data():
# train = pd.read_csv("./data/house/train.csv")
train = check_nan()
for f in train.columns:
if train[f].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values))
train[f] = lbl.transform(list(train[f].values))
x = train.drop(['SalePrice', 'Id'], 1)
y = train['SalePrice']
return x, y
def feature_selection():
"""
    Feature selection
"""
x, y = load_data()
params = {
        # Minimum sum of instance weight needed in a child node; used to avoid overfitting.
        # Larger values keep the model from learning overly local patterns, but a value that
        # is too high leads to underfitting. Tune with CV.
        'min_child_weight': 100,
        'eta': 0.02,  # acts like a learning rate [default 0.3]
        'colsample_bytree': 0.7,  # fraction of columns (features) sampled per tree; typical range 0.5-1
        # Maximum tree depth, also used to control overfitting. Larger max_depth lets the model
        # learn more specific, local patterns. Tune with CV; typical range 3-10.
        'max_depth': 12,
        'subsample': 0.7,  # row subsampling ratio of the training data, set to 0.7
        'alpha': 1,  # L1 regularization term; useful in very high dimensions and speeds up the algorithm
        'gamma': 1,  # minimum loss reduction required to split a node; larger values are more conservative
        'silent': 1,  # 0 prints running messages, 1 is silent mode
'verbose_eval': True,
'seed': 12
}
xgtrain = xgb.DMatrix(x, label=y)
bst = xgb.train(params, xgtrain, num_boost_round=10)
features = [x for x in x.columns if x not in ['SalePrice', 'id']]
create_feature_map(features)
    # Get the importance score of each feature
    importance = bst.get_fscore(fmap='./data/house/xgb.fmap')
    print("Number of features:", len(importance))
    # Sort by importance
importance = sorted(importance.items(), key=operator.itemgetter(1))
df = pd.DataFrame(importance, columns=["feature", "fscore"])
df["fscore"] = df["fscore"] / df['fscore'].sum()
print(df)
label = df['feature'].T.values
xtop = df['fscore'].T.values
idx = np.arange(len(xtop))
fig = plt.figure(figsize=(12, 6))
plt.barh(idx, xtop, alpha=0.8)
plt.yticks(idx, label,)
    plt.grid(axis='x')  # show grid lines
    plt.xlabel('Importance')
    plt.ylabel('Feature')
    plt.title('XGBoost feature selection')
plt.show()
def create_feature_map(features):
outfile = open('./data/house/xgb.fmap', 'w')
i = 0
for feat in features:
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
i = i + 1
outfile.close()
def feature_ref():
x, y = load_data()
data_col=x.columns
    # Impute missing values
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(x)
x=imp.transform(x)
print("2",np.isnan(x).any())
model = SVR(kernel='linear')
    # Select the 5 most influential features
rfe = RFE(model, 5)
rfe = rfe.fit(x, y)
for i, v in enumerate(rfe.support_):
if v:
print(data_col[i])
def train():
PATH = "./data/house/"
df_train = pd.read_csv(f'{PATH}train.csv', index_col='Id')
df_test = pd.read_csv(f'{PATH}test.csv', index_col='Id')
target = df_train['SalePrice']
df_train = df_train.drop('SalePrice', axis=1)
df_train['training_set'] = True
df_test['training_set'] = False
df_full = pd.concat([df_train, df_test])
df_full = df_full.interpolate()
df_full = pd.get_dummies(df_full)
df_train = df_full[df_full['training_set'] == True]
df_train = df_train.drop('training_set', axis=1)
df_test = df_full[df_full['training_set'] == False]
df_test = df_test.drop('training_set', axis=1)
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
rf.fit(df_train, target)
preds = rf.predict(df_test)
my_submission = | pd.DataFrame({'Id': df_test.index, 'SalePrice': preds}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 19:07:55 2019
@author: aman
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
gridsizes = [1,2,3,4,6,8,12,16,24,32,48,64]
overlap = [2,4,8,16,32,64,128,256]
playbackrates = [5,10,15,30,40,60,80,100]
def load(fname,threshold=50):
data = pd.read_csv(fname,usecols=range(4))
sync = data.DataElement0.diff()
dsync = sync > threshold
rising = np.flatnonzero(np.diff(np.int8(dsync)) > 0.0)+1
risetimes = data.Timestamp[rising]
trials = np.diff(risetimes) > 1.5 # split when stable more than 1.5 secs
risetimes = np.split(risetimes,np.flatnonzero(trials)+1)
fps = [2/t.diff().mean() for t in risetimes]
return data,sync,dsync,risetimes,fps
def stats_fps(trials,**kwargs):
data = | pd.DataFrame([t[-1] for t in trials]) | pandas.DataFrame |
from pyg_base import loop, eq, drange, Dict
import pandas as pd; import numpy as np
import pytest
from numpy import array
SP = lambda a, b: Dict(s = a+b, p = a*b)
AB = lambda a, b: a+b
def S(v):
if isinstance(v, list):
return [S(w) for w in v]
else:
return v.s
def test_loop_dict():
f = loop(dict)(AB)
assert f(1,2) == 3
assert f(1, b=2) == 3
assert f(a=1, b=2) ==3
assert f(dict(a=1,b=2), 2) == dict(a = 3, b = 4)
assert f(dict(a=1,b=2), dict(a=2, b=3)) == dict(a = 3, b = 5)
b = | pd.Series([2,3], ['a','b']) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import FunctionTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from IPython.display import display # Jupyter display
# by default drop_first to avoid the 'dummy variable trap'
DROP = True
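# Quick illustration of the dummy-variable trap this flag avoids (toy frame, illustrative only):
#   toy = pd.DataFrame({"os": ["CentOS", "Ubuntu", "SUSE"]})
#   pd.get_dummies(toy)                   # os_CentOS, os_SUSE, os_Ubuntu -> columns sum to 1,
#                                         # i.e. perfectly collinear with an intercept term
#   pd.get_dummies(toy, drop_first=True)  # os_SUSE, os_Ubuntu -> the dropped level is the baseline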
class Debug(BaseEstimator, TransformerMixin):
"""This class is designed to be used as an intermediate step in `Pipeline`s.
"""
def __init__(self, rows=5):
"""`rows`: number of rows of the transformed X to store for debugging purposes
"""
self.rows = rows
def fit(self, X, y=None):
return self
def transform(self, X):
self.X_head = X[: self.rows, :]
return X
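# Minimal usage sketch for Debug (step names and estimator choices are illustrative assumptions):
#   from sklearn.preprocessing import StandardScaler
#   from sklearn.linear_model import LinearRegression
#   pipe = Pipeline([("scale", StandardScaler()),
#                    ("debug", Debug(rows=3)),
#                    ("reg", LinearRegression())])
#   pipe.fit(X_train, y_train)
#   pipe.named_steps["debug"].X_head   # first 3 rows as seen after scaling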
class ColumnReorder(FunctionTransformer):
"""
this custom transformer class is specifically designed to be used after a
`ColumnTransformer` in a `Pipeline`,
and reorder the columns transformed by the `ColumnTransformer` back to the
original ordering of the `X` columns
"""
def __init__(self, initial_features, trans_features):
"""
`sklearn.base.BaseEstimator`: "all estimators should specify all
the parameters that can be set at the class level in their __init__
as explicit keyword arguments (no *args or **kwargs)".
Therefore, we need the internal versions of the parameters too
"""
super().__init__(
func=self._col_reorder_func,
validate=True,
kw_args={"init_feats": initial_features, "trans_feats": trans_features,},
)
# `get_params` looks at the internal versions
self.initial_features = initial_features
self.trans_features = trans_features
# private static method
@staticmethod
def _col_reorder_func(X, init_feats, trans_feats):
res_feats = trans_feats.copy()
for feat in init_feats:
if feat not in res_feats:
res_feats.append(feat)
# now `res_features` contains feature names in the transformed version
order_ind = [res_feats.index(x0) for x0 in init_feats]
X[:] = X[:, order_ind]
return X
class CustomPipeline(Pipeline):
"""A Pipeline that exposes `coef_` or `feature_importances_`
Note: `Pipeline` has a property called `_final_estimator`
"""
@property
def coef_(self):
return self._final_estimator.coef_
@property
def feature_importances_(self):
return self._final_estimator.feature_importances_
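# Why re-exposing coef_/feature_importances_ matters (estimator names are illustrative):
# a fitted CustomPipeline can be handed to utilities that expect those attributes on the
# estimator itself (e.g. recursive feature elimination) without unwrapping the final step.
#   pipe = CustomPipeline([("scale", StandardScaler()), ("lasso", Lasso(alpha=0.1))])
#   pipe.fit(X_train, y_train)
#   pipe.coef_   # forwarded from the final Lasso step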
class CustomTransformedTargetRegressor(TransformedTargetRegressor):
def __init__(self, regressor, trans_y):
trans_funcs = {
"log": {"func": np.log, "inverse_func": np.exp},
"sqrt": {"func": np.sqrt, "inverse_func": lambda a: np.power(a, 2)},
"none": {"func": lambda a: a, "inverse_func": lambda a: a},
}
func = trans_funcs[trans_y]["func"]
inverse_func = trans_funcs[trans_y]["inverse_func"]
# if you don't use super(), you'll have to pass all arguments
super().__init__(regressor=regressor, func=func, inverse_func=inverse_func)
self.trans_y = trans_y
@property
def feature_importances_(self):
return self.regressor_.feature_importances_
@property
def coef_(self):
return self.regressor_.coef_
# in case it has `alpha_` as in MultiTaskLassoCV
@property
def alpha_(self):
return self.regressor_.alpha_
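# Usage sketch (regressor and data names are illustrative assumptions): the target is fitted
# on a log scale, predictions are mapped back with exp, and the underlying regressor's
# attributes stay reachable through the properties above.
#   reg = CustomTransformedTargetRegressor(
#       regressor=RandomForestRegressor(n_estimators=100), trans_y="log")
#   reg.fit(X_train, y_train)      # internally fits on log(y_train)
#   preds = reg.predict(X_test)    # back-transformed to the original scale
#   reg.feature_importances_       # forwarded from the fitted regressor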
def add_unique_os_columns(df, NESTED_CATEGORICAL):
# Unique OS-Names excluding NaNs
# we use a dictionary, because we'd like to track the presence (1) or absence (0) of unique values (need to remove one: dummy variable trap)
# Tip: add prefix to column in pandas: df['col'] = 'prefix' + df['col'].astype(str)
unique_os_dic = {
os: 1 for os in ("OS_" + df["os_name"].dropna().astype(str)).unique()
}
os_base = "OS_CentOS"
for uos in unique_os_dic:
# Tip: create a new column in DataFrame based on a condition on another columns, axis=1 uses for different rows
if NESTED_CATEGORICAL == False: # using weighted dummy variables
# Tip: Use df.apply(func, axis=1) to send every single row to a function. Axis along which the function is applied: 0 or 'index': apply function to each column. 1 or 'columns': apply function to each row
df[uos] = df.apply(
lambda row: int(str(row["os_vid"]).replace(".", ""))
if row["os_name"] == uos.replace("OS_", "")
else 0,
axis=1,
)
else:
df[uos] = df.apply(
lambda row: 1 if row["os_name"] == uos.replace("OS_", "") else 0, axis=1
)
df["IA_" + uos] = df.apply(
lambda row: row[uos] * float(row["os_vid"]), axis="columns"
)
if DROP == True:
##### NOTE: if you want to drop the first dummy
        df.drop(os_base, axis="columns", inplace=True, errors="ignore")
unique_os_dic[os_base] = 0
# no need to remove one interaction in either case,
# because if the main effect is absent, we still need
# interaction terms for all unique values
print("Remove one OS dummy var: ", os_base)
print("unique_OS_dic: ", unique_os_dic)
return unique_os_dic
def add_unique_compiler_columns(df, NESTED_CATEGORICAL):
comp_name_vid_ser = (
df["compiler"]
.str.split("Build")
.str[0]
.str.split("Compiler")
.str[0]
.str.replace(",", "")
.str.replace(".", "")
.str.replace(":", "")
.str.replace(r"C/C\+\+/Fortran", "")
.str.replace(":", "")
.str.replace(r"C/C\+\+", "")
.str.replace("Version", "")
.str.strip()
)
# ['1901144 of Intel', ...]
df["comp_name"] = comp_name_vid_ser.str.split("of", 1).str[1].str.strip()
df["comp_vid"] = comp_name_vid_ser.str.split("of", 1).str[0].str.strip().str[:4]
unique_comp_name_vid_list = comp_name_vid_ser.dropna().unique().tolist()
# Tip: unique values in a list: convert it to 'set'
unique_compiler_dic = {
comp: 1
for comp in list(
set(
[
"COMP_" + i.split("of", 1)[1].strip()
for i in unique_comp_name_vid_list
]
)
)
}
comp_base = "COMP_AOCC"
# Tip: manual long to wide
for ucomp in unique_compiler_dic:
if NESTED_CATEGORICAL == False: # using weighted dummy variables
df[ucomp] = df.apply(
lambda row: int(row["comp_vid"])
if row["comp_name"] == ucomp.replace("COMP_", "")
else 0,
axis=1,
)
else:
df[ucomp] = df.apply(
lambda row: 1 if row["comp_name"] == ucomp.replace("COMP_", "") else 0,
axis=1,
)
df["IA_" + ucomp] = df.apply(
lambda row: row[ucomp] * float(row["comp_vid"]), axis="columns"
)
if DROP == True:
##### NOTE: if you want to drop the first dummy
        df.drop(comp_base, axis="columns", inplace=True, errors="ignore")
unique_compiler_dic[comp_base] = 0
# no need to remove one interaction in either case,
# because if the main effect is absent, we still need
# interaction terms for all unique values
print("Remove one Compiler dummy var: ", comp_base)
print("unique_Compiler_dic: ", unique_compiler_dic)
return unique_compiler_dic
def make_Xy_df(
all_data_df,
NESTED_CATEGORICAL,
numerical_predictors,
categorical_predictors,
unique_oses,
unique_compilers,
benchmarks,
test_size=0.2,
shuffle=True,
random_state=None,
):
"""
Get a df, convert all features to numerics, return X_df, y_df, ty_df, Xy_df
"""
#####
# split into train and test
train_df, test_df = train_test_split(
all_data_df.copy(),
test_size=test_size,
shuffle=shuffle,
random_state=random_state,
)
# transform predictors
def transform_predictors(inp_df):
num_predictors = (
numerical_predictors.copy()
) # to be able to extend in different calls without touching the original
unique_os_interacts = ["IA_" + o for o in unique_oses]
unique_comp_interacts = ["IA_" + c for c in unique_compilers]
if NESTED_CATEGORICAL == True:
# Tip: extend a list with multiple lists
num_predictors += (
[o for o in unique_oses if unique_oses[o] == 1]
+ unique_os_interacts
+ [c for c in unique_compilers if unique_compilers[c] == 1]
+ unique_comp_interacts
)
else:
num_predictors += unique_oses + unique_compilers
num_df = inp_df[
num_predictors
] # in this technique, the VIDs are already added to the dummy variables
cat_df = inp_df[categorical_predictors]
###################################
# Change categorical to dummy,
# concat them to numerical and build the final df of all features
###################################
if not cat_df.empty:
if DROP == True:
# Tip: DataFrames, avoid the dummy variable trap by
# dropping the first dummy variable
dummy_df = | pd.get_dummies(cat_df, drop_first=True) | pandas.get_dummies |
#!/usr/bin/env python
# coding: utf-8
import geopandas as gpd
import pandas as pd
import numpy as np
from datetime import datetime, timedelta, date
import requests
import json
from rasterstats import point_query
from shapely import geometry as sgeom
import ulmo
from collections import OrderedDict
import math
from random import sample
#########################################################################
############################ USER INPUTS ################################
#########################################################################
# NOTE: to run assim, set irun_data_assim = 1 in .par file
# DOMAIN
# choose the modeling domain
domain = 'CA'
# TIME
# choose if want to set 'manual' or 'auto' date
date_flag = 'manual'
# If you choose 'manual' set your dates below
st_dt = '2018-10-01'
ed_dt = '2019-09-30'
# select assimilation variable
var = 'all'
# PATHS
assimPath = '/nfs/attic/dfh/Aragon2/CSOassim/'+domain+'/'
dataPath = '/nfs/attic/dfh/Aragon2/CSOdmn/'+domain+'/'
#path to dem .tif
dem_path = dataPath + 'DEM_'+domain+'.tif'
#path to landcover .tif
lc_path = dataPath + 'NLCD2016_'+domain+'.tif'
#path to SnowModel
SMpath = '/nfs/attic/dfh/Aragon2/CSOsm/'+domain+'/'+var+'/'
gdatpath = '/scratch/Nina/CSOdata/'+domain+'/'
#########################################################################
# Date setup function
def set_dates(st_dt,ed_dt,date_flag):
if date_flag == 'auto':
# ###automatically select date based on today's date
hoy = date.today()
antes = timedelta(days = 3)
#end date 3 days before today's date
fecha = hoy - antes
eddt = fecha.strftime("%Y-%m-%d")
#whole water year
if (hoy.month == 10) & (hoy.day == 3):
eddt = fecha.strftime("%Y-%m-%d")
stdt = str(hoy.year - 1)+'-10-01'
#start dates
elif fecha.month <10:
stdt = str(fecha.year - 1)+'-10-01'
else:
stdt = str(fecha.year)+'-10-01'
elif date_flag == 'manual':
stdt = st_dt
eddt = ed_dt
return stdt, eddt
stdt, eddt = set_dates(st_dt,ed_dt,date_flag)
print(stdt, eddt)
#########################################################################
# CSO Functions
#########################################################################
# Function to get SWE from CSO Hs
def swe_calc(gdf):
#convert snow depth to mm to input into density function
H = gdf.depth.values*10
#Get temp info at each point
TD = np.array([point_query([val], '/nfs/attic/dfh/data/depth2swe/td_final.txt')[0] for val in gdf.geometry])
#Get pr info at each point
PPTWT = np.array([point_query([val], '/nfs/attic/dfh/data/depth2swe/ppt_wt_final.txt')[0] for val in gdf.geometry])
#Determine day of year
dates = pd.to_datetime(gdf.timestamp, format='%Y-%m-%dT%H:%M:%S').dt.date.values
DOY = [date.toordinal(date(dts.year,dts.month,dts.day))-date.toordinal(date(dts.year,9,30)) for dts in dates]
DOY = np.array([doy + 365 if doy < 0 else doy for doy in DOY])
#Apply regression equation
a = [0.0533,0.948,0.1701,-0.1314,0.2922] #accumulation phase
b = [0.0481,1.0395,0.1699,-0.0461,0.1804]; #ablation phase
SWE = a[0]*H**a[1]*PPTWT**a[2]*TD**a[3]*DOY**a[4]*(-np.tanh(.01*\
(DOY-180))+1)/2 + b[0]*H**b[1]*PPTWT**b[2]*TD**b[3]*DOY**b[4]*\
(np.tanh(.01*(DOY-180))+1)/2;
#convert swe to m to input into SM
gdf['swe'] = SWE/1000
gdf['doy'] = DOY
gdf['H'] = H
return gdf
# Function to build geodataframe of CSO point observations
def get_cso(st, ed, domain):
'''
st = start date 'yyyy-mm-dd'
ed = end date 'yyyy-mm-dd'
domain = string label of defined CSO domain
'''
#path to CSO domains
domains_resp = requests.get("https://raw.githubusercontent.com/snowmodel-tools/preprocess_python/master/CSO_domains.json")
domains = domains_resp.json()
Bbox = domains[domain]['Bbox']
stn_proj = domains[domain]['stn_proj']
mod_proj = domains[domain]['mod_proj']
#Issue CSO API observations request and load the results into a GeoDataFrame
params = {
"bbox": f"{Bbox['lonmin']},{Bbox['latmax']},{Bbox['lonmax']},{Bbox['latmin']}",
"start_date": st,
"end_date": ed,
"format": "geojson",
"limit": 5000,
}
csodata_resp = requests.get("https://api.communitysnowobs.org/observations", params=params)
csodatajson = csodata_resp.json()
#turn into geodataframe
gdf = gpd.GeoDataFrame.from_features(csodatajson, crs=stn_proj)
mask = (gdf['timestamp'] >= st) & (gdf['timestamp'] <= ed)
gdf = gdf.loc[mask]
gdf=gdf.reset_index(drop=True)
print('Total number of CSO in domain = ',len(gdf))
#ingdf = extract_meta(gdf,domain,dem_path,lc_path)
ingdf = swe_calc(gdf)
ingdf_proj = ingdf.to_crs(mod_proj)
ingdf['dt'] = pd.to_datetime(ingdf['timestamp'], format='%Y-%m-%dT%H:%M:%S').dt.date
ingdf['Y'] = pd.DatetimeIndex(ingdf['dt']).year
ingdf['M'] = pd.DatetimeIndex(ingdf['dt']).month
ingdf['D'] = pd.DatetimeIndex(ingdf['dt']).day
ingdf["x"] = ingdf_proj.geometry.x
ingdf["y"] = ingdf_proj.geometry.y
return ingdf
# QA/QC function for CSO data
def qaqc_iqr(csodf):
print('Performing qa/qc on CSO data using IQR method')
clim_dir = '/nfs/attic/dfh/data/snodas/snodas_tif/clim/'
iqr_flag = []
for i in range(len(csodf)):
# get cso snow depth
csohs = csodf.H[i]
# get date
dates = pd.to_datetime(csodf.timestamp[i], format='%Y-%m-%dT%H:%M:%S')
# define path names for 1st and 3rd doy quantiles
q1_Fname = clim_dir+dates.strftime("%m")+dates.strftime("%d")+'1036q1.tif'
q3_Fname = clim_dir+dates.strftime("%m")+dates.strftime("%d")+'1036q3.tif'
q1 = point_query([csodf.geometry[i]], q1_Fname)[0]
q3 = point_query([csodf.geometry[i]], q3_Fname)[0]
IQR = q3-q1
# False = outlier
iqr_flag.append((csohs > (q1-1.5*IQR)) & (csohs < (q3+1.5*IQR)))
csodf['iqr_flag'] = iqr_flag
csodf_clean = csodf.loc[csodf['iqr_flag'] == True]
csodf_clean = csodf_clean.reset_index(drop=True)
return csodf_clean
#########################################################################
# SNOTEL Functions
#########################################################################
# functions to get SNOTEL stations as geodataframe
def sites_asgdf(ulmo_getsites, stn_proj):
""" Convert ulmo.cuahsi.wof.get_sites response into a point GeoDataframe
"""
# Note: Found one SNOTEL site that was missing the location key
sites_df = pd.DataFrame.from_records([
OrderedDict(code=s['code'],
longitude=float(s['location']['longitude']),
latitude=float(s['location']['latitude']),
name=s['name'],
elevation_m=s['elevation_m'])
for _,s in ulmo_getsites.items()
if 'location' in s
])
sites_gdf = gpd.GeoDataFrame(
sites_df,
geometry=gpd.points_from_xy(sites_df['longitude'], sites_df['latitude']),
crs=stn_proj
)
return sites_gdf
def get_snotel_stns(domain):
#path to CSO domains
domains_resp = requests.get("https://raw.githubusercontent.com/snowmodel-tools/preprocess_python/master/CSO_domains.json")
domains = domains_resp.json()
#Snotel bounding box
Bbox = domains[domain]['Bbox']
# Snotel projection
stn_proj = domains[domain]['stn_proj']
# model projection
mod_proj = domains[domain]['mod_proj']
# Convert the bounding box dictionary to a shapely Polygon geometry using sgeom.box
box_sgeom = sgeom.box(Bbox['lonmin'], Bbox['latmin'], Bbox['lonmax'], Bbox['latmax'])
box_gdf = gpd.GeoDataFrame(geometry=[box_sgeom], crs=stn_proj)
# WaterML/WOF WSDL endpoint url
wsdlurl = "https://hydroportal.cuahsi.org/Snotel/cuahsi_1_1.asmx?WSDL"
# get dictionary of snotel sites
sites = ulmo.cuahsi.wof.get_sites(wsdlurl,user_cache=True)
#turn sites to geodataframe
snotel_gdf = sites_asgdf(sites,stn_proj)
#clip snotel sites to domain bounding box
gdf = gpd.sjoin(snotel_gdf, box_gdf, how="inner")
gdf.drop(columns='index_right', inplace=True)
gdf.reset_index(drop=True, inplace=True)
#add columns with projected coordinates
CSO_proj = gdf.to_crs(mod_proj)
gdf['easting'] = CSO_proj.geometry.x
gdf['northing'] = CSO_proj.geometry.y
return gdf
def fetch(sitecode, variablecode, start_date, end_date):
print(sitecode, variablecode, start_date, end_date)
values_df = None
wsdlurl = "https://hydroportal.cuahsi.org/Snotel/cuahsi_1_1.asmx?WSDL"
try:
#Request data from the server
site_values = ulmo.cuahsi.wof.get_values(
wsdlurl, 'SNOTEL:'+sitecode, variablecode, start=start_date, end=end_date
)
#Convert to a Pandas DataFrame
values_df = pd.DataFrame.from_dict(site_values['values'])
#Parse the datetime values to Pandas Timestamp objects
values_df['datetime'] = | pd.to_datetime(values_df['datetime']) | pandas.to_datetime |
# coding=utf-8
# !/usr/bin/env python3
import os, re
import numpy as np
import pandas as pd
from SimpleCalculate import simpleStatistics
from ReadUtils import readFile,svType,svLen,svEnd,processBar
def judgeIfOverlap(start_1,end_1,start_2,end_2,sv_type,refdist,overlap_rate=0.5):
#start_1 < end_1 && start_2 < end_2
if sv_type == 'INS':
return start_1 - refdist <= start_2 <= start_1 + refdist or \
start_1 - refdist <= end_2 <= start_1 + refdist
else:
start_max = max(start_1,start_2)
end_min = min(end_1,end_2)
overlap_range = end_min - start_max
SV_1_range = end_1 - start_1
SV_2_range = end_2 - start_2
return overlap_range/SV_1_range >=overlap_rate and overlap_range/SV_2_range >=overlap_rate
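# Illustrative check of the reciprocal-overlap rule above (coordinates are made up):
# two 1 kb deletions shifted by 400 bp share 600 bp (60% of each interval), so with the
# default overlap_rate of 0.5 they count as the same event; an 800 bp shift (20%) does not.
#   judgeIfOverlap(1000, 2000, 1400, 2400, 'DEL', 200)   # -> True  (0.6 >= 0.5 for both intervals)
#   judgeIfOverlap(1000, 2000, 1800, 2800, 'DEL', 200)   # -> False (0.2 < 0.5)
#   judgeIfOverlap(1000, 1000, 1150, 1150, 'INS', 200)   # -> True  (breakpoint within refdist)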
def getStartAndEnd(start,end):
start,end = min(start,end),max(start,end)
return start,end
def binarySearch(bench_df,home_pos,low,high):
left = int(low + (high - low)/2)
right = left + 1
if left == 0 or right == bench_df.shape[0] - 1:
return left,right
left_pos = bench_df['POS'].iloc[left]
right_pos = bench_df['POS'].iloc[right]
if left_pos <= home_pos <= right_pos:
return left,right
elif home_pos < left_pos:
return binarySearch(bench_df, home_pos, low, left)
else:
return binarySearch(bench_df, home_pos, right, high)
def judgeNeighbour(bench_df,home_pos,compared_sv_chrom,compared_sv_start,compared_sv_end,compared_sv_type,typeignore,refdist=200,overlap_rate=0.5):
flag = 0
if (bench_df.shape == ()):
if (home_pos < bench_df['POS'].iloc[0]):
left_neighbour_loc,right_neighbour_loc = None,0
else:
left_neighbour_loc,right_neighbour_loc = 0,None
else:
left_neighbour_loc,right_neighbour_loc = binarySearch(bench_df, home_pos, 0, bench_df.shape[0]-1)
if left_neighbour_loc is not None:
if bench_df['POS'].shape == ():
left_neighbour_end = svEnd(bench_df.to_frame().T.iloc[[left_neighbour_loc]])
left_neighbour_type = svType(bench_df.to_frame().T.iloc[[left_neighbour_loc]])
else:
left_neighbour_end = svEnd(bench_df.iloc[[left_neighbour_loc]])
left_neighbour_type = svType(bench_df.iloc[[left_neighbour_loc]])
left_neighbour_start,left_neighbour_end = getStartAndEnd(bench_df['POS'].iloc[left_neighbour_loc],left_neighbour_end)
if judgeIfOverlap(compared_sv_start, compared_sv_end, left_neighbour_start,left_neighbour_end,compared_sv_type,refdist,overlap_rate):
if typeignore == False:
if left_neighbour_type == compared_sv_type:
flag = 1
else:
flag = 1
if flag == 1:
return flag
if right_neighbour_loc is not None:
if bench_df['POS'].shape == ():
right_neighbour_end = svEnd(bench_df.to_frame().T.iloc[[right_neighbour_loc]])
right_neighbour_type = svType(bench_df.to_frame().T.iloc[[right_neighbour_loc]])
else:
right_neighbour_end = svEnd(bench_df.iloc[[right_neighbour_loc]])
right_neighbour_type = svType(bench_df.iloc[[right_neighbour_loc]])
right_neighbour_start,right_neighbour_end = getStartAndEnd(bench_df['POS'].iloc[right_neighbour_loc],right_neighbour_end)
if judgeIfOverlap(compared_sv_start, compared_sv_end, right_neighbour_start,right_neighbour_end,compared_sv_type,refdist,overlap_rate):
if typeignore == False:
if right_neighbour_type == compared_sv_type:
flag = 1
else:
flag = 1
return flag
def judgeIfSame(data_1,data_2,refdist,typeignore,overlap_rate,i):
flag = 0
data_1_sv_pos = int(data_1['POS'].iloc[i])
data_1_sv_chrom = str(data_1.index[i])
data_1_sv_end = svEnd(data_1.iloc[[i]])
data_1_sv_start,data_1_sv_end = getStartAndEnd(data_1_sv_pos, data_1_sv_end)
data_1_sv_type = svType(data_1.iloc[[i]])
# if the chrom is the same
if data_1_sv_chrom in data_2.index:
global bench_dict
bench_df = bench_dict[data_1_sv_chrom]
flag = judgeNeighbour(bench_df,data_1_sv_start,data_1_sv_chrom,data_1_sv_start,data_1_sv_end,data_1_sv_type,typeignore,refdist,overlap_rate)
if flag == 0 and (data_1_sv_type not in ['INS','None']):
flag = judgeNeighbour(bench_df,data_1_sv_end,data_1_sv_chrom,data_1_sv_start,data_1_sv_end,data_1_sv_type,typeignore,refdist,overlap_rate)
return flag
def preProcessData(bench_data):
global bench_dict
bench_dict = {}
for chrom in bench_data.index:
bench_dict[chrom] = bench_data.xs(chrom).sort_values(by = 'POS')
def compareTwo(file_name_1,file_name_2,out_dir,refdist,typeignore=False,overlap_rate=0.5):
# SV_1
data_1 = readFile(file_name_1)
# SV_2
data_2 = readFile(file_name_2)
print("Initialization Start!")
preProcessData(data_2)
print("Initialization Done!")
TP_sv = | pd.DataFrame(columns=data_1.columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# **Let's work through a few small exercises so that the homework will be easier later.**
#
#
# In[ ]:
import numpy as np
import pandas as pd
df = pd.read_csv("../input/cwurData.csv")
# 1) Find the 'World University Rankings' dataset on Kaggle
#
# 2) Create a new kernel (notebook)
#
# 3) Read the data from the dataset file 'cwurData.csv'
#
# 4) Display the data as a table
# In[ ]:
df
# 5) Display the rows of the table that refer to Estonian universities
# In[ ]:
df.loc[df["country"] == "Estonia"]
# 6) Display the average education-quality score grouped by country
# In[ ]:
quality_of_edu_mean = pd.DataFrame(df.groupby('country').quality_of_education.mean())
quality_of_edu_mean
# 7) Sort the resulting data in descending order by the average education-quality score
#
# Hints: put the data from the previous exercise into a new DataFrame and sort that DataFrame
# In[ ]:
quality_of_edu_mean.sort_values('quality_of_education', ascending=False)
# 8) Find how many times each country's universities appear in the table
#
# In[ ]:
uni_frequency = pd.DataFrame(df.groupby('country').size())
uni_frequency.rename(index=str, columns={0:"frequency"}, inplace=True)
uni_frequency.sort_values("frequency", ascending=False)
# 8) a) Find the same, but only for the 2015 results
# In[ ]:
uni_frequency_2015 = pd.DataFrame(df[df.year == 2015].groupby('country').size())
uni_frequency_2015.rename(index=str, columns={0:"frequency"}, inplace=True)
uni_frequency_2015.sort_values("frequency", ascending=False)
# 9) How many universities have produced n publications.
# In[ ]:
df["publications"].plot.hist(title="N publikatsiooni välja andnud ülikoolide arv", rwidth=0.9, grid=True, color="m");
# 10) How the number of publications produced by universities relates to the number of citations.
# In[ ]:
publications_mean = pd.DataFrame(df.groupby('institution').publications.mean())
citations_mean = pd.DataFrame(df.groupby('institution').citations.mean())
info = np.array([publications_mean["publications"], citations_mean["citations"]])
scatter_table = | pd.DataFrame(data=info[0:], index=["publications", "citations"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This is a module for extending pandas dataframes with the modelflow toolbox
Created on Sat March 2019
@author: hanseni
"""
import pandas as pd
from collections import namedtuple
import inspect
from modelclass import model
import modelvis as mv
if not hasattr(pd.DataFrame,'mf'):
@ | pd.api.extensions.register_dataframe_accessor("mf") | pandas.api.extensions.register_dataframe_accessor |
"""
Filter and combine various peptide/MHC datasets to derive a composite training set,
optionally including eluted peptides identified by mass-spec.
"""
import sys
import argparse
import os
import json
import collections
from six.moves import StringIO
import pandas
from mhcflurry.common import normalize_allele_name
def normalize_allele_name_or_return_unknown(s):
return normalize_allele_name(
s,
raise_on_error=False,
default_value="UNKNOWN")
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--ms-item",
nargs="+",
action="append",
metavar="PMID FILE, ... FILE",
default=[],
help="Mass spec item to curate: PMID and list of files")
parser.add_argument(
"--expression-item",
nargs="+",
action="append",
metavar="LABEL FILE, ... FILE",
default=[],
help="Expression data to curate: dataset label and list of files")
parser.add_argument(
"--ms-out",
metavar="OUT.csv",
help="Out file path (MS data)")
parser.add_argument(
"--expression-out",
metavar="OUT.csv",
help="Out file path (RNA-seq expression)")
parser.add_argument(
"--expression-metadata-out",
metavar="OUT.csv",
help="Out file path for expression metadata, i.e. which samples used")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Leave user in pdb if PMID is unsupported")
PMID_HANDLERS = {}
EXPRESSION_HANDLERS = {}
def load(filenames, **kwargs):
result = {}
for filename in filenames:
if filename.endswith(".csv"):
result[filename] = pandas.read_csv(filename, **kwargs)
elif filename.endswith(".xlsx") or filename.endswith(".xls"):
result[filename] = pandas.read_excel(filename, **kwargs)
else:
result[filename] = filename
return result
def debug(*filenames):
loaded = load(filenames)
import ipdb
ipdb.set_trace()
def handle_pmid_27600516(filename):
"""Gloger, ..., Neri Cancer Immunol Immunother 2016 [PMID 27600516]"""
df = pandas.read_csv(filename)
sample_to_peptides = {}
current_sample = None
for peptide in df.peptide:
if peptide.startswith("#"):
current_sample = peptide[1:]
sample_to_peptides[current_sample] = []
else:
assert current_sample is not None
sample_to_peptides[current_sample].append(peptide.strip().upper())
rows = []
for (sample, peptides) in sample_to_peptides.items():
for peptide in sorted(set(peptides)):
rows.append([sample, peptide])
result_df = pandas.DataFrame(rows, columns=["sample_id", "peptide"])
result_df["sample_type"] = "melanoma_cell_line"
result_df["cell_line"] = result_df.sample_id
result_df["mhc_class"] = "I"
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["hla"] = result_df.sample_id.map({
"FM-82": "HLA-A*02:01 HLA-A*01:01 HLA-B*08:01 HLA-B*15:01 HLA-C*03:04 HLA-C*07:01",
"FM-93/2": "HLA-A*02:01 HLA-A*26:01 HLA-B*40:01 HLA-B*44:02 HLA-C*03:04 HLA-C*05:01",
"Mel-624": "HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-B*14:01 HLA-C*07:02 HLA-C*08:02",
"MeWo": "HLA-A*02:01 HLA-A*26:01 HLA-B*14:02 HLA-B*38:01 HLA-C*08:02 HLA-C*12:03",
"SK-Mel-5": "HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-C*03:03",
})
return result_df
def handle_pmid_23481700(filename):
"""Hassan, ..., <NAME> Mol Cell Proteomics 2015 [PMID 23481700]"""
df = pandas.read_excel(filename, skiprows=10)
assert df["Peptide sequence"].iloc[0] == "TPSLVKSTSQL"
assert df["Peptide sequence"].iloc[-1] == "LPHSVNSKL"
hla = {
"JY": "HLA-A*02:01 HLA-B*07:02 HLA-C*07:02",
"HHC": "HLA-A*02:01 HLA-B*07:02 HLA-B*44:02 HLA-C*05:01 HLA-C*07:02",
}
results = []
for sample_id in ["JY", "HHC"]:
hits_df = df.loc[
df["Int %s" % sample_id].map(
lambda x: {"n.q.": 0, "n.q": 0}.get(x, x)).astype(float) > 0
]
result_df = pandas.DataFrame({
"peptide": hits_df["Peptide sequence"].dropna().values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = "B-LCL-" + sample_id
result_df["hla"] = hla[sample_id]
result_df["sample_type"] = "B-LCL"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["pulldown_antibody"] = "W6/32"
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
# Rename samples to avoid a collision with the JY sample in PMID 25576301.
result_df.sample_id = result_df.sample_id.map({
"JY": "JY.2015",
"HHC": "HHC.2015",
})
return result_df
def handle_pmid_24616531(filename):
"""Mommen, ..., Heck PNAS 2014 [PMID 24616531]"""
df = pandas.read_excel(filename, sheet_name="EThcD")
peptides = df.Sequence.values
assert peptides[0] == "APFLRIAF"
assert peptides[-1] == "WRQAGLSYIRYSQI"
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = "24616531"
result_df["sample_type"] = "B-LCL"
result_df["cell_line"] = "GR"
result_df["pulldown_antibody"] = "W6/32"
# Note: this publication lists hla as "HLA-A*01,-03, B*07,-27, and -C*02,-07"
# we are guessing the exact 4 digit alleles based on this.
result_df["hla"] = "HLA-A*01:01 HLA-A*03:01 HLA-B*07:02 HLA-B*27:05 HLA-C*02:02 HLA-C*07:01"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_25576301(filename):
"""Bassani-Sternberg, ..., Mann Mol Cell Proteomics 2015 [PMID 25576301]"""
df = pandas.read_excel(filename, sheet_name="Peptides")
peptides = df.Sequence.values
assert peptides[0] == "AAAAAAAQSVY"
assert peptides[-1] == "YYYNGKAVY"
column_to_sample = {}
for s in [c for c in df if c.startswith("Intensity ")]:
assert s[-2] == "-"
column_to_sample[s] = s.replace("Intensity ", "")[:-2].strip()
intensity_columns = list(column_to_sample)
rows = []
for _, row in df.iterrows():
x1 = row[intensity_columns]
x2 = x1[x1 > 0].index.map(column_to_sample).value_counts()
x3 = x2[x2 >= 2] # require at least two replicates for each peptide
for sample in x3.index:
rows.append((row.Sequence, sample))
result_df = pandas.DataFrame(rows, columns=["peptide", "sample_id"])
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
allele_map = {
'Fib': "HLA-A*03:01 HLA-A*23:01 HLA-B*08:01 HLA-B*15:18 HLA-C*07:02 HLA-C*07:04",
'HCC1937': "HLA-A*23:01 HLA-A*24:02 HLA-B*07:02 HLA-B*40:01 HLA-C*03:04 HLA-C*07:02",
'SupB15WT': None, # four digit alleles unknown, will drop sample
'SupB15RT': None,
'HCT116': "HLA-A*01:01 HLA-A*02:01 HLA-B*45:01 HLA-B*18:01 HLA-C*05:01 HLA-C*07:01",
# Homozygous at HLA-A:
'HCC1143': "HLA-A*31:01 HLA-A*31:01 HLA-B*35:08 HLA-B*37:01 HLA-C*04:01 HLA-C*06:02",
# Homozygous everywhere:
'JY': "HLA-A*02:01 HLA-A*02:01 HLA-B*07:02 HLA-B*07:02 HLA-C*07:02 HLA-C*07:02",
}
sample_type = {
'Fib': "fibroblast",
'HCC1937': "basal like breast cancer",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "colon carcinoma",
'HCC1143': "basal like breast cancer",
'JY': "B-cell",
}
cell_line = {
'Fib': None,
'HCC1937': "HCC1937",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "HCT116",
'HCC1143': "HCC1143",
'JY': "JY",
}
result_df["hla"] = result_df.sample_id.map(allele_map)
print("Entries before dropping samples with unknown alleles", len(result_df))
result_df = result_df.loc[~result_df.hla.isnull()]
print("Entries after dropping samples with unknown alleles", len(result_df))
result_df["sample_type"] = result_df.sample_id.map(sample_type)
result_df["cell_line"] = result_df.sample_id.map(cell_line)
print(result_df.head(3))
return result_df
def handle_pmid_26992070(*filenames):
"""Ritz, ..., Fugmann Proteomics 2016 [PMID 26992070]"""
# Although this publication seems to suggest that HEK293 are C*07:02
# (figure 3B), in a subsequent publication [PMID 28834231] this group
# gives the HEK293 HLA type as HLA‐A*03:01, HLA‐B*07:02, and HLA‐C*07:01.
# We are therefore using the HLA‐C*07:01 (i.e. the latter) typing results
# here.
allele_text = """
Cell line HLA-A 1 HLA-A 2 HLA-B 1 HLA-B 2 HLA-C 1 HLA-C 2
HEK293 03:01 03:01 07:02 07:02 07:01 07:01
HL-60 01:01 01:01 57:01 57:01 06:02 06:02
RPMI8226 30:01 68:02 15:03 15:10 02:10 03:04
MAVER-1 24:02 26:01 38:01 44:02 05:01 12:03
THP-1 02:01 24:02 15:11 35:01 03:03 03:03
"""
allele_info = pandas.read_csv(
StringIO(allele_text), sep="\t", index_col=0)
allele_info.index = allele_info.index.str.strip()
for gene in ["A", "B", "C"]:
for num in ["1", "2"]:
allele_info[
"HLA-%s %s" % (gene, num)
] = "HLA-" + gene + "*" + allele_info["HLA-%s %s" % (gene, num)]
cell_line_to_allele = allele_info.apply(" ".join, axis=1)
sheets = {}
for f in filenames:
if f.endswith(".xlsx"):
d = pandas.read_excel(f, sheet_name=None, skiprows=1)
sheets.update(d)
dfs = []
for cell_line in cell_line_to_allele.index:
# Using data from DeepQuanTR, which appears to be a consensus between
# two other methods used.
sheet = sheets[cell_line + "_DeepQuanTR"]
replicated = sheet.loc[
sheet[[c for c in sheet if "Sample" in c]].fillna(0).sum(1) > 1
]
df = pandas.DataFrame({
'peptide': replicated.Sequence.values
})
df["sample_id"] = cell_line
df["hla"] = cell_line_to_allele.get(cell_line)
dfs.append(df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["pulldown_antibody"] = "W6/32"
result_df["cell_line"] = result_df["sample_id"]
result_df["sample_type"] = result_df.sample_id.map({
"HEK293": "hek",
"HL-60": "neutrophil",
"RPMI8226": "b-cell",
"MAVER-1": "b-LCL",
"THP-1": "monocyte",
})
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_27412690(filename):
"""Shraibman, ..., Admon Mol Cell Proteomics 2016 [PMID 27412690]"""
hla_types = {
"U-87": "HLA-A*02:01 HLA-B*44:02 HLA-C*05:01",
"T98G": "HLA-A*02:01 HLA-B*39:06 HLA-C*07:02",
"LNT-229": "HLA-A*03:01 HLA-B*35:01 HLA-C*04:01",
}
sample_id_to_cell_line = {
"U-87": "U-87",
"T98G": "T98G",
"LNT-229": "LNT-229",
"U-87+DAC": "U-87",
"T98G+DAC": "T98G",
"LNT-229+DAC": "LNT-229",
}
df = pandas.read_excel(filename)
assert df.Sequence.iloc[0] == "AAAAAAGSGTPR"
intensity_col_to_sample_id = {}
for col in df:
if col.startswith("Intensity "):
sample_id = col.split()[1]
assert sample_id in sample_id_to_cell_line, (col, sample_id)
intensity_col_to_sample_id[col] = sample_id
dfs = []
for (sample_id, cell_line) in sample_id_to_cell_line.items():
intensity_cols = [
c for (c, v) in intensity_col_to_sample_id.items()
if v == sample_id
]
hits_df = df.loc[
(df[intensity_cols] > 0).sum(1) > 1
]
result_df = pandas.DataFrame({
"peptide": hits_df.Sequence.values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = cell_line
result_df["hla"] = hla_types[cell_line]
dfs.append(result_df)
result_df = | pandas.concat(dfs, ignore_index=True) | pandas.concat |
import logging
import traceback
import pandas as pd
import numpy as np
import seaborn as sns
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import inStrain.plotting.utilities
from inStrain.plotting.utilities import plot_genome
from inStrain.plotting.utilities import estimate_breadth
def genome_plot_from_IS(IS, plot_dir=False, **kwargs):
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covT')#, IS.get('covT'))
clonTs = kwargs.get('clonT')#, IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table')#, IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table')#, IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 2 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 2")
name = 'genomeWide_microdiveristy_metrics.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=genome)
fig = plt.gcf()
fig.set_size_inches(8, 5)
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def scaffold_inspection_from_IS(IS, plot_dir=False, **kwargs):
# Load the required data
try:
stb = IS.get('scaffold2bin')
b2s = defaultdict(list)
for s, b in stb.items():
b2s[b].append(s)
assert len(b2s.keys()) > 0
# Load the cache
covTs = kwargs.get('covTs', IS.get('covT'))
clonTs = kwargs.get('clonTs', IS.get('clonT'))
raw_linkage_table = kwargs.get('raw_linkage_table', IS.get('raw_linkage_table'))
cumulative_snv_table = kwargs.get('cumulative_snv_table', IS.get('cumulative_snv_table'))
scaffold2length = IS.get('scaffold2length')
rl = IS.get_read_length()
profiled_scaffolds = set(scaffold2length.keys())
except:
logging.error("Skipping plot 7 - you don't have all required information. You need to run inStrain genome_wide first")
traceback.print_exc()
return
# Make the plot
logging.info("Plotting plot 7")
name = 'ScaffoldInspection_plot.pdf'
pp = PdfPages(plot_dir + name)
for genome, scaffolds in b2s.items():
if not plot_genome(genome, IS, **kwargs):
continue
present_scaffolds = list(set(scaffolds).intersection(set(profiled_scaffolds)))
Wdb, breaks, midpoints = load_windowed_metrics(present_scaffolds,
scaffold2length,
rl,
report_midpoints=True,
covTs=covTs, clonTs=clonTs,
raw_linkage_table=raw_linkage_table,
cumulative_snv_table=cumulative_snv_table)
if len(Wdb) == 0:
logging.debug(f"{genome} could not have windowed metrics loaded")
continue
scaffold_inspection_plot(Wdb, breaks, midpoints, title=genome)
fig = plt.gcf()
fig.tight_layout()
pp.savefig(fig)#, bbox_inches='tight')
#plt.show()
plt.close(fig)
# Save the figure
pp.close()
#plt.show()
plt.close('all')
def genomeWide_microdiveristy_metrics_plot(Wdb, breaks, title=''):
'''
Make the multiple metrics plot
'''
# Get set up for multiple rows
i = len(Wdb['metric'].unique())
if i > 1:
fig, ax = plt.subplots(i, 1, sharex=True)
else:
ax = {}
ax[0] = plt.gca()
i = 0
for metric in ['linkage', 'snp_density', 'coverage', 'nucl_diversity']:
#for metric, wdb in Wdb.groupby('metric'):
if metric not in set(Wdb['metric'].tolist()):
continue
wdb = Wdb[Wdb['metric'] == metric]
med = wdb['value'].median()
# Rotate colors:
colors = ['red', 'blue', 'black']
c = 0
for mm, ddb in wdb.groupby('ANI'):
ax[i].plot(ddb['midpoint'], ddb['value'], c=colors[c], label=mm, marker='o', ms=1)#, ls='')
c += 1
ax[i].set_title("{0}".format(metric))
ax[i].grid(False)
if i == 0:
ax[i].legend(loc='upper left', title='Min read ANI (%)')
# Add breaks
for b in breaks:
ax[i].axvline(b, ls='-', c='lightgrey', zorder=-1)
i += 1
plt.xlabel('genome position')
plt.xlim(0, Wdb['midpoint'].max())
plt.suptitle(title, y=0.999)
plt.subplots_adjust(hspace=0.3)
def load_windowed_metrics(scaffolds, s2l, rLen, metrics=None, window_len=None, ANI_levels=[0, 100],
min_scaff_len=0, report_midpoints=False, covTs=False, clonTs=False,
raw_linkage_table=False, cumulative_snv_table=False):
if metrics is None:
metrics = ['coverage', 'nucl_diversity', 'linkage', 'snp_density']
if type(metrics) != type([]):
print("Metrics must be a list")
return
# Figure out the MMs needed
#rLen = IS.get_read_length()
mms = [_get_mm(None, ANI, rLen=rLen) for ANI in ANI_levels]
# Sort the scaffolds
#s2l = IS.get('scaffold2length')
scaffolds = sorted(scaffolds, key=s2l.get, reverse=True)
if min_scaff_len > 0:
scaffolds = [s for s in scaffolds if s2l[s] >= min_scaff_len]
# Figure out the window length
if window_len == None:
window_len = int(sum([s2l[s] for s in scaffolds]) / 100)
else:
window_len = int(window_len)
# Calculate the breaks
breaks = []
midpoints = {}
tally = 0
for scaffold in scaffolds:
midpoints[scaffold] = tally + int(s2l[scaffold] / 2)
tally += s2l[scaffold]
breaks.append(tally)
dbs = []
if 'coverage' in metrics:
if covTs == False:
logging.error("need covTs for coverage")
raise Exception
cdb = load_windowed_coverage_or_clonality('coverage', covTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'coverage'
dbs.append(cdb)
# if 'clonality' in metrics:
# cdb = load_windowed_coverage_or_clonality(IS, 'clonality', scaffolds, window_len, mms, ANI_levels, s2l)
# cdb['metric'] = 'clonality'
# dbs.append(cdb)
if 'nucl_diversity' in metrics:
if clonTs == False:
logging.error("need clonTs for microdiversity")
raise Exception
cdb = load_windowed_coverage_or_clonality('nucl_diversity', clonTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'nucl_diversity'
dbs.append(cdb)
if 'linkage' in metrics:
if raw_linkage_table is False:
logging.error("need raw_linkage_table for linkage")
raise Exception
cdb = load_windowed_linkage(raw_linkage_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'linkage'
dbs.append(cdb)
if 'snp_density' in metrics:
if cumulative_snv_table is False:
logging.error("need cumulative_snv_table for snp_density")
raise Exception
if len(cumulative_snv_table) > 0:
cdb = load_windowed_SNP_density(cumulative_snv_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'snp_density'
dbs.append(cdb)
if len(dbs) > 0:
Wdb = pd.concat(dbs, sort=True)
Wdb = Wdb.rename(columns={'avg_cov':'value'})
else:
Wdb = pd.DataFrame()
# Add blanks at the breaks
table = defaultdict(list)
for mm, ani in zip(mms, ANI_levels):
for metric in Wdb['metric'].unique():
for bre in breaks:
table['scaffold'].append('break')
table['mm'].append(mm)
table['ANI'].append(ani)
table['adjusted_start'].append(bre) # The minus one makes sure it doenst split things it shouldnt
table['adjusted_end'].append(bre)
table['value'].append(np.nan)
table['metric'].append(metric)
bdb = | pd.DataFrame(table) | pandas.DataFrame |
import os
import datetime
import numpy as np
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import seaborn as sns
from ._differential import compute_levelWise_differential_analysis
from ._pseudo import createBins, createSuperbins
from ._visualize import heatmap
from ._enrich import module_enrich_ranked, module_enrich, apply_toppcluster
class Shred:
"""
An object of a shred job, including anndata, bin table, gene module lists and shred plan.
Parameters
----------
adata
anndata for shred
shred_plan
shred plan for anndata. e.g. ["stim", "cell", "stim+cell|stim"]
bin_group
groups for binning plan in visualization
order_bins
orders of bins
order_modules
orders of modules
method
statistical methods for differential expression analysis
"""
def __init__(
self,
adata,
shred_plan,
bin_group,
bin_num = 1000,
bin_min_cells = 5,
order_bins = None,
order_modules = None,
method = "wilcoxon",
output_dir = "./",
save_output = True
):
self.adata = adata
self.shred_plan = shred_plan # plan for shred (gene module generation)
self.bin_group = bin_group # the way to make pseudo-bulk bins
        self.bin_num = bin_num
        self.bin_min_cells = bin_min_cells
self.order_modules = order_modules # the order of modules in heatmap
self.method = method
self.save_output = save_output
self.order_bins = self.bin_group if order_bins == None else order_bins
self.shred_module = {}
self.module_groups = get_all_terms(shred_plan)
# create bins for heatmap visualization
self.bin_metadata, self.bin_matrix = createBins(adata, bin_by = bin_group,
min_cells = bin_min_cells, target_totalBins = bin_num)
self.superbin_metadata, self.superbin_matrix = createSuperbins(adata, bin_by = bin_group)
if output_dir != None:
if not os.path.isdir(output_dir + "/output"):
self.output_folder = output_dir + "/output/"
else:
self.output_folder = output_dir + "/output_" + str(datetime.datetime.now()) + "/"
os.mkdir(self.output_folder)
if self.save_output:
self.bin_metadata.to_csv(self.output_folder + "bin_metadata.txt", sep = "\t")
self.bin_matrix.to_csv(self.output_folder + "bin_matrix.txt", sep = "\t")
self.superbin_metadata.to_csv(self.output_folder + "superbin_metadata.txt", sep = "\t")
self.superbin_matrix.to_csv(self.output_folder + "superbin_matrix.txt", sep = "\t")
def do_shredplan(self):
"""
Run the user-customized shred plan, where hierarchical gene modules will be generated.
"""
df_deg_combined = | pd.DataFrame() | pandas.DataFrame |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sys
from os.path import join as pjoin
import scanpy as sc
import anndata
import time
# sys.path.append("../../..")
sys.path.append("../../../data")
from st.load_st_data import load_st_data
sys.path.append("../../../gpsa/models")
# from vgpsa import VariationalGPSA
# from gpsa import matern12_kernel, rbf_kernel
from gpsa import VariationalGPSA, matern12_kernel, rbf_kernel
from gpsa.plotting import callback_oned, callback_twod, callback_twod_aligned_only
## For PASTE
import scanpy as sc
import anndata
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
from sklearn.metrics import r2_score
def scale_spatial_coords(X, max_val=10.0):
X = X - X.min(0)
X = X / X.max(0)
return X * max_val
DATA_DIR = "../../../data/st/"
N_GENES = 20
N_SAMPLES = None
N_LAYERS = 4
fixed_view_idx = [0, 2, 3]
n_spatial_dims = 3
n_views = 4
m_G = 200
m_X_per_view = 200
N_LATENT_GPS = {"expression": None}
N_EPOCHS = 5000
PRINT_EVERY = 25
def process_data(adata, n_top_genes=2000):
adata.var_names_make_unique()
adata.var["mt"] = adata.var_names.str.startswith("MT-")
sc.pp.calculate_qc_metrics(adata, qc_vars=["mt"], inplace=True)
sc.pp.filter_cells(adata, min_counts=100)
# sc.pp.filter_cells(adata, max_counts=35000)
# adata = adata[adata.obs["pct_counts_mt"] < 20]
# sc.pp.filter_genes(adata, min_cells=10)
sc.pp.normalize_total(adata, inplace=True)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(
adata, flavor="seurat", n_top_genes=n_top_genes, subset=True
)
return adata
data_slice1, data_slice2, data_slice3, data_slice4 = load_st_data(
layers=np.arange(N_LAYERS) + 1
)
process_data(data_slice1, n_top_genes=3000)
process_data(data_slice2, n_top_genes=3000)
process_data(data_slice3, n_top_genes=3000)
process_data(data_slice4, n_top_genes=3000)
## Save original data
plt.figure(figsize=(20, 5))
for ii, curr_slice in enumerate([data_slice1, data_slice2, data_slice3, data_slice4]):
plt.subplot(1, 4, ii + 1)
plt.scatter(
curr_slice.obsm["spatial"][:, 0], curr_slice.obsm["spatial"][:, 1], s=30
)
plt.title("Slice {}".format(ii + 1), fontsize=30)
plt.axis("off")
plt.savefig("./out/st_original_slices.png")
# plt.show()
plt.close()
data = anndata.AnnData.concatenate(data_slice1, data_slice2, data_slice3, data_slice4)
# plt.figure(figsize=(5, 5))
# plt.scatter(data[data.obs["batch"] == "0"].obsm["spatial"][:, 0], data[data.obs["batch"] == "0"].obsm["spatial"][:, 1])
# plt.scatter(data[data.obs["batch"] == "1"].obsm["spatial"][:, 0], data[data.obs["batch"] == "1"].obsm["spatial"][:, 1])
# plt.show()
# import ipdb; ipdb.set_trace()
shared_gene_names = data.var.gene_ids.index.values
data_knn = data_slice1[:, shared_gene_names]
X_knn = data_knn.obsm["spatial"]
Y_knn = data_knn.X
Y_knn = (Y_knn - Y_knn.mean(0)) / Y_knn.std(0)
# nbrs = NearestNeighbors(n_neighbors=2).fit(X_knn)
# distances, indices = nbrs.kneighbors(X_knn)
knn = KNeighborsRegressor(n_neighbors=10, weights="uniform").fit(X_knn, Y_knn)
preds = knn.predict(X_knn)
r2_vals = r2_score(Y_knn, preds, multioutput="raw_values")
gene_idx_to_keep = np.where(r2_vals > 0.3)[0]
N_GENES = min(N_GENES, len(gene_idx_to_keep))
gene_names_to_keep = data_knn.var.gene_ids.index.values[gene_idx_to_keep]
gene_names_to_keep = gene_names_to_keep[np.argsort(-r2_vals[gene_idx_to_keep])]
r2_vals_sorted = -1 * np.sort(-r2_vals[gene_idx_to_keep])
if N_GENES < len(gene_names_to_keep):
gene_names_to_keep = gene_names_to_keep[:N_GENES]
data = data[:, gene_names_to_keep]
# for ii, gene_name in enumerate(gene_names_to_keep):
# print(r2_vals_sorted[ii], flush=True)
# sc.pl.spatial(data_knn, img_key=None, color=[gene_name], spot_size=1)
n_samples_list = [
data_slice1.shape[0],
data_slice2.shape[0],
data_slice3.shape[0],
data_slice4.shape[0],
]
cumulative_sum = np.cumsum(n_samples_list)
cumulative_sum = np.insert(cumulative_sum, 0, 0)
view_idx = [
np.arange(cumulative_sum[ii], cumulative_sum[ii + 1]) for ii in range(n_views)
]
X_list = []
Y_list = []
for vv in range(n_views):
curr_X = np.array(data[data.obs.batch == str(vv)].obsm["spatial"])
curr_Y = data[data.obs.batch == str(vv)].X
curr_X = scale_spatial_coords(curr_X)
curr_Y = (curr_Y - curr_Y.mean(0)) / curr_Y.std(0)
X_list.append(curr_X)
Y_list.append(curr_Y)
for vv in range(n_views):
curr_X = X_list[vv]
curr_X = np.concatenate(
[curr_X, np.ones(curr_X.shape[0]).reshape(-1, 1) * vv], axis=1
)
X_list[vv] = curr_X
aligned_coords = | pd.read_csv("./out/aligned_coords_st_3d.csv", index_col=0) | pandas.read_csv |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # tz mismatch affecting tz-aware data raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
            # passing tz should result in a DatetimeIndex; the tz mismatch then raises ValueError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
        expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="UTC")
        tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = | date_range('1/1/2000', periods=10) | pandas.date_range |
import os, glob, sys, io
import numpy as np
import pandas as pd # Timeseries data
import datetime as dt # Time manipulation
import yaml
from matplotlib.dates import date2num # Convert dates to matplotlib axis coords
from matplotlib import dates
from scipy import fftpack
from scipy import stats
from bin.tools import *
def init(config_file):
# Read configuration
with open(r'%s' % config_file) as file:
config_list = yaml.load(file, Loader=yaml.FullLoader)
src = config_list['sources']['ebas_ozone']
src_svanvik_OzoNorClim = config_list['sources']['svanvik_ozone']
src_rra = config_list['sources']['regional_ozone']
station_list = config_list['station_list']
workflow = config_list['workflow']
file.close()
# Read data
try:
data = {}
for station in station_list:
if station=='Barrow':
data.update({station:load_data(src+station+'/*', type="Barrow")})
else:
data.update({station:load_data(src+station+'/*.nas')})
except NameError:
sys.exit("Can't load ozone station data please check your source directory!")
# Concate Jergul and Karasjok data
data.update({'jergkara':pd.concat((data['Jergul'], data['Karasjok']))})
# Read and convert xls file data
data_svanvik_OzoNorClim = []
for file in sorted(glob.glob(src_svanvik_OzoNorClim)):
tmp_data_svanvik = pd.read_excel(file, index_col=0, header=0)
data_svanvik_OzoNorClim.append(tmp_data_svanvik['O3_mugm-3'].where(tmp_data_svanvik['O3_mugm-3']>=0.5).dropna()/2.)
# Concat data Svanvik data
data.update({'svanvik_OzoNorClim':pd.concat(data_svanvik_OzoNorClim)})
# Load regional model reanalysis 2018 and set time axis
try:
data_rra = xr.open_dataset(src_rra)
data_rra['time'] = pd.date_range("2018-01-01", periods=365*24, freq='H')
data.update({'rra':data_rra})
except NameError:
print("Warning: Can't load regional data please check your source directory!")
return(data, workflow)
def extract_station_data(data, station_list):
from bin.station_info import station_location
local_rra = {}
for each in station_list:
local_rra.update({each:data['rra'].sel(lat=station_location[each].lat, lon=station_location[each].lon, method='nearest', time='2018-07')['O3']*0.5})
return(local_rra)
def compute_time_lag(data):
time_lag = range(-32,33)
lag_jergkara_esrange = []
lag_jergkara_pallas = []
lag_svanvik_esrange = []
lag_svanvik_pallas = []
lag_svanvik_jergkara = []
lag_label = ("jergkara_esrange","jergkara_pallas","svanvik_esrange","svanvik_pallas","svanvik_jergkara")
for i in time_lag:
lag_jergkara_esrange.append(time_lagged_corr(data['jergkara'], data['Esrange'], lag=i, pandas=True))
lag_jergkara_pallas.append(time_lagged_corr(data['jergkara'], data['Pallas'], lag=i, pandas=True))
lag_svanvik_esrange.append(time_lagged_corr(data['Svanvik'], data['Esrange'], lag=i, pandas=True))
lag_svanvik_pallas.append(time_lagged_corr(data['Svanvik'], data['Pallas'], lag=i, pandas=True))
lag_svanvik_jergkara.append(time_lagged_corr(data['Svanvik'], data['jergkara'], lag=i, pandas=True))
# Print maximum in lag
lag_max = {}
print("Lag correlation")
for i,lag in zip(lag_label,(lag_jergkara_esrange, lag_jergkara_pallas, lag_svanvik_esrange, lag_svanvik_pallas, lag_svanvik_jergkara)):
lag_max.update({i:np.array(time_lag)[np.where(np.array(lag)==np.array(lag).max())[0]][0]})
print("%s max at %d h" % (i, lag_max[i]))
return(lag_max)
def compute_clim(data):
doys = np.arange(1,367)
# Climatology from Esrange, Pallas, Jergul/Karasjok data
climatology = pd.concat((data['Esrange'][:'2012'], data['Pallas'][:'2012'], data['jergkara'][:'2012']))
# Daily mean climatology from Esrange, Pallas, Jergul/Karasjok data
yozone, yerr, yerr_mean = compute_climatology(climatology)
yozone_max, yerr_max, yerr_mean_max = compute_climatology(climatology, mode='max')
yozone_min, yerr_min, yerr_mean_min = compute_climatology(climatology, mode='min')
# Svanvik climatology
yozone_svanvik, yerr_svanvik, yerr_mean_svanvik = compute_climatology(data['Svanvik'])
yozone_max_svanvik, yerr_max_svanvik, yerr_mean_max_svanvik = compute_climatology(data['Svanvik'], mode='max')
yozone_min_svanvik, yerr_min_svanvik, yerr_mean_min_svanvik = compute_climatology(data['Svanvik'], mode='min')
# Hourly climatology
clim_hourly, clim_hourly_err, clim_hourly_err_mean = compute_climatology(climatology, mode='hourly')
clim_hourly_svanvik, clim_hourly_err_svanvik, clim_hourly_err_mean_svanvik = compute_climatology(data['Svanvik'], mode='hourly')
# Compute spline fits
from scipy.interpolate import UnivariateSpline
# Fennoscandic climatology
w = 1/yerr_mean
fitSpl_dmean = UnivariateSpline(doys, climatology.groupby(climatology.index.dayofyear).apply(np.nanmean), w=w)
dmax = climatology.resample('1d').apply(np.nanmax)
fitSpl_dmax = UnivariateSpline(doys, dmax.groupby(dmax.index.dayofyear).apply(np.nanmean))
# Svanvik
w_svanvik = 1/yerr_mean_svanvik
fitSpl_dmean_svanvik = UnivariateSpline(doys, data['Svanvik'].groupby(data['Svanvik'].index.dayofyear).apply(np.nanmean), w=w_svanvik)
dmax_svanvik = data['Svanvik'].resample('1d').apply(np.nanmax)
fitSpl_dmax_svanvik = UnivariateSpline(doys, dmax_svanvik.groupby(dmax_svanvik.index.dayofyear).apply(np.nanmean))
# Pickle splines for comparison with other data
import pickle
with open('obs_climatologies.pkl','wb') as output:
pickle.dump(fitSpl_dmean, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(fitSpl_dmean_svanvik, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(yerr_mean, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(yerr_mean_svanvik, output, pickle.HIGHEST_PROTOCOL)
return({'clim':clim_hourly, 'clim_err':clim_hourly_err, 'clim_err_mean':clim_hourly_err_mean},
{'clim':clim_hourly_svanvik, 'clim_err':clim_hourly_err_svanvik, 'clim_err_mean':clim_hourly_err_mean_svanvik})
def sample_climatology(clim, clim_svanvik):
# Sample from houerly climatology
sample_clim_svanvik = pd.DataFrame(pd.concat((clim_svanvik.iloc[:(31+28)*24],clim_svanvik.iloc[(31+29)*24:])).values, index=pd.date_range("2018-01-01 0:0", "2018-12-31 23:0", freq='H'))
sample_clim = pd.DataFrame( | pd.concat((clim.iloc[:(31+28)*24],clim.iloc[(31+29)*24:])) | pandas.concat |
"""
"""
import importlib
import os
import pydoc
import shutil
import subprocess
from datetime import datetime
from json.decoder import JSONDecodeError
from multiprocessing import Pool
import click
import terra.database as tdb
from terra import Task, _get_task_dir
from terra.settings import TERRA_CONFIG
from terra.utils import ensure_dir_exists
@click.group()
@click.option("--module", default=None)
@click.option("--fn", default=None)
@click.option("--status", default=None)
@click.option("--run_ids", "-r", type=str, default=None)
@click.option("--start_date", type=str, default=None)
@click.option("--end_date", type=str, default=None)
@click.option("--limit", type=int, default=1_000)
@click.pass_context
def cli(
ctx,
module: str,
fn: str,
run_ids: str,
status: str,
start_date: str,
end_date: str,
limit: int,
):
ctx.ensure_object(dict)
ctx.obj["modules"] = module
ctx.obj["fns"] = fn
ctx.obj["statuses"] = status
ctx.obj["limit"] = limit
if run_ids is not None:
run_ids = map(int, run_ids.split(","))
ctx.obj["run_ids"] = run_ids
ctx.obj["date_range"] = None
date_format = "%m-%d-%Y"
if start_date is not None and end_date is not None:
ctx.obj["date_range"] = (
datetime.strptime(start_date, date_format),
datetime.strptime(end_date, date_format),
)
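# Example invocations (sketch only — the installed entry-point name is an assumption;
# substitute `python -m terra` or whichever console script exposes this group):
#   terra --module my_project.tasks --fn train_model ls
#   terra --start_date 01-01-2021 --end_date 02-01-2021 --status success ls
#   terra -r 12,14,101 push --bucket_name my-bucket --num_workers 4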
@cli.command()
@click.option("--bucket_name", "-b", type=str, default=None)
@click.option("--force", "-f", is_flag=True, default=False)
@click.option("--num_workers", type=int, default=0)
@click.pass_context
def push(ctx, bucket_name: str, force: bool, num_workers: int):
from terra.remote import push
push(**ctx.obj, bucket_name=bucket_name, force=force, num_workers=num_workers)
@cli.command()
@click.pass_context
@click.option("--bucket_name", "-b", type=str, default=None)
def pull(ctx, bucket_name: str):
from terra.remote import pull
pull(**ctx.obj, bucket_name=bucket_name)
@cli.command()
@click.pass_context
def ls(ctx):
import pandas as pd
runs = tdb.get_runs(**ctx.obj, df=False)
if len(runs) == 0:
print("Query returned no tasks.")
return
df = | pd.DataFrame([run.__dict__ for run in runs]) | pandas.DataFrame |
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
from bdshare.util import vars as vs
def get_current_trade_data(symbol=None, retry_count=1, pause=0.001):
"""
get last stock price.
:param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'
    :return: dataframe
"""
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)
except Exception as e:
print(e)
else:
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
# print(table)
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", ""),
'ltp': cols[2].text.strip().replace(",", ""),
'high': cols[3].text.strip().replace(",", ""),
'low': cols[4].text.strip().replace(",", ""),
'close': cols[5].text.strip().replace(",", ""),
'ycp': cols[6].text.strip().replace(",", ""),
'change': cols[7].text.strip().replace("--", "0"),
'trade': cols[8].text.strip().replace(",", ""),
'value': cols[9].text.strip().replace(",", ""),
'volume': cols[10].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if symbol:
df = df.loc[df.symbol == symbol.upper()]
return df
else:
return df
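# Example usage (sketch; instrument symbols depend on what the live DSE page lists):
#   all_quotes = get_current_trade_data()       # full latest-share-price table
#   aci_quote = get_current_trade_data('aci')   # single symbol, case-insensitive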
def get_dsex_data(symbol=None, retry_count=1, pause=0.001):
"""
get dseX share price.
:param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'
:return: dataframe
"""
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(vs.DSE_URL+vs.DSEX_INDEX_VALUE)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSEX_INDEX_VALUE)
except Exception as e:
print(e)
else:
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table'})
# print(table)
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", ""),
'ltp': cols[2].text.strip().replace(",", ""),
'high': cols[3].text.strip().replace(",", ""),
'low': cols[4].text.strip().replace(",", ""),
'close': cols[5].text.strip().replace(",", ""),
'ycp': cols[6].text.strip().replace(",", ""),
'change': cols[7].text.strip().replace("--", "0"),
'trade': cols[8].text.strip().replace(",", ""),
'value': cols[9].text.strip().replace(",", ""),
'volume': cols[10].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if symbol:
df = df.loc[df.symbol == symbol.upper()]
return df
else:
return df
def get_current_trading_code():
"""
get last stock codes.
:return: dataframe
"""
try:
r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)
if r.status_code != 200:
r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)
except Exception as e:
print(e)
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'symbol': cols[1].text.strip().replace(",", "")})
df = pd.DataFrame(quotes)
return df
def get_hist_data(start=None, end=None, code='All Instrument'):
"""
get historical stock price.
:param start: str, Start date e.g.: '2020-03-01'
:param end: str, End date e.g.: '2020-03-02'
:param code: str, Instrument symbol e.g.: 'ACI'
:return: dataframe
"""
# data to be sent to post request
data = {'startDate': start,
'endDate': end,
'inst': code,
'archive': 'data'}
try:
r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)
if r.status_code != 200:
r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)
except Exception as e:
print(e)
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'date': cols[1].text.strip().replace(",", ""),
'symbol': cols[2].text.strip().replace(",", ""),
'ltp': cols[3].text.strip().replace(",", ""),
'high': cols[4].text.strip().replace(",", ""),
'low': cols[5].text.strip().replace(",", ""),
'open': cols[6].text.strip().replace(",", ""),
'close': cols[7].text.strip().replace(",", ""),
'ycp': cols[8].text.strip().replace(",", ""),
'trade': cols[9].text.strip().replace(",", ""),
'value': cols[10].text.strip().replace(",", ""),
'volume': cols[11].text.strip().replace(",", "")
})
df = pd.DataFrame(quotes)
if 'date' in df.columns:
df = df.set_index('date')
df = df.sort_index(ascending=False)
else:
print('No data found')
return df
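# Example usage (sketch; the dates and instrument code are placeholders):
#   hist = get_hist_data(start='2020-03-01', end='2020-03-02', code='ACI')
#   hist_all = get_hist_data('2020-01-01', '2020-01-31')   # defaults to 'All Instrument'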
def get_basic_hist_data(start=None, end=None, code='All Instrument', index=None, retry_count=1, pause=0.001):
"""
get historical stock price.
:param start: str, Start date e.g.: '2020-03-01'
:param end: str, End date e.g.: '2020-03-02'
:param code: str, Instrument symbol e.g.: 'ACI'
:param retry_count : int, e.g.: 3
:param pause : int, e.g.: 0
:return: dataframe
"""
# data to be sent to post request
data = {'startDate': start,
'endDate': end,
'inst': code,
'archive': 'data'}
for _ in range(retry_count):
time.sleep(pause)
try:
r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)
if r.status_code != 200:
r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)
except Exception as e:
print(e)
else:
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.content, 'html5lib')
# columns: date, open, high, close, low, volume
quotes = [] # a list to store quotes
table = soup.find('table', attrs={
'class': 'table table-bordered background-white shares-table fixedHeader'})
for row in table.find_all('tr')[1:]:
cols = row.find_all('td')
quotes.append({'date': cols[1].text.strip().replace(",", ""),
'open': float(cols[6].text.strip().replace(",", "")),
'high': float(cols[4].text.strip().replace(",", "")),
'low': float(cols[5].text.strip().replace(",", "")),
'close': float(cols[7].text.strip().replace(",", "")),
'volume': int(cols[11].text.strip().replace(",", ""))
})
df = | pd.DataFrame(quotes) | pandas.DataFrame |
import logging
import random
import os
import pickle
import pandas as pd
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Union
import torch.utils.data as data
from tqdm import tqdm
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
        label_rank: (Optional) Relevance label for the (query, question) pair. Int for
            classification problems, float for regression problems.
        label_slot: (Optional) Slot label id (an index from the ``slot_dict`` used by ``QueryDataset``).
"""
input_ids_query: List[int]
input_ids_question: List[int]
attention_mask_query: Optional[List[int]] = None
attention_mask_question: Optional[List[int]] = None
label_rank: Optional[Union[int, float]] = None
label_slot: Optional[Union[int, float]] = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self)) + "\n"
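# Minimal construction example (sketch; the token ids below are made up):
#   feat = InputFeatures(input_ids_query=[101, 2054, 102],
#                        input_ids_question=[101, 2029, 102],
#                        attention_mask_query=[1, 1, 1],
#                        attention_mask_question=[1, 1, 1],
#                        label_rank=1, label_slot=0)
#   feat.to_json_string()  # -> one JSON line per feature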
class QueryDataset(data.Dataset):
def __init__(self, data, slot_dict, tokenizer, data_partition, cache_path,
negative_sampler, max_seq_len=512, max_q_len=20, random_seed=42):
random.seed(random_seed)
self.data = data
self.slot_dict = slot_dict
self.tokenizer = tokenizer # type: TextField or BertTokenizer
self.data_partition = data_partition
assert self.data_partition in ("train", "dev", "test")
self.cache_path = cache_path
self.negative_sampler = negative_sampler
self.max_seq_len = max_seq_len
self.max_q_len = max_q_len
self.instances = []
if str(self.tokenizer.__class__.__name__) == "BertTokenizer":
self._cache_instances_bert()
else:
self._cache_instances()
def _cache_instances_bert(self):
"""
Loads tensors into memory or creates the dataset when it does not exist already.
"""
signature = "{}_n_cand_{}_{}.pkl".\
format(self.data_partition,
self.negative_sampler.num_candidates_samples,
self.tokenizer.__class__.__name__)
path = self.cache_path + "/" + signature
if os.path.exists(path):
with open(path, 'rb') as f:
logging.info("Loading instances from {}".format(path))
self.instances = pickle.load(f)
else:
logging.info("Creating instances with signature {}".format(signature))
# Creating labels (currently there is support only for binary relevance)
relevant_label = 1
not_relevant_label = 0
examples =[]
if self.data_partition == "test":
for row in tqdm(self.data, total=len(self.data)):
query = row[0]
for template in self.negative_sampler.candidates:
examples.append((query, template, 0, 0))
else:
for row in tqdm(self.data, total=len(self.data)):
query = row[0]
template = row[1]
slot = row[2]
slot_label = self.slot_dict[slot]
examples.append((query, template, relevant_label, slot_label))
ns_templates = self.negative_sampler.sample(template)
for ns in ns_templates:
examples.append((query, ns, not_relevant_label, slot_label))
examples_df = | pd.DataFrame(examples) | pandas.DataFrame |
#!/env/bin/python
from tensorflow import keras
from complete_preprocess_script import do_preprocessing
from complete_feature_extraction_script import do_feature_extraction
from Scripts.Feature_extraction.feature_extraction_utilities import dataset_path, dict_path, temp_output_path, output_path
import dask.dataframe as dd
import os
import pathlib as pl
import pandas as pd
import numpy as np
import gc
'''
On the remote machine there will be a /test folder with the raw dataset. This will be our data_path.
All the additional content, deriving from Preprocessing and Feature Extraction, will be placed in the /workflow folder (aka base).
Initially, there will be only 2 subfolders:
- Dictionary: where all dicts, jsons and stuff from FE is placed
- Models: where the models will be placed
The base folder will grow while computations run, but that does not matter when preparing the submission.
We just need to create a workflow folder and, under it, the aforementioned subfolders with the correct contents.
Alongside this folder there should be the Scripts folder and the two complete-* scripts.
'''
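# Directory layout implied by the description above (names not mentioned in the
# docstring or the configs below are assumptions):
#
#   ./test/                              raw dataset (part-* files)
#   ./workflow/
#       Dictionary/                      dicts/jsons produced during feature extraction
#       Models/                          trained models
#       Temp/                            created while running (dask tmp, intermediates)
#   ./Scripts/                           preprocessing / feature-extraction subscripts
#   ./complete_preprocess_script.py
#   ./complete_feature_extraction_script.py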
def preprocess_dataset():
data_path = './test'
base_path = './workflow'
dict_path = os.path.join(base_path, 'Dictionary')
all_scripts = [
"pre00_dataset_to_parquet.py",
"pre01_map_user_id_features.py",
"pre02_map_media_features.py",
"pre03_map_link_id.py",
"pre04_map_domains_id.py",
"pre05_map_hashtags_id.py",
"pre06_map_languages_id.py",
#"pre07_map_tweet_id.py",
"pre08_map_tweet_type.py",
"pre09_timestamps.py",
"pre10_text_preprocessing.py",
"pre20_merge_all_mapped_features.py",
# ### "pre21_generate_subsample.py", # should not be used anymore
# "pre22_split_train_val.py"
]
config = {
'original_dataset': os.path.join(data_path, 'part-*'),
'base_path': os.path.join(base_path, ''),
'temp_path': os.path.join(base_path, 'Temp'),
'dict_path': dict_path,
'train_val_ratio': [1, 0],
'dask_tmp_path': os.path.join(base_path, 'Temp', 'dask_tmp'),
}
print(config)
do_preprocessing(config, all_scripts, generate_dict=False, is_test=True)
def extract_features():
base_path = './workflow'
dict_path = os.path.join(base_path, 'Dictionary')
data_path = os.path.join(base_path, 'Full_mapped_dataset')
all_scripts = [
'fe01_follower_features.py',
'fe02_user_hashtags.py',
'fe03_categorical_combo.py',
'fe20_merge_all_features.py',
'fe_32a_target_encoding_split_cols.py',
'fe_33_target_encoding_mapping.py'
]
# define all config paths needed by the subscripts
config = {
'data_path': data_path,
'base_path': os.path.join(base_path, ''),
'temp_path': os.path.join(base_path, 'Temp'),
'preproc_dict_path': dict_path,
'dict_path': dict_path,
'dask_tmp_path': os.path.join(base_path, 'Temp', 'dask_tmp'),
}
print(config)
do_feature_extraction(config, all_scripts, generate_dict=False, is_test=True)
def evaluate():
f = './part.0.parquet'
print('reading parquet')
test = pd.read_parquet(f)
test= | pd.get_dummies(test,columns=["mapped_tweet_type","mapped_language_id"]) | pandas.get_dummies |
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
class Data:
'''Obtains hydro data and preprocesses it.'''
def data(self, test_len):
names = ['date', 'price', 'avg_p', 'bid', 'ask',
'o', 'h', 'l', 'c', 'avgp', 'vol', 'oms', 'num']
# get data
df = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:1]
df[[1, 2]] = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,6:8]
df = pd.concat([df, pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:-1].drop(
columns=['Date'])], axis=1).iloc[::-1].reset_index().drop(columns='index')
df.columns = names
# Filter out null
for name in names:
no_null = []
# check if null exist in column
if any(df[name].isnull()):
# traverse the boolean dataframe
for i, j in enumerate(df[name].isnull()):
if not j:
# hold a value from latest non null
tmp = df[name].iloc[i]
no_null.append(tmp)
else:
no_null.append(tmp)
# put back in dataframe
df[name] = pd.Series(no_null)
# Get float from string
for name in names[1:]:
if type(df[name].iloc[1]) == str:
df[name] = pd.Series([float(i.replace(',', '.')) for i in df[name]])
# Moving averages
ma_sizes = (26,)
ma = {i: [] for i in ma_sizes}
for size in ma_sizes:
for i in range(len(df)):
if i <= size:
ma[size].append(np.average(df['price']))
else:
value = sum(df['price'].values[i - size: i]) / size
ma[size].append(value)
# Exponential moving average
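        # Recurrence used below: EMA_t = price_t * k + EMA_(t-1) * (1 - k), with k = smoother / (1 + size)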
smoother = 2
em_sizes = (12, 20, 26)
em = {i: [] for i in em_sizes}
for size in em_sizes:
em_t = sum(df['price'][:size]) / size
for i in range(len(df)):
if i <= size:
em[size].append(0)
else:
em_t = (df['price'][i] * (
smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
em[size].append(em_t)
# MACD
macd1 = [i - j for i, j in zip(em[12], em[26])]
macd2 = []
macd3 = []
em_t = sum(macd1[:9]) / 9
for i in range(len(macd1)):
if i <= 9:
macd2.append(0)
else:
em_t = (macd1[i] * (
smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
macd2.append(em_t)
macd3 = [i - j for i, j in zip(macd1, macd2)]
tech = [ma[26], em[12], em[26], macd1, macd2, macd3]
names_df2 = ['ma1', 'em1', 'em2', 'md1', 'md2', 'md3']
names2 = names + names_df2
df2 = pd.DataFrame({i: j for i, j in zip(names_df2, tech)})
# slice the first 26 rows
df3 = | pd.concat([df, df2], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 15:37:55 2021
@author: Gary
"""
import pandas as pd
import numpy as np
import build_common
trans_dir = build_common.get_transformed_dir()
lower_tolerance = 95
upper_tolerance = 105
density_min = 6.0
density_max = 13.0
# Normally set to True
remove_dropped_keys = True
class Carrier_ID():
def __init__(self,input_df,data_source='bulk'):
self.remove_dropped_keys = remove_dropped_keys
if not self.remove_dropped_keys:
print(' -- Not removing dropped keys from carrier sets')
self.df = input_df
self.in_upk = self.df.UploadKey
self.in_ik = self.df.IngredientKey
self.data_source = data_source
self.auto_fn = trans_dir+f'{data_source}/carrier_list_auto.csv'
self.curdf_fn = trans_dir+f'{data_source}/carrier_list_curated.csv'
self.probdf_fn = trans_dir+f'{data_source}/carrier_list_prob.csv'
# list of single purpose lables for carriers
self.wlst = ['carrier / base fluid', 'carrier/base fluid', 'carrier fluid',
'carrier','base fluid','base carrier fluid','carrier/base flud',
'base fluid / carrier','carrier/ base fluid','base/carrier fluid',
'carrier base fluid','water','base fluid ',' carrier / base fluid ',
'base fluid & mix water', 'base fluid & mix water,', 'fresh water',
'carrier/base fluid ', 'treatment carrier', 'carrier/basefluid',
'carrying agent', 'base / carrier fluid', 'carrier / base fluid - water',
'carrier fluid ', 'base frac fluid', 'water',
'water / produced water', 'carrier ', 'base carrier',
'fracture fluid', 'frac base fluid']
self.proppants = ['14808-60-7','1302-93-8','1318-16-7','1302-74-5','1344-28-1','14464-46-1','7631-86-9','1302-76-7']
self.gasses = ['7727-37-9','124-38-9']
self.merge_bgCAS()
self.make_MI_fields()
self.make_percent_sums()
self.fetch_carrier_lists()
self.check_for_prob_disc()
self.check_for_auto_disc()
self.check_auto_against_list()
self.save_curation_candidates()
def check_for_removed_keys(self,ref_df,do_IngKey=True):
"""When saved IngredientKeys are missing in new data sets, we drop the
associated disclosures from the curated list. This forces a new evaluation
of those disclosures in case they have been changed."""
if self.remove_dropped_keys:
testupk = pd.merge(self.in_upk,ref_df[['UploadKey']],
on='UploadKey',how='outer',indicator=True)
#print(testupk[testupk['_merge']=='right_only'])
dropkeys = testupk[testupk['_merge']=='right_only'].UploadKey.tolist()
if len(dropkeys)>0:
print(f' ** Dropping {len(dropkeys)} carriers because UploadKeys are missing in latest data')
ref_df = ref_df[~(ref_df.UploadKey.isin(dropkeys))]
#print(testupk.head(10))
if do_IngKey:
testik = pd.merge(self.in_ik,ref_df[['IngredientKey']],
on='IngredientKey',how='outer',indicator=True)
#print(testik[testik['_merge']=='right_only'])
dropkeys = testik[testik['_merge']=='right_only'].IngredientKey.tolist()
if len(dropkeys)>0:
print(f' ** Dropping {len(dropkeys)} carriers because IngredientKeys are missing in latest data')
ref_df = ref_df[~(ref_df.IngredientKey.isin(dropkeys))]
return ref_df
def fetch_carrier_lists(self):
print(' -- loading auto-detected records')
self.autodf = pd.read_csv(self.auto_fn,low_memory=False,
quotechar='$',encoding='utf-8')
self.autodf['is_new'] = False
self.autodf = self.check_for_removed_keys(self.autodf)
self.remove_disclosures(self.autodf)
print(' -- loading curation-detected records')
self.curdf = pd.read_csv(self.curdf_fn,low_memory=False,
quotechar='$',encoding='utf-8')
self.curdf['is_new'] = False
self.curdf = self.check_for_removed_keys(self.curdf)
self.remove_disclosures(self.curdf)
print(' -- loading problem records')
self.probdf = pd.read_csv(self.probdf_fn,low_memory=False,
quotechar='$',
encoding='utf-8')
self.probdf['is_new'] = False
self.probdf = self.check_for_removed_keys(self.probdf,do_IngKey=False)
self.remove_disclosures(self.probdf)
def merge_bgCAS(self):
#casing = pd.read_csv('./sources/casing_curate_master.csv',
casing = pd.read_csv(trans_dir+'casing_curated.csv',
quotechar='$',encoding='utf-8')
casing['is_valid_CAS'] = casing.bgCAS.str[0].isin(['0','1','2','3','4',
'5','6','7','8','9'])
self.df = pd.merge(self.df,casing[['CASNumber','IngredientName',
'bgCAS','is_valid_CAS']],
on=['CASNumber','IngredientName'],how='left')
self.df.is_valid_CAS.fillna(False,inplace=True)
def make_MI_fields(self):
# remove records that are more likely unreliable: when MI is small
cond = (self.df.MassIngredient>2)&(self.df.PercentHFJob>0)
t = self.df[cond][['MassIngredient','PercentHFJob','UploadKey']].copy()
# make a simple ratio of MI to %HFJ. If everything is consistent, this
# ratio should essentially be the same for all records in a disclosure
t['permassratio'] = t.MassIngredient/t.PercentHFJob
gb = t.groupby('UploadKey',as_index=False)['permassratio'].agg(['min','max']).reset_index()
gb.columns = ['UploadKey','small','big']
gb['rat_dev'] = (gb.big-gb.small)/gb.big
# set MIok to true if the range within a disclosure is less than 10%
# MIok is a disclosure level flag.
gb['MIok'] = gb.rat_dev<.1
print(f'Creating MIok: Number disclosures with MI: {len(gb)}, out of tolerance: {len(gb[gb.rat_dev>0.1])}')
self.df = pd.merge(self.df,gb[['UploadKey','MIok']],on='UploadKey',how='left')
self.df.MIok = np.where(~cond,False,self.df.MIok)
cond2 = (self.df.MassIngredient>5)&(self.df.TotalBaseWaterVolume>10000)&self.df.MIok
self.df['dens_test'] = np.where(cond2,
self.df.MassIngredient/self.df.TotalBaseWaterVolume,
np.NaN)
# density can be within pretty wide range; will check again at
c1 = self.df.dens_test>density_min
c2 = self.df.dens_test<density_max
self.df['maybe_water_by_MI']=np.where(c1&c2,'yes','no')
self.df.maybe_water_by_MI = np.where(self.df.dens_test.isna(),
'not testable',
self.df.maybe_water_by_MI)
def make_percent_sums(self):
gball = self.df.groupby('UploadKey',as_index=False)[['PercentHFJob',
'is_valid_CAS']].sum()
gball['has_no_percHF'] = ~(gball.PercentHFJob>0)
gball['has_no_valid_CAS'] = ~(gball.is_valid_CAS>0)
gbmax = self.df.groupby('UploadKey',as_index=False)[['PercentHFJob',
'TotalBaseWaterVolume']].max()
gbmax.columns = ['UploadKey','PercMax','TBWV']
gball = pd.merge(gball,gbmax,on='UploadKey',how='left')
cond = self.df.PercentHFJob>0
gbw = self.df[cond].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbw.columns = ['UploadKey','percSumAll']
gbwo = self.df[cond&self.df.is_valid_CAS].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbwo.columns = ['UploadKey','percSumValid']
gbwoSA = self.df[cond&(~(self.df.bgCAS=='sysAppMeta'))].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbwoSA.columns = ['UploadKey','percNoSysApp']
mg = pd.merge(gball,gbw,on=['UploadKey'],how='left')
mg = pd.merge(mg,gbwo,on='UploadKey',how='left')
mg = pd.merge(mg,gbwoSA,on='UploadKey',how='left')
c1 = self.df.bgCAS.isin(self.proppants)
c2 = self.df.Purpose == 'Proppant'
gbprop = self.df[cond&(c1|c2)].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbprop.columns = ['UploadKey','percProp']
mg = pd.merge(mg,gbprop,on='UploadKey',how='left')
gbgas = self.df[self.df.bgCAS.isin(self.gasses)].groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gbgas.columns = ['UploadKey','percGas']
self.disc = pd.merge(mg,gbgas,on='UploadKey',how='left')
def addToProbDict(self,dic,UploadKeyList,problem):
for upl in UploadKeyList:
dic.setdefault(upl, []).append(problem)
return dic
def check_for_prob_disc(self):
d = {}
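        # Reason codes used below (each maps to the condition tested next to it):
        #   0 = no valid CAS numbers          1 = no TotalBaseWaterVolume
        #   2 = no PercentHFJob values        3 = percSumValid > upper_tolerance
        #   4 = percNoSysApp > upper_tolerance
        #   5 = proppant records >= 50%       6 = percSumAll < 90
        #   7 = gasses >= 50%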
upkl = self.disc[~(self.disc.TBWV>0)].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 1)
upkl = self.disc[self.disc.percSumValid>upper_tolerance].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 3)
upkl = self.disc[self.disc.percNoSysApp>upper_tolerance].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 4)
upkl = self.disc[self.disc.has_no_percHF].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 2)
upkl = self.disc[self.disc.has_no_valid_CAS].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 0)
upkl = self.disc[self.disc.percProp>=50].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 5)
upkl = self.disc[self.disc.percSumAll<90].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 6)
# if gasses are dominant
upkl = self.disc[self.disc.percGas>=50].UploadKey.unique().tolist()
d = self.addToProbDict(d, upkl, 7)
# =============================================================================
# # if MI not ok - when something is wrong with MI, can't trust other numbers.
# upkl = self.disc[~self.disc.MI_inconsistent].UploadKey.unique().tolist()
# d = self.addToProbDict(d, upkl, 8)
#
# =============================================================================
# =============================================================================
# cond = (self.disc.percSumValid>0.95) & (self.disc.percSumValid<1.05)
# upkl = self.disc[cond].UploadKey.unique().tolist()
# d = self.addToProbDict(d, upkl, 5)
# =============================================================================
print(f'New problem disclosures found: {len(d)} ')
uploadKeys = []
problems = []
for upk in d.keys():
uploadKeys.append(upk)
problems.append(str(d[upk])[1:-1])
pdf = pd.DataFrame({'UploadKey':uploadKeys,
'reasons':problems})
pdf['is_new'] = True
self.probdf = pd.concat([pdf,self.probdf],sort=True)
self.probdf.to_csv('./tmp/carrier_list_prob_NEW.csv',encoding= 'utf-8',quotechar='$',index=False)
self.remove_disclosures(pdf)
def auto_set_1(self):
""" THis is the most basic auto algorithm set:
- looking only at records with valid CAS numbers
- single record with a carrier purpose
- CASNumber is water
- 50% < %HFJob < 100% (single 100% records not ok)
"""
t = self.df[self.df.is_valid_CAS].copy()
t['has_purp'] = t.Purpose.str.strip().str.lower().isin(self.wlst)
gbp = t.groupby('UploadKey',as_index=False)['has_purp'].sum()
t = t.drop('has_purp',axis=1)
t = pd.merge(t,gbp,on='UploadKey',how='left')
#print(f'IS IN t: {(t.UploadKey=="ffd52c1a-1868-4b7f-a8a8-47c6621d4802").sum()}')
c1 = t.has_purp==1 # only 1 record with Purpose in wlst
c2 = t.bgCAS == '7732-18-5' # must be water
c3 = (t.PercentHFJob >= 50)&(t.PercentHFJob < 100) # should be at least this amount
c4 = t.Purpose.str.strip().str.lower().isin(self.wlst)
slic = t[c1&c2&c3&c4][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's1'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_1: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_2(self):
""" THis basic auto algorithm set allows more than one water record, but still restricted:
- only include records with valid CAS numbers as water
- sum of %HFJob for all is < 100
- sum of water records should be >50 % (otherwise we pick up Nitrogen dominated fracks)
Note this can still produce single record carriers if only one of
the identified 'carrier/base' records meets the criteria especially
that there is more than one carrier record, but only one is water. Set 1
requires that there is only ONE carrier record.
"""
t = self.df[self.df.is_valid_CAS].copy()
#print(self.df.columns)
t['has_purp'] = (t.Purpose.str.strip().str.lower().isin(self.wlst))\
&(t.PercentHFJob>0) # prevent some carriers with no %HFJ from the calculation
# Added 11/9/2021, after removing all previous S2 from
gbp = t.groupby('UploadKey',as_index=False)['has_purp'].sum()
gbwater = t[t.bgCAS=='7732-18-5'].groupby('UploadKey',as_index=False)\
['PercentHFJob'].sum().rename({'PercentHFJob':'perc_water'},axis=1)
t = t.drop('has_purp',axis=1)
t = pd.merge(t,gbp,on='UploadKey',how='left')
t = pd.merge(t,gbwater,on='UploadKey',how='left')
# first find each prospective record could be part of carrier
c1 = t.has_purp>1 # requires more than one carrier in disclosure
c2 = t.bgCAS == '7732-18-5' # keep only water records as carrier
c3 = t.Purpose.str.strip().str.lower().isin(self.wlst)
c4 = t.PercentHFJob > 0 # added 11/9/2021
c5 = t.perc_water>=50 # added 11/15/2021
slic = t[c1&c2&c3&c4&c5][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume','MIok']].copy()
# make sure sum percentage of slic records is not too much
gb = slic.groupby('UploadKey',as_index=False)[['PercentHFJob']].sum()
gb['test'] = gb.PercentHFJob<100
#print(f'Auto_set_2: detected length {len(gb)} ')
slic = pd.merge(slic,gb[['UploadKey','test']],on='UploadKey',how='left')
        slic = slic[slic.test].drop('test',axis=1)
# check what MI has to say about these fields
gb = slic.groupby('UploadKey',as_index=False)[['MassIngredient']].sum()
gb.columns = ['UploadKey','sumMass']
gb2 = slic.groupby('UploadKey',as_index=False)[['TotalBaseWaterVolume','MIok']].first()
gb = pd.merge(gb,gb2,on='UploadKey',how='left')
gb['dens_test'] = gb.sumMass/gb.TotalBaseWaterVolume
gb['no_keep'] = ((gb.dens_test<density_min)|(gb.dens_test>density_max))&gb.MIok
print(f"Auto_set_2: new {len(gb)}, not kept (density out of range): {len(gb[gb.no_keep])}")
slic = pd.merge(slic,gb[['UploadKey','no_keep']],on='UploadKey',how='left')
slic = slic[~slic.no_keep].drop('no_keep',axis=1)
checkver = pd.concat([slic,pd.DataFrame({'UploadKey':slic.UploadKey.unique().tolist()})])
checkver.sort_values(['UploadKey','PercentHFJob'],ascending=False).to_csv('./tmp/temp.csv')
slic['auto_carrier_type'] = 's2'
slic['is_new'] = True
return slic
def auto_set_3(self):
""" Set3 has three conditions:
- CASNumber is water (7732-18-5)
- IngredientName has the words "including mix water" (a common identifier)
- that record is > 40% PercentHFJob
These records do not have direct indications of carrier records in
the Purpose (which is often cluttered with multiple purposes) but
are clearly single record water-based carriers.
"""
t = self.df[self.df.is_valid_CAS].copy()
c1 = t.IngredientName.str.contains('including mix water')
c2 = t.bgCAS == '7732-18-5' # must be water
c3 = (t.PercentHFJob >= 40)&(t.PercentHFJob < 100) # should be at least this amount
slic = t[c1&c2&c3][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's3'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_3: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (density out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_4(self):
""" Set4 has four conditions:
- CASNumber is 'MISSING'
- IngredientName has the words "including mix water" (a common identifier)
- that record is > 60% PercentHFJob
- the total_percent_valid_job (including the "including mix" record) is <105%
These records do not have direct indications of carrier records in
the Purpose (which is often cluttered with multiple purposes) but
are clearly single record water-based carriers.
"""
precond = (self.df.CASNumber=='MISSING')&\
(self.df.IngredientName.str.contains('including mix water'))&\
((self.df.PercentHFJob >= 60)&(self.df.PercentHFJob < 100))
#print(f'Number of raw records with primary condition: {precond.sum()}')
t = self.df[(self.df.is_valid_CAS)|precond|(self.df.bgCAS=='proprietary')].copy()
gb = t.groupby('UploadKey',as_index=False)['PercentHFJob'].sum()\
.rename({'PercentHFJob':'totPercent'},axis=1)
t = pd.merge(t,gb,on='UploadKey',how='left')
# calc total%
cond = (t.CASNumber=='MISSING')&\
(t.IngredientName.str.contains('including mix water'))&\
((t.PercentHFJob >= 60)&(t.PercentHFJob < 100))
c1 = (t.totPercent>95) & (t.totPercent<105)
slic = t[c1&cond][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's4'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_4: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (density out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_5(self):
""" This is just like set one, except that no carrier purpose is present:
- looking only at records with valid CAS numbers
- CASNumber is water
- 50% < %HFJob < 100% (single 100% records not ok)
"""
t = self.df[self.df.is_valid_CAS].copy()
t['has_purp'] = t.Purpose.str.strip().str.lower().isin(self.wlst)
gbp = t.groupby('UploadKey',as_index=False)['has_purp'].sum()
t = t.drop('has_purp',axis=1)
t = pd.merge(t,gbp,on='UploadKey',how='left')
c1 = t.has_purp==0 # no records with Purpose in wlst
c2 = t.bgCAS == '7732-18-5' # must be water
c3 = (t.PercentHFJob >= 50)&(t.PercentHFJob < 100) # should be at least this amount
#c4 = t.Purpose.str.strip().str.lower().isin(self.wlst)
slic = t[c1&c2&c3][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's5'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_5: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_6(self):
""" Similar to set 1;
- bgCAS is ambiguousID
- single record with a carrier purpose
- IngredientName is either in 'wst' list or has "water" in it
- 50% < %HFJob < 100% (single 100% records not ok)
"""
t = self.df.copy()
t['has_purp'] = t.Purpose.str.strip().str.lower().isin(self.wlst)
gbp = t.groupby('UploadKey',as_index=False)['has_purp'].sum()
t = t.drop('has_purp',axis=1)
t = pd.merge(t,gbp,on='UploadKey',how='left')
t.TradeName = t.TradeName.str.lower()
t.TradeName.fillna('empty',inplace=True)
c1 = t.has_purp==1 # only 1 record with Purpose in wlst
c2 = t.bgCAS == 'ambiguousID' # must be water
c3 = (t.PercentHFJob >= 50)&(t.PercentHFJob < 100) # should be at least this amount
c4 = t.Purpose.str.strip().str.lower().isin(self.wlst)
c5 = t.IngredientName.isin(self.wlst)|t.IngredientName.str.contains('water')
c6 = t.TradeName.isin(self.wlst)|t.TradeName.str.contains('water')
c6 = (~(t.TradeName.str.contains('slick'))) & c6 # prevent 'slickwater' from counting as 'water'
slic = t[c1&c2&c3&c4&c5&c6][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's6'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_6: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_7(self):
""" Like set_1, but for salted water:
- looking only at records with valid CAS numbers
- single record with a carrier purpose
- CASNumber is either 7447-40-7 or 7647-14-5
- 50% < %HFJob < 100% (single 100% records not ok)
"""
t = self.df[self.df.is_valid_CAS].copy()
t['has_purp'] = t.Purpose.str.strip().str.lower().isin(self.wlst)
gbp = t.groupby('UploadKey',as_index=False)['has_purp'].sum()
t = t.drop('has_purp',axis=1)
t = pd.merge(t,gbp,on='UploadKey',how='left')
c1 = t.has_purp==1 # only 1 record with Purpose in wlst
c2 = t.bgCAS.isin(['7447-40-7','7647-14-5']) # kcl or nacl
c3 = (t.PercentHFJob >= 50)&(t.PercentHFJob < 100) # should be at least this amount
c4 = t.Purpose.str.strip().str.lower().isin(self.wlst)
slic = t[c1&c2&c3&c4][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's7'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_7: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_8(self):
""" Many skytruth carriers have this profile;
- bgCAS is ambiguousID or 7732-18-5
- IngredientName is MISSING
- Purpose is "unrecorded purpose"
- TradeName has either "water" or "brine"
- can be one or two records in each disclosure
- 50% < sum of %HFJob < 100%
"""
t = self.df.copy()
#gbp = t.groupby('UploadKey',as_index=False)['has_unrec_purp'].sum()
#t = t.drop('num_unrec_purp',axis=1)
#t = pd.merge(t,gbp,on='UploadKey',how='left')
t.TradeName = t.TradeName.str.lower()
t.TradeName.fillna('empty',inplace=True)
c1 = t.Purpose == 'unrecorded purpose'
c2 = t.bgCAS.isin(['ambiguousID','7732-18-5'])
c3 = t.IngredientName=='MISSING'
c4 = t.TradeName.str.contains('water')
c4 = (~(t.TradeName.str.contains('slick'))) & c4 # prevent 'slickwater' from counting as 'water'
tt = t[c1&c2&c3&c4].copy()
gb = tt.groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gb.columns = ['UploadKey','unrec_percent']
tt = pd.merge(tt,gb,on='UploadKey',how='left')
c5 = (tt.unrec_percent >= 50)&(tt.unrec_percent< 100) # should be at least this amount
slic = tt[c5][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's8'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_8: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
def auto_set_9(self):
""" Many skytruth carriers have this profile;
- bgCAS is ambiguousID or 7732-18-5
- IngredientName is MISSING
- Purpose is one of the standard carrier words or phrases
- TradeName has either "water" or "brine"
- can be one or two records in each disclosure
- 50% < sum of %HFJob < 100%
"""
t = self.df.copy()
t.TradeName = t.TradeName.str.lower()
t.TradeName.fillna('empty',inplace=True)
c1 = t.Purpose.str.strip().str.lower().isin(self.wlst)
c2 = t.bgCAS.isin(['ambiguousID','7732-18-5'])
c3 = t.IngredientName=='MISSING'
c4 = t.TradeName.str.contains('water')
c4 = (~(t.TradeName.str.contains('slick'))) & c4 # prevent 'slickwater' from counting as 'water'
tt = t[c1&c2&c3&c4].copy()
gb = tt.groupby('UploadKey',as_index=False)['PercentHFJob'].sum()
gb.columns = ['UploadKey','unrec_percent']
tt = pd.merge(tt,gb,on='UploadKey',how='left')
c5 = (tt.unrec_percent >= 50)&(tt.unrec_percent< 100) # should be at least this amount
slic = tt[c5][['IngredientKey','UploadKey','CASNumber',
'IngredientName','Purpose','TradeName',
'PercentHFJob','bgCAS','maybe_water_by_MI','dens_test',
'MassIngredient','TotalBaseWaterVolume']].copy()
slic['auto_carrier_type'] = 's9'
slic['is_new'] = True
#print(f'Disclosure is in set: {len(slic[slic.UploadKey=="f961a561-edd3-4c9e-8b38-3ba58d2b73c9"])}')
print(f"Auto_set_9: new {len(slic)}, maybe_water_by_MI? {len(slic[slic.maybe_water_by_MI=='yes'])}, not kept (MIdensity out of range): {len(slic[slic.maybe_water_by_MI=='no'])}")
slic = slic[~(slic.maybe_water_by_MI=='no')] # don't keep those flagged disclosures
return slic
    def check_for_auto_disc(self):
        results = []
        # run each auto-detection set in turn, removing matched disclosures as we go
        for auto_set in (self.auto_set_1, self.auto_set_2, self.auto_set_3,
                         self.auto_set_4, self.auto_set_5, self.auto_set_6,
                         self.auto_set_7, self.auto_set_8, self.auto_set_9):
            res = auto_set()
            self.remove_disclosures(res)
            results.append(res)
        results.append(self.autodf)
        self.autodf = pd.concat(results,sort=True)
        self.autodf.to_csv('./tmp/carrier_list_auto_NEW.csv',quotechar='$',
                           encoding = 'utf-8',index=False)
        #print(f'New auto-detected carriers: {len(slic)}')
        #print(f'Remaining disclosures: {len(self.disc)}')
def check_auto_against_list(self):
""" Used to compare what has been identified by auto with another list
(such as a list from a previous version). Any non-match is found in df
and saved in a curation-like file..."""
prev = pd.read_csv('./tmp/mass_keys_v9.csv')
curr = pd.DataFrame({'UploadKey':self.autodf.UploadKey.unique().tolist()})
mg = pd.merge(prev,curr,on='UploadKey',how='outer',indicator=True)
just_prev = mg[mg['_merge']=='left_only'][['UploadKey']]
print(f'Number from old (v9): {len(prev)}, current: {len(curr)}, not in new {len(just_prev)}')
# make out df for review
# get %HFsums into the list
self.df = pd.merge(self.df,self.disc[['UploadKey','percSumValid','percSumAll']],
on='UploadKey',how='left')
out = pd.merge(just_prev,self.df[self.df.PercentHFJob>5][['UploadKey','dens_test','maybe_water_by_MI',
'TotalBaseWaterVolume','MIok',
'CASNumber','bgCAS','IngredientName',
'Purpose','PercentHFJob','TradeName',
'percSumValid','percSumAll']],
on='UploadKey',how='left')
out = pd.concat([out,out[['UploadKey']]],sort=True)\
.sort_values(['UploadKey','PercentHFJob'],ascending=False)
out.to_csv('./tmp/massDisclosures_not_yet_caught.csv')
def save_curation_candidates(self):
c1 = self.df.Purpose.str.strip().str.lower().isin(self.wlst)
c2 = self.df.PercentHFJob>=5
t = self.df[c1|c2].copy()
#t['multi_rec'] = t.UploadKey.duplicated(keep=False)
t['is_new'] = True
t['cur_carrier_status'] = ''
t['is_water_carrier'] = ''
        ukt = t.UploadKey.unique().tolist()
        print(f'Still to be curated (all data sets): {len(ukt)}')
self.curdf.is_new = False
self.curdf = | pd.concat([self.curdf,t],sort=True) | pandas.concat |
#june 2014
#get RNA data for candidate CNV genes
import csv
import math
import numpy as np
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import itertools
from itertools import zip_longest
import pandas as pd
import timeit
#function to transpose
def transpose(mylist):
return [list(i) for i in zip(*mylist)]
#function for significant digits
from math import log10, floor
def round_to_2(x):
digits = -int(floor(log10(x))-1)
digit_str = '.' + str(digits) + 'f'
return float(format(x, digit_str))
#function for testing if a string is a number
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
#get filtered gene list
with open('BRCA_CNVs_foldchange_all_filtered.csv', 'r') as filtered:
filtered = csv.reader(filtered)
filtered_genelist = next(filtered)
genelist = list(filtered_genelist)[1:]
#assign synonyms
cands = pd.read_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step4.csv',header=0)
sym = cands[(cands.Symbol.isin(genelist))|(cands.Synonym.isin(genelist))]
sym = sym[['Symbol','Synonym']]
sym.drop_duplicates(inplace=True)
#assign gene IDs
ncbi = pd.read_csv('Homo_sapiens.gene_info',sep='\t',nrows=1, names = range(50))
ncbi.dropna(how='all',inplace=True)
ncbi.dropna(how='all',inplace=True,axis=1)
columns = ncbi.iloc[0,0].split('(')[0].split()[1:]  # .ix is removed in modern pandas; iloc reads the same cell
ncbi = pd.read_csv('Homo_sapiens.gene_info',sep='\t',names=columns,skiprows=1)
ncbi = ncbi[['GeneID','Symbol','Synonyms','Symbol_from_nomenclature_authority','Other_designations']]
ncbi.GeneID = ncbi.GeneID.astype(str)
def ncombine(row):
s = row.GeneID+'|'+row.Symbol+'|'+row.Symbol_from_nomenclature_authority
if row.Synonyms != '-':
s += '|'+row.Synonyms
if row.Other_designations != '-':
s += '|'+row.Other_designations
return s
#ncbi['All Symbols'] = ncbi.apply(ncombine, axis=1)
#ncbi = ncbi[['GeneID','All Symbols']]
def Symb(row,x):
ls = row['All Symbols'].split('|')
if x in ls:
return x
def ID(row):
if (row.name/100).is_integer():
print(row.name)
if '?' not in row.Symbol:
if ncbi[ncbi['Symbol']==row.Symbol].shape[0] ==1:
frame = ncbi[ncbi['Symbol']==row.Symbol]
return frame.reset_index()['GeneID'][0]
elif ncbi[ncbi['Symbol_from_nomenclature_authority']==row.Symbol].shape[0] ==1:
frame = ncbi[ncbi['Symbol_from_nomenclature_authority']==row.Symbol]
return frame.reset_index()['GeneID'][0]
elif ncbi[ncbi['Synonyms'].str.contains(row.Symbol)].shape[0] ==1:
frame = ncbi[ncbi['Synonyms'].str.contains(row.Symbol)]
ls = frame.reset_index()['Synonyms'][0].split('|')
if row.Symbol in ls:
return frame.reset_index()['GeneID'][0]
elif ncbi[ncbi['Other_designations'].str.contains(row.Symbol)].shape[0] ==1:
frame = ncbi[ncbi['Other_designations'].str.contains(row.Symbol)]
ls = frame.reset_index()['Other_designations'][0].split('|')
if row.Symbol in ls:
return frame.reset_index()['GeneID'][0]
print('assigning geneIDs to synonyms for',sym.shape[0],'lines')
sym['ID'] = sym.apply(ID,axis=1)
ls = list(set(sym.values.ravel()))[1:]
##ls.remove('176')
##ls.remove('746')
##ls.remove('1749')
##ls.remove('10992')
##ls.remove('9410')
#function for dealing with gene synonyms
def Syn(row):
if sym[sym.Symbol==row.gene].shape[0] > 0:
return row.gene
elif sym[sym.Symbol==row.gene_id].shape[0] > 0:
return row.gene_id
elif sym[sym.Synonym==row.gene].shape[0] ==1:
frame = sym[sym.Synonym==row.gene]
return str(frame.reset_index()['Symbol'][0])
elif sym[sym.Synonym==row.gene_id].shape[0] ==1:
frame = sym[sym.Synonym==row.gene_id]
return str(frame.reset_index()['Symbol'][0])
elif sym[sym.ID==row.id].shape[0] ==1:
frame = sym[sym.ID==row.id]
return frame.reset_index()['Symbol'][0]
else: return ''
#generate summary files of RNAseq data of interest for all tumors by original barcode
#def getRNApandas():
print('getting RNASeq values for gene list with pandas...')
with open('../BRCA_pathology_2014.csv', 'r') as path_file:
path = list(csv.reader(path_file))
filemap = pd.read_csv('../../RNASEQ/FILE_SAMPLE_MAP.txt',sep='\t',header=0,dtype=str)
filemap['tumor-normal'] = filemap['barcode(s)'].str[13:14]
filemap['barcode(s)'] = filemap['barcode(s)'].str[:12]
filemap = filemap[filemap['tumor-normal']=='0']
filemap = filemap[filemap['filename'].str.contains('unc.edu')]
filemap = filemap[filemap['filename'].str.contains('genes.normalized_results')]
df = pd.DataFrame(index=ls,dtype=float)
with open('../../RNASEQ/FILE_SAMPLE_MAP.txt', 'r') as file_map:
file_map = list(csv.reader(file_map, delimiter='\t'))
for tumorID in path[1:]:
if float((len(path[1:]) - path[1:].index(tumorID))/50).is_integer():
print(str(len(path[1:]) - path[1:].index(tumorID)) + ' ' + 'tumors left')
if len(tumorID) > 0:
tumor_row = filemap[filemap['barcode(s)']==tumorID[0]]
if tumor_row.shape[0] == 1:
file_name = '../../RNASEQ/RNASeqV2/UNC__IlluminaHiSeq_RNASeqV2/Level_3/' + tumor_row['filename'].max()
if tumor_row['barcode(s)'].max() in df.columns.values:
df.drop(tumor_row['barcode(s)'].max(),axis=1,inplace=True)
else:
sample_RNA = pd.read_csv(file_name,header=0,sep='\t')
sample_RNA['gene'] = sample_RNA['gene_id'].str.split('|').apply(lambda x: x[0])
sample_RNA['id'] = sample_RNA['gene_id'].str.split('|').apply(lambda x: x[1])
sample_RNA = sample_RNA[(sample_RNA['gene'].isin(ls))|(sample_RNA['gene_id'].isin(ls))|(sample_RNA['id'].isin(ls))]
sample_RNA.drop_duplicates(subset='gene_id',inplace=True)
sample_RNA['symbol'] = sample_RNA.apply(Syn,axis=1)
sample_RNA.symbol = sample_RNA.symbol.astype(str)
sample_RNA.set_index('symbol',inplace=True)
sample_RNA = sample_RNA[sample_RNA.index.isin(genelist)]
#print(sample_RNA.head())
#for now drop gene names that are duplicated at this point--there are very few
ser = pd.Series(sample_RNA.index)
sample_RNA.drop(ser[ser.duplicated()].tolist(),axis=0,inplace=True)
df[tumorID[0]] = sample_RNA['normalized_count']
#print(df.head())
df.dropna(how='all',inplace=True)
df.dropna(how='all',axis=1,inplace=True)
df = df.T
df.insert(0,'Complete TCGA ID',df.index.values)
df.to_csv('BRCA_RNA_candidates.csv',index=False)
print('created RNA file')
#convert to z-scores
def zscores():
with open('BRCA_RNA_candidates.csv', 'r') as RNA:
RNA = csv.reader(RNA)
RNA = list(RNA)
RNA_tr = transpose(RNA)
z_list_tr = []
z_list_tr.append(RNA_tr[0])
for cand in range(1,len(RNA[0])):
#print(RNA[0][cand])
RNA_list = []
for i in RNA_tr[cand]:
if isnumber(i):
RNA_list.append(float(i))
normal = scipy.stats.normaltest(RNA_list)
z_array = scipy.stats.zscore(RNA_list)
z_list_cand = list(z_array)
z_list_cand.insert(0, RNA[0][cand])
z_list_tr.append(z_list_cand)
z_list = transpose(z_list_tr)
with open('BRCA_RNA_z_scores.csv','w+') as z_scores:
z_scores = csv.writer(z_scores)
for line in z_list:
z_scores.writerow(line)
print('created z-scores file')
###RUN
#getRNApandas()
zscores()
cands = pd.read_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step4.csv',header=0)
with open('BRCA_RNA_z_scores.csv','r') as z:
z = csv.reader(z)
z = next(z)
z = list(z)[1:]
sub = cands[cands.Symbol.isin(z)].copy()
rest = cands[~(cands.Symbol.isin(z))].copy()
sub['Has RNA data'] = 'yes'
rest['Has RNA data'] = 'no'
cands = | pd.concat([sub,rest]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#转化测试集时间戳为标准时间
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    #convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    #process the item_category_list field of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    #process the item_category_list field of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    #rank of the item's category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    #count the categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    #count of categories not shared (note: this overwrites the item_category_count column computed just above)
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#测试集 23-24号特征提取,25号打标
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    #validation set: features from Sept 22-23, labels on Sept 24
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    #training set: features Sept 21-22 with labels on the 23rd; 20-21 labelled on the 22nd; 19-20 labelled on the 21st; 18-19 labelled on the 20th
    #label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    #feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
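#hedged usage sketch (not part of the original script): shows how the two functions above
#are meant to be chained; it assumes the raw competition .txt files are in the working directory
def example_split_usage():
    "Illustrative only: load the raw data and apply the time-based split above"
    trainSet,testSet = loadData()
    return splitData(trainSet,testSet)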
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    #based on the Pearson correlation coefficient, drop features with correlation below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
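#hedged usage sketch (not part of the original script): the output file name below is hypothetical
def example_modelXgb_usage(train,test):
    "Illustrative only: train must contain is_trade plus the engineered features"
    result = modelXgb(train,test)
    result.to_csv('example_submission.txt',sep=' ',index=False)
    return result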
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = | pd.merge(result,feat,on=['shop_id'],how='left') | pandas.merge |
import numpy as np
import pandas as pd
from collections import defaultdict
import time
import matplotlib.pyplot as plt
import optuna
import shap
from optuna.integration import LightGBMPruningCallback, XGBoostPruningCallback
from sklearn.base import BaseEstimator, TransformerMixin, is_classifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import SimpleImputer, KNNImputer, IterativeImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import BayesianRidge, LogisticRegression, SGDClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import (
OrdinalEncoder,
OneHotEncoder,
StandardScaler,
MinMaxScaler,
RobustScaler,
)
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
from imblearn.over_sampling import SMOTE, SMOTENC, ADASYN
from imblearn.combine import SMOTEENN, SMOTETomek
from imblearn.pipeline import make_pipeline as imb_make_pipeline
from sklearn.cluster import KMeans
from hdbscan import HDBSCAN, approximate_predict
from sklearn.model_selection import (
train_test_split,
cross_val_score,
KFold,
StratifiedKFold,
)
from lightgbm import LGBMRegressor, LGBMClassifier, early_stopping
from xgboost import XGBRegressor, XGBClassifier
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn.calibration import calibration_curve
from sklearn.metrics import (
precision_recall_curve,
accuracy_score,
balanced_accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
mean_squared_error,
r2_score,
brier_score_loss,
log_loss,
)
from helper_functions.general_functions import get_state_region
from typing import Tuple, List, Dict, Callable
def adj_threshold_to_labels(model_probs: np.ndarray, threshold: float) -> np.ndarray:
"""From input of positive class probabilites applies
threshold to positive probabilities to create labels"""
return (model_probs >= threshold).astype("int")
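# Hedged example of the thresholding helper above; the probability values are illustrative only.
def _adj_threshold_example() -> np.ndarray:
    """Hedged example with illustrative values: returns array([0, 1, 1])"""
    return adj_threshold_to_labels(np.array([0.2, 0.55, 0.9]), threshold=0.5)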
def reversed_feature(feature: np.ndarray) -> np.ndarray:
"From input creates a reversed feature"
return np.power((feature.astype(float)), -1)
def log_feature(feature: np.ndarray) -> np.ndarray:
"From input creates a log feature"
return np.log(((feature.astype(float)) + 1e-4))
def squared_feature(feature: np.ndarray) -> np.ndarray:
"From input creates a squared feature"
return np.power((feature.astype(float)), 2)
def cubic_feature(feature: np.ndarray) -> np.ndarray:
"From input creates a cubic feature"
return np.power((feature.astype(float)), 3)
def qbinned_feature(feature: np.ndarray) -> np.ndarray:
"From input creates a binned feature"
quartile_list = [0, 0.25, 0.5, 0.75, 1.0]
bins = np.quantile(feature, quartile_list)
return np.digitize(feature, bins)
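# Hedged sketch applying the simple feature transforms above; the synthetic array is an
# assumption for demonstration, not project data.
def _example_feature_transforms() -> Dict[str, np.ndarray]:
    """Hedged sketch of the transforms above on a small synthetic array"""
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    return {
        "reciprocal": reversed_feature(x),
        "log": log_feature(x),
        "squared": squared_feature(x),
        "cubic": cubic_feature(x),
        "binned": qbinned_feature(x),
    }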
def numeric_imputation_search(
X: pd.DataFrame,
y: pd.DataFrame,
eval_model: BaseEstimator,
random_state: int,
scoring: str,
) -> plt.figure:
"Searches for best numeric imputation method and plots results"
cv = 5
baseline_scores = pd.DataFrame()
X_baseline = X.copy()
y_baseline = y.copy()
max_col = X_baseline.isnull().sum().idxmax()
drop_idx = X_baseline[X_baseline[max_col].isnull() == True].index
X_baseline = X_baseline.drop(drop_idx)
y_baseline = y_baseline.drop(drop_idx)
baseline_scores["No imputation, instances dropped"] = cross_val_score(
eval_model, X_baseline, y_baseline, scoring=scoring, cv=cv
)
si_scores = pd.DataFrame()
for strategy in ["mean", "median"]:
pipe = make_pipeline(SimpleImputer(strategy=strategy), eval_model)
si_scores[f"SimpleImputer (strategy= {strategy}"] = cross_val_score(
pipe, X, y, scoring=scoring, cv=cv
)
ii_scores = pd.DataFrame()
for estimator in [BayesianRidge(), ExtraTreesRegressor(random_state=random_state)]:
pipe = make_pipeline(
IterativeImputer(estimator=estimator, random_state=random_state), eval_model
)
ii_scores[estimator.__class__.__name__] = cross_val_score(
pipe, X, y, scoring=scoring, cv=cv
)
knn_scores = pd.DataFrame()
n_neighbors = [2, 3, 5, 7, 9]
for k in n_neighbors:
pipe = make_pipeline(KNNImputer(n_neighbors=k), eval_model)
knn_scores[f"KNN(k = {k})"] = cross_val_score(
pipe, X, y, scoring=scoring, cv=cv
)
final_scores = pd.concat(
[baseline_scores, si_scores, ii_scores, knn_scores],
axis=1,
keys=["baseline_score", "simple_imputer", "iterative_imputer", "knn_imputer"],
)
fig, ax = plt.subplots(figsize=(14, 8))
    means = final_scores.mean().sort_values(ascending=False)
    errors = final_scores.std().reindex(means.index)  # keep error bars aligned with the sorted means
means.plot.barh(xerr=errors, ax=ax)
ax.set_title(
f"Different Imputation Methods results with {eval_model.__class__.__name__}"
)
ax.set_xlabel(f"{scoring.capitalize()} score")
ax.set_yticks(np.arange(means.shape[0]))
plt.show()
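# Hedged usage sketch for the imputation search above; X_num/y, the LightGBM evaluator and
# the F1 scoring are assumptions chosen for demonstration.
def _example_imputation_search(X_num: pd.DataFrame, y: pd.DataFrame) -> None:
    """Hedged sketch: X_num is a numeric-only feature frame with missing values, y the labels"""
    numeric_imputation_search(
        X_num, y, eval_model=LGBMClassifier(random_state=42), random_state=42, scoring="f1"
    )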
def baseline_clfmodels_eval_cv(
clf_list: List[BaseEstimator],
X_train: np.ndarray,
y_train: np.ndarray,
scaler: Callable,
kf: StratifiedKFold,
num_columns_idx: List[int],
multi_class: bool = None,
) -> pd.DataFrame:
"""Takes a list of models, training set,
training labels, numerical columns list and returns different classification scores in
a DataFrame"""
scores = defaultdict(list)
for clf in clf_list:
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
pipe = make_pipeline(
ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough",
),
clf,
)
for metric in [
"balanced_accuracy",
"accuracy",
"precision",
"recall",
"f1",
"f1_macro",
"average_precision",
"roc_auc",
]:
if multi_class and metric in [
"precision",
"recall",
"f1",
"average_precision",
"roc_auc",
]:
continue
elif not multi_class and metric == 'f1_macro':
continue
cross_val_metric = cross_val_score(
pipe, X_train, y_train, cv=kf, scoring=metric
)
score_name = " ".join(metric.split("_")).capitalize()
scores[score_name].append(np.mean(cross_val_metric))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
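# Hedged usage sketch for the baseline classifier evaluation above; the two classifiers and
# the CV settings are illustrative assumptions.
def _example_baseline_clf_eval(
    X_train: np.ndarray, y_train: np.ndarray, num_columns_idx: List[int]
) -> pd.DataFrame:
    """Hedged sketch: scores two untuned classifiers with 5-fold stratified CV"""
    kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    return baseline_clfmodels_eval_cv(
        [LogisticRegression(max_iter=1000), LGBMClassifier(random_state=42)],
        X_train,
        y_train,
        StandardScaler(),
        kf,
        num_columns_idx,
    )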
def baseline_regmodels_eval_cv(
reg_list: List[BaseEstimator],
X_train: np.ndarray,
y_train: np.ndarray,
scaler: Callable,
kf: KFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a list of models, training set,
training labels, numerical columns list and returns different regression scores in
a DataFrame"""
scores = defaultdict(list)
for reg in reg_list:
start = time.time()
scores["Regressor"].append(reg.__class__.__name__)
pipe = make_pipeline(
ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough",
),
reg,
)
for metric in [
"neg_mean_squared_error",
"neg_root_mean_squared_error",
]:
cross_val_metric = cross_val_score(
pipe, X_train, y_train, cv=kf, scoring=metric
)
score_name = " ".join(metric.split("_")[1:]).capitalize()
scores[score_name].append(-np.mean(cross_val_metric))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Regressor")
score_df = score_df.round(3)
return score_df
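# Hedged usage sketch for the baseline regressor evaluation above; the two regressors and
# the CV settings are illustrative assumptions.
def _example_baseline_reg_eval(
    X_train: np.ndarray, y_train: np.ndarray, num_columns_idx: List[int]
) -> pd.DataFrame:
    """Hedged sketch: scores two untuned regressors with 5-fold CV"""
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    return baseline_regmodels_eval_cv(
        [LGBMRegressor(random_state=42), XGBRegressor(random_state=42)],
        X_train,
        y_train,
        StandardScaler(),
        kf,
        num_columns_idx,
    )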
class KmeansClustering(BaseEstimator, TransformerMixin):
"""Performs unsupervised clustering of training data numerical features
and returns a new array with numerical features and clusters labels"""
def __init__(self, n_clusters: int, scaler: BaseEstimator):
self.n_clusters = n_clusters
self.col_label = ["Kmeans_clusters"]
self.kmeans = None
self.scaler = scaler
def fit(self, X: np.ndarray, y=None):
return self
def transform(self, X: np.ndarray) -> np.ndarray:
if self.kmeans:
X_scaled = self.scaler.transform(X)
clusters = self.kmeans.predict(X_scaled)
return np.c_[X, clusters]
X_scaled = self.scaler.fit_transform(X)
self.kmeans = KMeans(n_clusters=self.n_clusters, n_init=50, random_state=42)
clusters = self.kmeans.fit_predict(X_scaled)
return np.c_[X, clusters]
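# Hedged usage sketch for KmeansClustering; the cluster count and scaler are illustrative.
def _example_kmeans_clustering(X_num: np.ndarray) -> np.ndarray:
    """Hedged sketch: appends a K-means cluster-label column to a numeric feature array"""
    return KmeansClustering(n_clusters=5, scaler=StandardScaler()).fit_transform(X_num)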
class KmeansClusterDistance(BaseEstimator, TransformerMixin):
"""Performs training data numerical features distance calculation
to the cluster centroids and returns a new array with numerical features
and distance to every centroid"""
def __init__(self, n_clusters: int, scaler: BaseEstimator):
self.n_clusters = n_clusters
self.kmeans = None
self.col_labels = None
self.centroids = None
self.scaler = scaler
def fit(self, X: np.ndarray, y=None):
return self
def transform(self, X: np.ndarray) -> np.ndarray:
if self.centroids is not None:
X_scaled = self.scaler.transform(X)
test_centroids = self.kmeans.transform(X_scaled)
return np.c_[X, test_centroids]
X_scaled = self.scaler.fit_transform(X)
self.kmeans = KMeans(n_clusters=self.n_clusters, n_init=50, random_state=42)
self.centroids = self.kmeans.fit_transform(X_scaled)
self.col_labels = [f"Centroid_{i}" for i in range(self.centroids.shape[1])]
return np.c_[X, self.centroids]
class HDBSCANClustering(BaseEstimator, TransformerMixin):
"""Performs unsupervised clustering of training data numerical features
and returns a new array with numerical features and clusters labels"""
def __init__(
self,
min_cluster_size: int,
min_samples: int,
cluster_selection_epsilon: float,
scaler: BaseEstimator,
):
self.min_cluster_size = min_cluster_size
self.min_samples = min_samples
self.cluster_selection_epsilon = cluster_selection_epsilon
self.col_label = ["HDBSCAN_clusters"]
self.hdbscan = None
self.scaler = scaler
def fit(self, X: np.ndarray, y=None):
return self
def transform(self, X: np.ndarray) -> np.ndarray:
if self.hdbscan:
X_scaled = self.scaler.transform(X)
            # HDBSCAN has no predict(); approximate_predict assigns new points to the fitted clusters
            clusters, _ = approximate_predict(self.hdbscan, X_scaled)
return np.c_[X, clusters]
X_scaled = self.scaler.fit_transform(X)
self.hdbscan = HDBSCAN(
min_cluster_size=self.min_cluster_size,
min_samples=self.min_samples,
            cluster_selection_epsilon=self.cluster_selection_epsilon,
            prediction_data=True,  # required so approximate_predict can be used at transform time
        )
clusters = self.hdbscan.fit_predict(X_scaled)
return np.c_[X, clusters]
class NumericFeatureTransformation(BaseEstimator, TransformerMixin):
"""Performs training data numerical features transformations with the
passed list of functions and leaves only ones with stronger correlation
then original feature. Returns a new array with original numerical features
and new features"""
def __init__(
self,
num_col_labels: List[str],
num_col_idx: List[int],
func_list: List[Callable],
y: np.ndarray,
):
self.num_col_labels = num_col_labels
self.num_col_idx = num_col_idx
self.col_labels = []
self.func_list = func_list
self.y = y
self.test_check = False
def check_if_better(self, feature: np.ndarray, new_feature: np.ndarray):
if new_feature.shape[0] == self.y.shape[0]:
return (
True
if abs(round(np.corrcoef(feature, self.y)[0, 1], 3))
< abs(round(np.corrcoef(new_feature, self.y)[0, 1], 3))
else False
)
else:
self.test_check = True
def fit(self, X: np.ndarray, y=None):
return self
def transform(self, X: np.ndarray):
for label, idx in zip(self.num_col_labels, self.num_col_idx):
for func in self.func_list:
new_feature = func(X[:, idx])
new_feature_label = f"{label}_{func.__name__.split('_')[0]}"
if (
self.check_if_better(X[:, idx], new_feature)
and new_feature_label not in self.col_labels
and not self.test_check
):
self.col_labels.append(new_feature_label)
X = np.c_[X, new_feature]
else:
if self.test_check and new_feature_label in self.col_labels:
X = np.c_[X, new_feature]
return X
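# Hedged usage sketch for NumericFeatureTransformation; the chosen transform functions are
# illustrative assumptions.
def _example_numeric_feature_transformation(
    X: np.ndarray, y: np.ndarray, num_col_labels: List[str], num_col_idx: List[int]
) -> Tuple[np.ndarray, List[str]]:
    """Hedged sketch: keeps only transformed numeric features that correlate with y
    more strongly than the originals"""
    nft = NumericFeatureTransformation(
        num_col_labels, num_col_idx, [log_feature, squared_feature], y
    )
    return nft.fit_transform(X), nft.col_labels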
def xgboost_objective(
trial,
X: np.ndarray,
y: np.ndarray,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"XGBoost objective function to tune hyper parameters."
grid_params = {
"objective": objective,
"random_state": 42,
"verbosity": 0,
"n_jobs": -1,
"use_label_encoder": False,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 20, 300, step=10),
"scale_pos_weight": trial.suggest_int("scale_pos_weight", 1, 100),
"min_child_weight": trial.suggest_int("min_child_weight", 0, 10),
"max_depth": trial.suggest_int("max_depth", 3, 12),
"gama": trial.suggest_int("gama", 0, 10),
"lambda": trial.suggest_int("lambda", 0, 100, step=5),
"alpha": trial.suggest_int("alpha", 0, 100, step=5),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = XGBClassifier(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
eval_metric="logloss",
early_stopping_rounds=100,
callbacks=[XGBoostPruningCallback(trial, "validation_0-logloss"),],
verbose=False,
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds)
return -np.mean(cv_scores)
def xgboost_objective_reg(
trial,
X: np.ndarray,
y: np.ndarray,
cv: KFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"XGBoost Regressor objective function to tune hyper parameters."
grid_params = {
"objective": objective,
"random_state": 42,
"verbosity": 0,
"n_jobs": -1,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 20, 300, step=10),
"min_child_weight": trial.suggest_int("min_child_weight", 0, 10),
"max_depth": trial.suggest_int("max_depth", 3, 12),
"gama": trial.suggest_int("gama", 0, 10),
"lambda": trial.suggest_int("lambda", 0, 100, step=5),
"alpha": trial.suggest_int("alpha", 0, 100, step=5),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = XGBRegressor(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
eval_metric="rmse",
early_stopping_rounds=100,
callbacks=[XGBoostPruningCallback(trial, "validation_0-rmse"),],
verbose=False,
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, squared=False)
return np.mean(cv_scores)
def light_gbm_objective(
trial,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"LightGBM objective function to tune hyper parameters."
grid_params = {
"objective": objective,
# "metric": "binary_logloss",
"metric": "multi_logloss",
"num_class": 35,
"random_state": 42,
"verbosity": -1,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 10, 300, step=5),
"scale_pos_weight": trial.suggest_int("scale_pos_weight", 1, 100),
"max_depth": trial.suggest_int("max_depth", 3, 15),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 10, 200, step=10),
"lambda_l1": trial.suggest_int("lambda_l1", 0, 100, step=1),
"lambda_l2": trial.suggest_int("lambda_l2", 0, 100, step=1),
"min_gain_to_split": trial.suggest_float("min_gain_to_split", 0, 10),
}
call_back_metric = "multi_logloss"
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = LGBMClassifier(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
callbacks=[
early_stopping(100),
LightGBMPruningCallback(trial, call_back_metric),
],
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, average="macro")
return -np.mean(cv_scores)
def light_gbm_objective_reg(
trial,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: KFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"LightGBM Regressor objective function to tune hyper parameters."
grid_params = {
"objective": objective,
"metric": "rmse",
"random_state": 42,
"verbosity": -1,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 20, 300, step=20),
"max_depth": trial.suggest_int("max_depth", 3, 12),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 10, 100, step=5),
"lambda_l1": trial.suggest_int("lambda_l1", 0, 100, step=5),
"lambda_l2": trial.suggest_int("lambda_l2", 0, 100, step=5),
"min_gain_to_split": trial.suggest_float("min_gain_to_split", 0, 15),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = LGBMRegressor(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
callbacks=[early_stopping(100), LightGBMPruningCallback(trial, "rmse"),],
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, squared=False)
return np.mean(cv_scores)
def cat_boost_objective(
trial,
X: np.ndarray,
y: np.ndarray,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"Cat Boost objective function to tune hyper parameters."
grid_params = {
"objective": objective,
"eval_metric": objective,
"grow_policy": "Lossguide",
"random_state": 42,
"verbose": 0,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 20, 300, step=20),
"scale_pos_weight": trial.suggest_int("scale_pos_weight", 1, 100),
"max_depth": trial.suggest_int("max_depth", 3, 12),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 10, 100, step=5),
"l2_leaf_reg": trial.suggest_int("l2_leaf_reg", 0, 100, step=5),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = CatBoostClassifier(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
verbose=False,
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds)
return -np.mean(cv_scores)
def cat_boost_objective_reg(
trial,
X: np.ndarray,
y: np.ndarray,
cv: KFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"Cat Boost Regressor objective function to tune hyper parameters."
grid_params = {
"objective": objective,
"eval_metric": objective,
"grow_policy": "Lossguide",
"random_state": 42,
"verbose": 0,
"n_estimators": trial.suggest_categorical("n_estimators", [10000]),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3),
"num_leaves": trial.suggest_int("num_leaves", 20, 300, step=20),
"max_depth": trial.suggest_int("max_depth", 3, 12),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 10, 100, step=5),
"l2_leaf_reg": trial.suggest_int("l2_leaf_reg", 0, 100, step=5),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = CatBoostRegressor(**grid_params)
model.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
verbose=False,
)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, squared=False)
return np.mean(cv_scores)
def sgd_clf_objective(
trial,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"SGD Classifier objective function to tune hyper parameters."
grid_params = {
"random_state": 42,
"verbose": 0,
"early_stopping": True,
"validation_fraction": 0.25,
"n_iter_no_change": 10,
"n_jobs": -1,
"eta0": 0.1,
"loss": trial.suggest_categorical("loss", ["hinge", "log"]),
"penalty": trial.suggest_categorical("penalty", ["l1", "l2"]),
"learning_rate": trial.suggest_categorical(
"learning_rate", ["optimal", "adaptive"]
),
"class_weight": trial.suggest_categorical("class_weight", ["balanced", None]),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = SGDClassifier(**grid_params)
model.fit(X_train_tr, y_train)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds)
return -np.mean(cv_scores)
def logr_clf_objective(
trial,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"Logistic Regression Classifier objective function to tune hyper parameters."
grid_params = {
"random_state": 42,
"verbose": 0,
"n_jobs": -1,
"solver": "saga",
"penalty": trial.suggest_categorical("penalty", ["l1", "l2", 'none']),
"C": trial.suggest_float("C", 0, 10),
"class_weight": trial.suggest_categorical("class_weight", ["balanced", None]),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = LogisticRegression(**grid_params)
model.fit(X_train_tr, y_train)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, average="macro")
return -np.mean(cv_scores)
def dt_clf_objective(
trial,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> float:
"DT Classifier objective function to tune hyper parameters."
grid_params = {
"random_state": 42,
"criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
"splitter": trial.suggest_categorical("splitter", ["best", "random"]),
"min_samples_split": trial.suggest_int("min_samples_split", 2, 300),
"min_samples_leaf": trial.suggest_int("min_samples_leaf", 2, 50),
"min_weight_fraction_leaf": trial.suggest_float(
"min_weight_fraction_leaf", 0, 0.5
),
"max_features": trial.suggest_categorical(
"max_features", ["auto", "sqrt", "log2"]
),
"class_weight": trial.suggest_categorical("class_weight", ["balanced", None]),
"max_depth": trial.suggest_int("max_depth", 3, 20),
}
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough"
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
model = DecisionTreeClassifier(**grid_params)
model.fit(X_train_tr, y_train)
preds = model.predict(X_val_tr)
cv_scores[idx] = metric(y_val, preds, average="macro")
return -np.mean(cv_scores)
def tune_model(
objective_func: Callable,
direction: str,
n_trials: int,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
objective: str,
metric,
) -> Tuple[float, dict]:
"Funtion to tune a model. Returns best value and tuned model hyper-parameters"
study = optuna.create_study(direction=direction)
func = lambda trial: objective_func(
trial, X, y, scaler, cv, num_columns_idx, objective, metric
)
study.optimize(func, n_trials=n_trials)
return round(study.best_value, 3), study.best_params
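# Hedged usage sketch for tune_model with the LightGBM objective above; the trial count,
# objective string and metric are illustrative assumptions. The objective returns the
# negated macro F1, so the study direction is "minimize".
def _example_tune_lightgbm(
    X: np.ndarray, y: np.ndarray, num_columns_idx: List[int]
) -> Tuple[float, dict]:
    """Hedged sketch: tunes light_gbm_objective for 20 trials"""
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    return tune_model(
        light_gbm_objective,
        direction="minimize",
        n_trials=20,
        X=X,
        y=y,
        scaler=StandardScaler(),
        cv=cv,
        num_columns_idx=num_columns_idx,
        objective="multiclass",
        metric=f1_score,
    )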
def pipeline_objective(
trial,
X: np.ndarray,
y: np.ndarray,
num_col_idx: List[int],
cat_col_idx: List[int],
cv: StratifiedKFold,
metric: str,
) -> float:
"Pre-processing pipeline optimization objective function"
smote_num = trial.suggest_categorical(
"num_smote_method", ["smote", "adasyn", "smoteenn", "smotetomek"]
)
if smote_num == "smote":
smote = SMOTE(random_state=42)
elif smote_num == "adasyn":
smote = ADASYN(random_state=42)
elif smote_num == "smoteenn":
smote = SMOTEENN(random_state=42)
else:
smote = SMOTETomek(random_state=42)
scalers = trial.suggest_categorical("scalers", ["minmax", "standard", "robust"])
if scalers == "minmax":
scaler = MinMaxScaler()
elif scalers == "standard":
scaler = StandardScaler()
else:
scaler = RobustScaler()
cat_encoders = trial.suggest_categorical("cat_encoders", ["ordinal", "one_hot"])
if cat_encoders == "ordinal":
encoder = OrdinalEncoder(handle_unknown="ignore")
else:
encoder = OneHotEncoder(handle_unknown="ignore")
clustering = trial.suggest_categorical("clustering", ["KMeans", "HDB", None])
if clustering == "KMeans":
num_of_clusters = trial.suggest_int("num_of_clusters", 2, 15)
clust_algo = KmeansClustering(n_clusters=num_of_clusters, scaler=scaler)
elif clustering == "HDB":
min_cluster_size = trial.suggest_int("min_cluster_size", 5, 100, step=5)
min_samples = trial.suggest_int("min_samples", 10, 100, step=10)
cluster_selection_epsilon = trial.suggest_float("epsilon", 0.1, 0.5)
clust_algo = HDBSCANClustering(
min_cluster_size=min_cluster_size,
min_samples=min_samples,
cluster_selection_epsilon=cluster_selection_epsilon,
scaler=scaler,
)
else:
clust_algo = "passthrough"
centroids = trial.suggest_categorical("kmeans_centroids", ["KMeansCentroids", None])
if centroids == "KMeansCentroids":
num_of_clusters = trial.suggest_int("num_of_centroids", 2, 15)
centroids_algo = KmeansClusterDistance(
n_clusters=num_of_clusters, scaler=scaler
)
else:
centroids_algo = "passthrough"
class_w = trial.suggest_categorical("log_reg_class_weights", ["balanced", None])
estimator = LogisticRegression(random_state=42, class_weight=class_w)
num_cat_transf = ColumnTransformer(
[
("numeric", scaler, num_col_idx),
("cat_enc", encoder, [cat_col_idx[0]]),
("one_hot", OneHotEncoder(handle_unknown="ignore"), [cat_col_idx[1]]),
],
)
pipeline = imb_make_pipeline(
num_cat_transf, smote, clust_algo, centroids_algo, estimator
)
score = cross_val_score(pipeline, X, y, cv=cv, scoring=metric)
return np.mean(score)
def pipeline_objective_2(
trial,
X: np.ndarray,
y: np.ndarray,
num_col_idx: List[int],
cat_col_idx: List[int],
cv: StratifiedKFold,
metric: str,
) -> float:
"Pre-processing pipeline optimization objective function"
scalers = trial.suggest_categorical("scalers", ["minmax", "standard", "robust"])
if scalers == "minmax":
scaler = MinMaxScaler()
elif scalers == "standard":
scaler = StandardScaler()
else:
scaler = RobustScaler()
cat_encoders = trial.suggest_categorical("cat_encoders", ["ordinal", "one_hot"])
if cat_encoders == "ordinal":
encoder = OrdinalEncoder(handle_unknown="ignore")
else:
encoder = OneHotEncoder(handle_unknown="ignore")
clustering = trial.suggest_categorical("clustering", ["KMeans", None])
if clustering == "KMeans":
num_of_clusters = trial.suggest_int("num_of_clusters", 2, 15)
clust_algo = KmeansClustering(n_clusters=num_of_clusters, scaler=scaler)
else:
clust_algo = "passthrough"
centroids = trial.suggest_categorical("kmeans_centroids", ["KMeansCentroids", None])
if centroids == "KMeansCentroids":
num_of_clusters = trial.suggest_int("num_of_centroids", 2, 15)
centroids_algo = KmeansClusterDistance(
n_clusters=num_of_clusters, scaler=scaler
)
else:
centroids_algo = "passthrough"
class_w = trial.suggest_categorical("log_reg_class_weights", ["balanced", None])
estimator = SGDClassifier(class_weight=class_w, random_state=42)
num_cat_transf = ColumnTransformer(
[("numeric", scaler, num_col_idx), ("cat_enc", encoder, cat_col_idx),]
)
pipeline = imb_make_pipeline(num_cat_transf, clust_algo, centroids_algo, estimator)
score = cross_val_score(pipeline, X, y, cv=cv, scoring=metric)
return np.mean(score)
def tune_pipeline(
objective_func: Callable,
direction: str,
n_trials: int,
X: np.ndarray,
y: np.ndarray,
num_col_idx: List[int],
cat_col_idx: List[int],
cv: StratifiedKFold,
metric: str,
) -> Tuple[float, dict]:
"""Funtion to tune a numerical pipeline. Returns best value and tuned
pipeline hyper-parameters"""
study = optuna.create_study(direction=direction)
func = lambda trial: objective_func(
trial, X, y, num_col_idx, cat_col_idx, cv, metric
)
study.optimize(func, n_trials=n_trials)
return round(study.best_value, 3), study.best_params
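# Hedged usage sketch for tune_pipeline with pipeline_objective above; note that objective
# expects exactly two categorical columns, and the trial count and scoring metric here are
# illustrative assumptions.
def _example_tune_pipeline(
    X: np.ndarray, y: np.ndarray, num_col_idx: List[int], cat_col_idx: List[int]
) -> Tuple[float, dict]:
    """Hedged sketch: searches pre-processing choices for 30 trials"""
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    return tune_pipeline(
        pipeline_objective, "maximize", 30, X, y, num_col_idx, cat_col_idx, cv,
        "average_precision",
    )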
def pipeline_objective_smotenc(
trial,
X: np.ndarray,
y: np.ndarray,
cat_col_idx: List[int],
scaler: Callable,
cv: StratifiedKFold,
metric: str,
) -> float:
"Pre-processing pipeline optimization objective function with Smotenc"
smote_num = trial.suggest_categorical("num_smote_method", ["Smotenc", None])
if smote_num == "smotenc":
smote = SMOTENC(categorical_features=cat_col_idx, random_state=42)
else:
smote = "passthrough"
clustering = trial.suggest_categorical("clustering", ["KMeans", None])
if clustering == "KMeans":
num_of_clusters = trial.suggest_int("num_of_clusters", 2, 15)
clust_algo = KmeansClustering(n_clusters=num_of_clusters, scaler=scaler)
else:
clust_algo = "passthrough"
centroids = trial.suggest_categorical("kmeans_centroids", ["KMeansCentroids", None])
if centroids == "KMeansCentroids":
num_of_clusters = trial.suggest_int("num_of_centroids", 2, 15)
centroids_algo = KmeansClusterDistance(
n_clusters=num_of_clusters, scaler=scaler
)
else:
centroids_algo = "passthrough"
class_w = trial.suggest_categorical("log_reg_class_weights", ["balanced", None])
estimator = LogisticRegression(random_state=42, class_weight=class_w)
pipeline = imb_make_pipeline(smote, clust_algo, centroids_algo, estimator)
score = cross_val_score(pipeline, X, y, cv=cv, scoring=metric)
return np.mean(score)
def pipeline_objective_smotenc_2(
trial,
X: np.ndarray,
y: np.ndarray,
cat_col_idx: List[int],
scaler: Callable,
cv: StratifiedKFold,
metric: str,
) -> float:
"Pre-processing pipeline optimization objective function with Smotenc"
smote_num = trial.suggest_categorical("num_smote_method", ["Smotenc", None])
if smote_num == "smotenc":
smote = SMOTENC(categorical_features=cat_col_idx, random_state=42)
else:
smote = "passthrough"
clustering = trial.suggest_categorical("clustering", ["KMeans", None])
if clustering == "KMeans":
num_of_clusters = trial.suggest_int("num_of_clusters", 2, 15)
clust_algo = KmeansClustering(n_clusters=num_of_clusters, scaler=scaler)
else:
clust_algo = "passthrough"
centroids = trial.suggest_categorical("kmeans_centroids", ["KMeansCentroids", None])
if centroids == "KMeansCentroids":
num_of_clusters = trial.suggest_int("num_of_centroids", 2, 15)
centroids_algo = KmeansClusterDistance(
n_clusters=num_of_clusters, scaler=scaler
)
else:
centroids_algo = "passthrough"
cat_transf = ColumnTransformer(
[("cat_enc", OneHotEncoder(handle_unknown="ignore"), cat_col_idx)],
remainder="passthrough",
)
class_w = trial.suggest_categorical("log_reg_class_weights", ["balanced", None])
estimator = SGDClassifier(class_weight=class_w, random_state=42)
pipeline = imb_make_pipeline(
smote, cat_transf, clust_algo, centroids_algo, estimator
)
score = cross_val_score(pipeline, X, y, cv=cv, scoring=metric)
return np.mean(score)
def tune_pipeline_smotenc(
objective_func: Callable,
direction: str,
n_trials: int,
X: np.ndarray,
y: np.ndarray,
cat_col_idx: List[int],
scaler: Callable,
cv: StratifiedKFold,
metric: str,
) -> Tuple[float, dict]:
"""Funtion to tune a numerical pipeline. Returns best value and tuned
pipeline hyper-parameters"""
study = optuna.create_study(direction=direction)
func = lambda trial: objective_func(trial, X, y, cat_col_idx, scaler, cv, metric)
study.optimize(func, n_trials=n_trials)
return round(study.best_value, 3), study.best_params
def xgboost_hptuned_eval_cv(
clf: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
cv: StratifiedKFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a XGBoost model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different classification scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
for i, metric in enumerate(
[
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]
):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)],
remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
clf.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
verbose=False,
)
if score_name in ["Average precision", "Roc auc"]:
y_val_pred = clf.predict_proba(X_val_tr)[:, 1]
else:
y_val_pred = clf.predict(X_val_tr)
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = clf.best_ntree_limit
if i == 0:
scores[f"Max numb of trees_{score_name}"] = int(max(best_ntree_limit))
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
def xgboost_reg_hptuned_eval_cv(
reg: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
cv: KFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a XGBoost Regression model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different regression scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Regressor"].append(reg.__class__.__name__)
for i, metric in enumerate([mean_squared_error, mean_squared_error, r2_score]):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)],
remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
reg.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
verbose=False,
)
y_val_pred = reg.predict(X_val_tr)
if i == 1:
cv_scores[idx] = metric(y_val, y_val_pred, squared=False)
else:
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = reg.best_ntree_limit
if i == 0:
scores[f"Max numb of trees_Root {score_name.lower()}"] = int(
max(best_ntree_limit)
)
if i == 1:
score_name = f"Root {score_name.lower()}"
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Regressor")
score_df = score_df.round(3)
return score_df
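# Usage sketch (illustrative only; `xgb_reg`, `X_train` and `y_train` are assumed
# to exist elsewhere in the project):
#
#     cv = KFold(n_splits=5, shuffle=True, random_state=42)
#     xgb_reg_scores = xgboost_reg_hptuned_eval_cv(xgb_reg, X_train, y_train, cv,
#                                                  num_columns_idx=[0, 1, 2])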
def light_gbm_hptuned_eval_cv(
clf: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
multi_class: bool = None,
) -> pd.DataFrame:
"""Takes a LightGBM model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different classification scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
for i, metric in enumerate(
[
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]
):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
if multi_class and score_name in [
"Precision",
"Recall",
"Average precision",
"Roc auc",
]:
continue
cv_scores = np.empty(cv.n_splits)
if multi_class and score_name == "F1":
score_name = f"{score_name} {multi_class}"
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
clf.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
callbacks=[early_stopping(100)],
)
if score_name in ["Average precision", "Roc auc"]:
y_val_pred = clf.predict_proba(X_val_tr)[:, 1]
else:
y_val_pred = clf.predict(X_val_tr)
if score_name == "F1 macro":
cv_scores[idx] = metric(y_val, y_val_pred, average=multi_class)
else:
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = clf.best_iteration_
if i == 0:
scores["Max numb of trees"] = int(max(best_ntree_limit)) + 1
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
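# Usage sketch (illustrative only; `lgbm_clf`, `X_train`, `y_train` and the column
# indices are assumed; for a multi-class problem pass e.g. multi_class="macro"):
#
#     cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
#     lgbm_scores = light_gbm_hptuned_eval_cv(lgbm_clf, X_train, y_train,
#                                             StandardScaler(), cv,
#                                             num_columns_idx=[0, 1, 2])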
def sgdclf_hptuned_eval_cv(
clf: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: StratifiedKFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a SGD Classifier model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different classification scores in
a DataFrame"""
scores = defaultdict(list)
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
for i, metric in enumerate(
[
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]
):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)], remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
clf.fit(X_train_tr, y_train)
if score_name in ["Average precision", "Roc auc"]:
y_val_pred = clf.predict_proba(X_val_tr)[:, 1]
else:
y_val_pred = clf.predict(X_val_tr)
cv_scores[idx] = metric(y_val, y_val_pred)
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
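# Usage sketch (illustrative only; `sgd_clf`, `X_train` and `y_train` are assumed;
# note that predict_proba is only available when SGDClassifier uses a probabilistic
# loss such as "log_loss"/"log" or "modified_huber"):
#
#     cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
#     sgd_scores = sgdclf_hptuned_eval_cv(sgd_clf, X_train, y_train,
#                                         StandardScaler(), cv,
#                                         num_columns_idx=[0, 1, 2])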
def light_gbm_reg_hptuned_eval_cv(
reg: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
scaler: Callable,
cv: KFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a LightGBM Regression model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different regression scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Regressor"].append(reg.__class__.__name__)
for i, metric in enumerate([mean_squared_error, mean_squared_error, r2_score]):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", scaler, num_columns_idx)],
remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
reg.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
callbacks=[early_stopping(100)],
)
y_val_pred = reg.predict(X_val_tr)
if i == 1:
cv_scores[idx] = metric(y_val, y_val_pred, squared=False)
else:
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = reg.best_iteration_
if i == 0:
scores[f"Max numb of trees_Root {score_name.lower()}"] = int(
max(best_ntree_limit)
)
if i == 1:
score_name = f"Root {score_name.lower()}"
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Regressor")
score_df = score_df.round(3)
return score_df
def catboost_hptuned_eval_cv(
clf: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
cv: StratifiedKFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a CatBoost model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different classification scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
for i, metric in enumerate(
[
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]
):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)],
remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
clf.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
)
if score_name in ["Average precision", "Roc auc"]:
y_val_pred = clf.predict_proba(X_val_tr)[:, 1]
else:
y_val_pred = clf.predict(X_val_tr)
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = clf.get_best_iteration()
if i == 0:
scores[f"Max numb of trees_{score_name}"] = int(max(best_ntree_limit)) + 1
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
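# Usage sketch (illustrative only; `cat_clf`, `X_train` and `y_train` are assumed):
#
#     cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
#     cat_scores = catboost_hptuned_eval_cv(cat_clf, X_train, y_train, cv,
#                                           num_columns_idx=[0, 1, 2])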
def catboost_reg_hptuned_eval_cv(
reg: BaseEstimator,
X: np.ndarray,
y: np.ndarray,
cv: KFold,
num_columns_idx: List[int],
) -> pd.DataFrame:
"""Takes a CatBoost Regression model with tuned hyper-parameters, training set,
training labels, numerical columns list and returns different regression scores in
a DataFrame"""
scores = defaultdict(list)
best_ntree_limit = np.empty(cv.n_splits)
start = time.time()
scores["Regressor"].append(reg.__class__.__name__)
for i, metric in enumerate([mean_squared_error, mean_squared_error, r2_score]):
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
cv_scores = np.empty(cv.n_splits)
for idx, (train_idx, val_idx) in enumerate(cv.split(X, y)):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
numeric_scaler = ColumnTransformer(
[("numeric", StandardScaler(), num_columns_idx)],
remainder="passthrough",
)
X_train_tr = numeric_scaler.fit_transform(X_train)
X_val_tr = numeric_scaler.transform(X_val)
reg.fit(
X_train_tr,
y_train,
eval_set=[(X_val_tr, y_val)],
early_stopping_rounds=100,
)
y_val_pred = reg.predict(X_val_tr)
if i == 1:
cv_scores[idx] = metric(y_val, y_val_pred, squared=False)
else:
cv_scores[idx] = metric(y_val, y_val_pred)
best_ntree_limit[idx] = reg.get_best_iteration()
if i == 0:
scores[f"Max numb of trees_Root {score_name.lower()}"] = int(
max(best_ntree_limit)
)
if i == 1:
score_name = f"Root {score_name.lower()}"
scores[score_name].append(np.mean(cv_scores))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Regressor")
score_df = score_df.round(3)
return score_df
def clfmodels_eval_test(
clf_list: List[BaseEstimator],
X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
multi_class: bool = None,
) -> pd.DataFrame:
"""Takes a list of models, transformation pipeline, training set,
training labels, test set, test labels and returns
different classification scores on a test set in a DataFrame"""
scores = defaultdict(list)
for clf in clf_list:
start = time.time()
scores["Classifier"].append(clf.__class__.__name__)
clf.fit(X_train, y_train)
y_test_pred = clf.predict(X_test)
for metric in [
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]:
score_name = (
metric.__name__.replace("_", " ")
.replace("score", "")
.capitalize()
.strip()
)
if multi_class and score_name in [
"Precision",
"Recall",
"Average precision",
"Roc auc",
]:
continue
if score_name in ["Average precision", "Roc auc"]:
y_test_pred = clf.predict_proba(X_test)[:, 1]
if multi_class and score_name == "F1":
score_name = f"{score_name} {multi_class}"
scores[score_name].append(
metric(y_test, y_test_pred, average=multi_class)
)
else:
scores[score_name].append(metric(y_test, y_test_pred))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Classifier")
score_df = score_df.round(3)
return score_df
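# Usage sketch (illustrative only; the estimators and the already-transformed
# train/test arrays below are assumptions, not names defined in this module):
#
#     test_scores = clfmodels_eval_test(
#         [log_reg, random_forest, xgb_clf], X_train, y_train, X_test, y_test
#     )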
def regmodels_eval_test(
reg_list: List[BaseEstimator],
X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
) -> pd.DataFrame:
"""Takes a list of models, transformation pipeline, training set,
training labels, test set, test labels and returns
different regression scores on a test set in a DataFrame"""
scores = defaultdict(list)
for reg in reg_list:
start = time.time()
scores["Regressor"].append(reg.__class__.__name__)
reg.fit(X_train, y_train)
y_test_pred = reg.predict(X_test)
for i, metric in enumerate([mean_squared_error, mean_squared_error, r2_score]):
score_name = (
metric.__name__.replace("_", " ")
.replace("score", "")
.capitalize()
.strip()
)
if i == 1:
score_name = f"Root {score_name.lower()}"
scores[score_name].append(metric(y_test, y_test_pred, squared=False))
else:
scores[score_name].append(metric(y_test, y_test_pred))
end = time.time()
scores["Total time in sec"].append((end - start))
score_df = pd.DataFrame(scores).set_index("Regressor")
score_df = score_df.round(3)
return score_df
def votingclf_scores(
votingclf: BaseEstimator,
X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
) -> pd.DataFrame:
"""Takes a voting model, training set,
training labels, test set and returns
different classification scores on a test set in a DataFrame"""
scores = defaultdict(list)
for voting in ("hard", "soft"):
start = time.time()
scores["Classifier"].append(f"{voting.capitalize()} Voting Classifier")
votingclf.set_params(voting=voting)
votingclf.fit(X_train, y_train)
y_test_pred = votingclf.predict(X_test)
for metric in [
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]:
score_name = (
metric.__name__.replace("_", " ")
.replace("score", "")
.capitalize()
.strip()
)
if score_name in ["Average precision", "Roc auc"]:
if voting == "hard":
scores[score_name].append(np.nan)
else:
y_test_pred = votingclf.predict_proba(X_test)[:, 1]
scores[score_name].append(metric(y_test, y_test_pred))
else:
scores[score_name].append(metric(y_test, y_test_pred))
end = time.time()
scores["Total time in sec"].append((end - start))
voting_score_df = pd.DataFrame(scores).set_index("Classifier")
voting_score_df = voting_score_df.round(3)
return voting_score_df
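# Usage sketch (illustrative only; `votingclf` is assumed to be a VotingClassifier
# whose base estimators all implement predict_proba, since the soft-voting pass
# relies on it):
#
#     voting_scores = votingclf_scores(votingclf, X_train, y_train, X_test, y_test)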
def stackingclf_scores(
stackingclf: BaseEstimator,
X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
) -> pd.DataFrame:
"""Takes a stacking model, training set,
training labels, test set and returns
different classification scores on a test set in a DataFrame"""
scores = defaultdict(list)
start = time.time()
stackingclf.fit(X_train, y_train)
y_test_pred = stackingclf.predict(X_test)
scores["Classifier"].append(
f"Stacking Classifier with Logistic Reg as final estimator"
)
for metric in [
balanced_accuracy_score,
accuracy_score,
precision_score,
recall_score,
f1_score,
average_precision_score,
roc_auc_score,
]:
score_name = (
metric.__name__.replace("_", " ").replace("score", "").capitalize().strip()
)
if score_name in ["Average precision", "Roc auc"]:
y_test_pred = stackingclf.predict_proba(X_test)[:, 1]
scores[score_name].append(metric(y_test, y_test_pred))
else:
scores[score_name].append(metric(y_test, y_test_pred))
end = time.time()
scores["Total time in sec"].append((end - start))
stacking_score_df = pd.DataFrame(scores).set_index("Classifier")
stacking_score_df = stacking_score_df.round(3)
return stacking_score_df
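# Usage sketch (illustrative only; `stackingclf` is assumed to be a StackingClassifier
# with LogisticRegression as its final estimator, matching the label used above):
#
#     stacking_scores = stackingclf_scores(stackingclf, X_train, y_train,
#                                          X_test, y_test)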
def quartile_proportions(data: pd.DataFrame, column_name: str) -> pd.Series:
"Counts proporttions of a column values and returns series"
return data[column_name].value_counts() / len(data)
def target_stratification_test(data: pd.DataFrame, target: str) -> pd.DataFrame:
"""Takes pandas data frame and target column name and returns a random train_test split,
a stratified by quartiles train_test split and errors percentage"""
df = data.copy()
quartile_list = [0, 0.25, 0.5, 0.75, 1.0]
labels = [0, 1, 2, 3]
df[f"{target}_quartiles"] = pd.qcut(df[target], quartile_list, labels)
train_data_random, test_data_random = train_test_split(df, random_state=42)
train_data_strat, test_data_strat = train_test_split(
df, stratify=df[f"{target}_quartiles"], random_state=42
)
overall = quartile_proportions(df, f"{target}_quartiles")
random_train = quartile_proportions(train_data_random, f"{target}_quartiles")
strat_train = quartile_proportions(train_data_strat, f"{target}_quartiles")
random_test = quartile_proportions(test_data_random, f"{target}_quartiles")
strat_test = quartile_proportions(test_data_strat, f"{target}_quartiles")
compare_props = pd.DataFrame(
{
"Overall": overall,
"Random_train_set": random_train,
"Stratified_train_set": strat_train,
"Random_test_set": random_test,
"Stratified_test_set": strat_test,
}
).sort_index()
compare_props["Rand_train_set %error"] = (
100 * compare_props["Random_train_set"] / compare_props["Overall"] - 100
)
compare_props["Strat_train_set %error"] = (
100 * compare_props["Stratified_train_set"] / compare_props["Overall"] - 100
)
compare_props["Rand_test_set %error"] = (
100 * compare_props["Random_test_set"] / compare_props["Overall"] - 100
)
compare_props["Strat_test_set %error"] = (
100 * compare_props["Stratified_train_set"] / compare_props["Overall"] - 100
)
return compare_props
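# Usage sketch (illustrative only; `housing_df` and the "price" target column are
# hypothetical names):
#
#     comparison = target_stratification_test(housing_df, target="price")
#     comparison[["Rand_test_set %error", "Strat_test_set %error"]]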
def stratify_regression_data(
data: pd.DataFrame, target: str
) -> Tuple[pd.DataFrame, ...]:
"""Takes a pandas DataFrame and returns train and test sets stratified by quartiles
for data and target"""
df = data.copy()
quartile_list = [0, 0.25, 0.5, 0.75, 1.0]
labels = [0, 1, 2, 3]
df[f"{target}_quartiles"] = | pd.qcut(df[target], quartile_list, labels) | pandas.qcut |
from pathlib import Path
import pandas as pd
import numpy as np
import re
from collections import Counter
import logging
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel( logging.INFO )
__all__ = ['read_geo', 'detect_header_pattern']
''' circular imports problems --- https://stackabuse.com/python-circular-imports/
try:
# first: try to map the canonical version here
import methylprep
read_geo = methylprep.read_geo
except ImportError as error:
# if user doesn't have methylprep installed for the canonical version of this function, import this copy below
'''
def read_geo_v1(filepath, verbose=False, debug=False):
"""Use to load preprocessed GEO data into methylcheck. Attempts to find the sample beta/M_values
in the CSV/TXT/XLSX file and turn it into a clean dataframe, with probe ids in the index/rows.
VERSION 1.0 (deprecated June 2020 for v3, called "read_geo")
- reads a downloaded file, either in csv, xlsx, pickle, txt
    - looks for \d_RxxCxx patterned headings and a probe index
- sets index in df to probes
- sets columns to sample names
- forces probe values to be floats, if strings/mixed
- if filename has 'intensit' or 'signal' in it, this converts to betas and saves
even if filename doesn't match, if columns have Methylated in them, it will convert and save
- detect multi-line headers and adjusts dataframe columns accordingly
- returns the usable dataframe
TODO:
- handle files with .Signal_A and .Signal_B instead of Meth/Unmeth
- handle processed files with sample_XX
notes:
this makes inferences based on strings in the filename, and based on the column names.
"""
this = Path(filepath)
if '.csv' in this.suffixes:
raw = pd.read_csv(this)
elif '.xlsx' in this.suffixes:
raw = pd.read_excel(this)
elif '.pkl' in this.suffixes:
raw = pd.read_pickle(this)
return raw
elif '.txt' in this.suffixes:
raw = pd.read_csv(this, sep='\t')
if raw.shape[1] == 1: # pandas doesn't handle \r\n two char line terminators, but seems to handle windows default if unspecified.
raw = pd.read_csv(this, sep='\t', lineterminator='\r') # leaves \n in values of first column, but loads
# lineterminator='\r\n')
# or use codecs first to load and parse text file before dataframing...
else:
        LOGGER.error(f'ERROR: this file type ({this.suffix}) is not supported')
return
    # next, see if betas are present or do we need to calculate them?
test = raw.iloc[0:100]
unmeth = False
if 'intensit' in str(this.name).lower() or 'signal' in str(this.name).lower(): # signal intensities
unmeth = True # need to calculate beta from unmeth/meth columns
LOGGER.info('Expecting raw meth/unmeth probe data')
else:
#meth_pattern_v1 = re.compile(r'.*[_ \.]Methylated[_ \.]', re.I)
meth_pattern = re.compile(r'.*[_ \.]?(Un)?methylated[_ \.]?', re.I)
meth_cols = len([col for col in test.columns if re.match(meth_pattern, col)])
if meth_cols > 0:
unmeth = True
# this should work below, so that even if betas are present, it will use betas first, then fall back to meth/unmeth
def calculate_beta_value(methylated_series, unmethylated_series, offset=100):
""" borrowed from methylprep.processing.postprocess.py """
methylated = max(methylated_series, 0)
unmethylated = max(unmethylated_series, 0)
total_intensity = methylated + unmethylated + offset
intensity_ratio = methylated / total_intensity
return intensity_ratio
# look for probe names in values (of first 100 responses)
index_name = None
multiline_header = False
sample_pattern = re.compile(r'\w?\d+_R\d{2}C\d{2}$') # $ ensures column ends with the regex part
sample_pattern_loose = re.compile(r'\w?\d+_R\d{2}C\d{2}.*beta', re.I)
probe_pattern = re.compile(r'(cg|rs|ch\.\d+\.|ch\.X\.|ch\.Y\.)\d+')
samples = []
for col in test.columns:
probes = [i for i in test[col] if type(i) == str and re.match(probe_pattern,i)] #re.match('cg\d+',i)]
if len(probes) == len(test):
index_name = col
if verbose:
LOGGER.info(f"Found probe names in `{col}` column and setting as index.")
elif len(probes)/len(test) > 0.8:
index_name = col
multiline_header = True
break
if re.match(sample_pattern, col):
samples.append(col)
if multiline_header: # start over with new column names
try:
start_index = len(test) - len(probes) - 1
# recast without header, starting at row before first probe
new_column_names = pd.Series(list(raw.iloc[start_index])).replace(np.nan, 'No label')
probe_list = raw[index_name].iloc[start_index + 1:]
probe_list = probe_list.rename(raw[index_name].iloc[start_index + 1])
bad_probe_list = [probe for probe in probe_list if not re.match(probe_pattern, probe)] # not probe.startswith('cg')]
if bad_probe_list != []:
LOGGER.error(f'ERROR reading probes with multiline header: {bad_probe_list[:200]}')
return
raw = raw.iloc[start_index + 1:]
raw.columns = new_column_names
test = raw.iloc[0:100]
samples = []
for col in test.columns:
if re.match(sample_pattern, col):
samples.append(col)
# raw has changed.
out_df = pd.DataFrame(index=probe_list)
except Exception as e:
LOGGER.error("ERROR: Unable to parse the multi-line header in this file. If you manually edit the file headers to ensure the sample intensities unclude 'Methylated' and 'Unmethylated' in column names, it might work on retry.")
return
else:
out_df = pd.DataFrame(index=raw[index_name]) # only used with unmethylated data sets
if samples == []:
# in some cases there are multiple columns matching sample_ids, and we want the 'beta' one
for col in test.columns:
if re.match(sample_pattern_loose, col):
samples.append(col)
# or we need TWO columns per sample and we calculate 'beta'.
if samples == [] and unmeth:
unmeth_samples = []
meth_samples = []
#unmeth_pattern_v1 = re.compile(r'.*[_ \.]Unmethylated[_ \.].*', re.I)
#meth_pattern_v1 = re.compile(r'.*[_ \.]Methylated[_ \.].*', re.I)
unmeth_pattern = re.compile(r'.*[_ \.]?Unmethylated[_ \.]?', re.I)
meth_pattern = re.compile(r'.*[_ \.]?(?<!Un)Methylated[_ \.]?', re.I)
for col in test.columns:
if re.match(unmeth_pattern, col):
unmeth_samples.append(col)
if debug:
LOGGER.info(col)
if re.match(meth_pattern, col):
meth_samples.append(col)
if debug:
LOGGER.info(col)
if unmeth_samples != [] and meth_samples != [] and len(unmeth_samples) == len(meth_samples):
# next: just need to match these up. they should be same if we drop the meth/unmeth part
if verbose:
LOGGER.info(f"{len(unmeth_samples)} Samples with Methylated/Unmethylated probes intensities found. Calculating Beta Values.")
linked = []
for col in unmeth_samples:
test_name = col.replace('Unmethylated','Methylated')
if test_name in meth_samples:
linked.append([col, test_name])
# Here, we calculate betas for full raw data frame
for col_u, col_m in linked:
col_name = col_u.replace('Unmethylated','').replace('Signal','').strip()
unmeth_series = raw[col_u]
meth_series = raw[col_m]
betas = calculate_beta_value(meth_series, unmeth_series)
try:
out_df[col_name] = betas
samples.append(col_name)
except Exception as e:
LOGGER.error('ERROR', col_name, len(betas), out_df.shape, e)
elif unmeth:
LOGGER.info(f"File appears to contain probe intensities, but the column names don't match up for samples, so can't calculate beta values.")
if samples != [] and verbose and not unmeth:
LOGGER.info(f"Found {len(samples)} samples on second pass, apparently beta values with a non-standard sample_naming convention.")
elif samples != [] and verbose and unmeth:
pass
elif samples == []:
# no samples matched, so show the columns instead
LOGGER.info(f"No samples found. Here are some column names:")
LOGGER.info(list(test.columns)[:20])
return
    if index_name is None:
LOGGER.error("Error: probe names not found in any columns")
return
if unmeth and samples != [] and out_df.shape[1] > 1:
# column names are being merged and remapped here as betas
df = out_df # index is already set
elif multiline_header:
df = raw.loc[:, samples]
df.index = probe_list
else:
df = raw[[index_name] + samples]
df = df.set_index(index_name)
# finally, force probe values to be floats
num_converted = 0
for col in df.columns:
if df[col].dtype.kind != 'f' and df[col].dtype.kind == 'O':
# convert string to float
try:
#df[col] = df[col].astype('float16')
# use THIS when mix of numbers and strings
df[col] = pd.to_numeric(df[col], errors='coerce')
num_converted += 1
except:
LOGGER.error('error')
df = df.drop(columns=[col])
if verbose:
if num_converted > 0:
LOGGER.info(f"Converted {num_converted} samples from string to float16.")
LOGGER.info(f"Found {len(samples)} samples and dropped {len(raw.columns) - len(samples)} meta data columns.")
return df
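# Usage sketch (illustrative only; the GEO series file name below is hypothetical):
#
#     betas = read_geo_v1('GSE00000_signal_intensities.txt', verbose=True)
#     betas.head()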
def pd_read_big_csv(filepath, max_cols=1000, **kwargs):
header=pd.read_csv(filepath, nrows=10, **kwargs)
ncols=len(header.columns)
if ncols <= max_cols:
        return pd.read_csv(filepath, **kwargs)
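# The wide-file branch of pd_read_big_csv is not shown here; a minimal sketch of the
# likely intent (reading the columns in chunks of `max_cols` and concatenating them)
# could look like the commented code below -- this is an assumption, not the original
# logic:
#
#     chunks = [pd.read_csv(filepath, usecols=header.columns[i:i + max_cols], **kwargs)
#               for i in range(0, ncols, max_cols)]
#     return pd.concat(chunks, axis=1)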
import pandas as pd
import numpy as np
import os
import requests
import logging
import argparse
import re
import pathlib
API_KEY = '<KEY>'
MAX_VARS = 50
STATE_CODES = {'Alabama': ('AL', '01'), 'Alaska': ('AK', '02'),
'Arizona': ('AZ', '04'), 'Arkansas': ('AR', '05'),
'California': ('CA', '06'), 'Colorado': ('CO', '08'),
'Connecticut': ('CT', '09'), 'Delaware': ('DE', '10'),
'District of Columbia': ('DC', '11'), 'Florida': ('FL', '12'),
'Georgia': ('GA', '13'), 'Hawaii': ('HI', '15'),
'Idaho': ('ID', '16'), 'Illinois': ('IL', '17'),
'Indiana': ('IN', '18'), 'Iowa': ('IA', '19'),
'Kansas': ('KS', '20'), 'Kentucky': ('KY', '21'),
'Louisiana': ('LA', '22'), 'Maine': ('ME', '23'),
'Maryland': ('MD', '24'), 'Massachusetts': ('MA', '25'),
'Michigan': ('MI', '26'), 'Minnesota': ('MN', '27'),
'Mississippi': ('MS', '28'), 'Missouri': ('MO', '29'),
'Montana': ('MT', '30'), 'Nebraska': ('NE', '31'),
'Nevada': ('NV', '32'), 'New Hampshire': ('NH', '33'),
'New Jersey': ('NJ', '34'), 'New Mexico': ('NM', '35'),
'New York': ('NY', '36'), 'North Carolina': ('NC', '37'),
'North Dakota': ('ND', '38'), 'Ohio': ('OH', '39'),
'Oklahoma': ('OK', '40'), 'Oregon': ('OR', '41'),
'Pennsylvania': ('PA', '42'), 'Rhode Island': ('RI', '44'),
'South Carolina': ('SC', '45'), 'South Dakota': ('SD', '46'),
'Tennessee': ('TN', '47'), 'Texas': ('TX', '48'),
'Utah': ('UT', '49'), 'Vermont': ('VT', '50'),
'Virginia': ('VA', '51'), 'Washington': ('WA', '53'),
'West Virginia': ('WV', '54'), 'Wisconsin': ('WI', '55'),
'Wyoming': ('WY', '56')}
def extract_vars(vars_file):
""" extract vars to be downloaded from munging file
:param vars_file: string path and filename to list of variables
should have column header: variable_name
if doing transformations, headers should be (tab delimited):
variable_name, operator, argument1, argument2
:returns: list of sorted variable names
"""
# get vars/transform file
do_transforms = False
try:
transforms = pd.read_csv(vars_file, sep='\t')
except:
logging.error('unable to read {0}'.format(vars_file))
return
if 'variable_name' not in transforms.columns:
logging.error('missing variable_name column')
return
if len(transforms.columns) > 1:
req_cols = ['operator', 'argument1', 'argument2']
if len([x for x in req_cols if x in transforms.columns]) != \
len(req_cols):
logging.warn('missing some required columns for transforms.'
' will pull raw variables instead')
else:
do_transforms = True
# get vars into a list
try:
f = open(vars_file, 'r')
acs_vars = f.readlines()
acs_vars = [vars.strip() for vars in acs_vars if vars.strip()]
except:
logging.error('unable to read vars file {0}'.format(vars_file))
return
# get variable and file info
if not do_transforms:
raw_vars = transforms['variable_name'].values.tolist()
else:
arg1s = [x for x in transforms['argument1'].astype(str).values.tolist()
if re.match(r'^[BCD][\d_]+$', x)]
arg2s = [x for x in transforms['argument2'].astype(str).values.tolist()
if re.match(r'^[BCD][\d_]+$', x)]
raw_vars = list(set(arg1s + arg2s))
raw_vars = sorted(raw_vars)
return transforms, raw_vars
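# Usage sketch (illustrative only; the tab-delimited vars file name is hypothetical):
#
#     transforms, raw_vars = extract_vars('acs_variables.tsv')
#     # raw_vars is the sorted list of raw ACS variable names to request from the API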
def do_transformations(data_df, transform_df):
""" perform transformation on data frame as listed in transform data frame
The transform dataframe should have four columns:
new_variable, operator, argument1, argument 2
new_variable is the variable to create
operator is the operation to perform
currently supports: -, +, *, /, =
argument1/2 are either column names or scalar values.
Operations are performed sequentially so you can include a new variable
created via the transforms as an argument in a later calculation.
:param data_df: pandas dataframe with raw data
:param transform_df: pandas dataframe with transformations
:return: transformed pandas dataframe
"""
ok_operators = {'-', '+', '*', '/', '='}
transform_df['operator'] = transform_df['operator'].str.strip()
for r in transform_df.itertuples():
if r.operator in ok_operators:
try:
rarg1 = str(r.argument1).strip()
rarg2 = str(r.argument2).strip()
if rarg1 in data_df.columns:
arg1 = data_df[rarg1]
else:
arg1 = float(rarg1)
if rarg2 in data_df.columns:
arg2 = data_df[rarg2]
elif rarg2.lower() != 'nan':
arg2 = float(rarg2)
else:
arg2 = ''
if r.operator == '-':
data_df[r.variable_name] = arg1 - arg2
elif r.operator == '+':
data_df[r.variable_name] = arg1 + arg2
elif r.operator == '*':
data_df[r.variable_name] = arg1 * arg2
elif r.operator == '=':
data_df[r.variable_name] = arg1
elif r.operator == '/':
if r.argument2 and r.argument2 != 'NaN':
if isinstance(arg2, float):
data_df[r.variable_name] = arg1 / arg2
else:
valid_mask = (data_df[r.argument2].notnull()) & \
(data_df[r.argument2] != 0)
data_df[r.variable_name] = (arg1 / arg2).\
where(valid_mask)
data_df.loc[~valid_mask, r.variable_name] = np.nan
else:
logging.warning('tried to do bad division of {0}/{1}'.\
format(r.argument1, r.argument2))
except:
logging.warning('invalid argument(s): {0}, {1}'.\
format(r.argument1, r.argument2))
return data_df
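# Usage sketch (illustrative only; the new variable name and the ACS table arguments
# below are examples, not part of this module):
#
#     transform_df = pd.DataFrame({
#         'variable_name': ['pct_renters'],
#         'operator': ['/'],
#         'argument1': ['B25003_003'],
#         'argument2': ['B25003_001'],
#     })
#     data_df = do_transformations(data_df, transform_df)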
def get_vars_lookup(year, vars, transforms):
""" downloads variable template data from census
:param year: string four-digit year
:param vars: list of strings of var names
:param transforms: DataFrame of transformations
:return: list of vars and their transformations
"""
url = ("https://api.census.gov/data/{y}/acs/acs5/variables").format(y=year)
r = requests.get(url)
response = r.json()
# remove header
all_vars = response[4:]
# sort by var
all_vars.sort(key=lambda g: g[0])
vars_dict = dict()
for name, label, concept in all_vars:
label = label.replace("!!", " ")
name = name[:-1]
vars_dict[name] = '{c}: {l}'.format(c=concept,l=label)
vars_labels = [var + ' = ' + vars_dict[var] for var in vars]
transforms = transforms.where(pd.notnull(transforms), None)
transformations = list()
for index, row in transforms.iterrows():
vn = row['variable_name']
op = row['operator']
arg1 = row['argument1']
arg2 = row['argument2']
if arg2==None:
t = "{vn} = {vd} ({a1})".format(vn=vn, vd=vars_dict[arg1], a1=arg1)
else:
if arg1[0] == 'B':
arg1 = vars_dict[arg1] + "({0})".format(arg1)
if arg2[0] == 'B':
arg2 = vars_dict[arg2] + "({0})".format(arg2)
t = "{vn} = {a1} {op} {a2}".format(vn=vn, a1=arg1, op=op, a2=arg2)
transformations.append(t)
vars_transforms = vars_labels + transformations
return vars_transforms
def get_vars_data(vars, state_code, year):
""" downloads variable data from census
:param vars: list of strings of var names
:param state_code: string two digit state code
:param year: string four-digit year
:return: pandas dataframe of geoid and vars data
"""
logging.info('downloading vars {0} to {1}'.format(vars[0], vars[-1]))
# add E at the end for estimate vars
cols = [var + 'E' for var in vars]
# convert list of vars to string for api
cols = ','.join(cols)
    # wait 1 min, 2 min and 5 min for each api call
    timeouts = [60, 120, 300]
    vars_data = None
    for wait in timeouts:
try:
url = ('https://api.census.gov/data/{y}/acs/acs5?get=' +
'{v}' + '&for=block%20group:*&in=state:{sc}%20county:*&key={k}').\
format(v=cols, sc=state_code, y=year, k=API_KEY)
r = requests.get(url, timeout=wait)
vars_data = r.json()
break
except:
continue
if vars_data is None:
logging.error('unable to download vars')
return
# remove header
vars_data = vars_data[1:]
# create geoids and sort by them
vars_data = [[''.join(data[-4:]), *data[:-4]] for data in vars_data]
vars_data.sort(key=lambda g: g[0])
vars_data = pd.DataFrame(vars_data, columns=['geoid',*vars])
for var in vars:
vars_data[var] = vars_data[var].astype('float64')
return vars_data
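# Usage sketch (illustrative only; a single-variable batch for Colorado, FIPS "08",
# against the 2019 5-year ACS -- a working API key is assumed):
#
#     pop_df = get_vars_data(['B01001_001'], state_code='08', year='2019')
#     pop_df.head()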
def combine_features(state_names, year, output_path):
""" combine individual csv state files into one
:param state_names: list of state names
:param year: string four-digit year
:param output_path: string path to write raw files to
:return: None
"""
logging.info('Combining all states into one file')
if state_names[0] == 'all':
state_names = sorted(STATE_CODES)
output_path = output_path.format(year=year)
output_file = output_path + 'acs_{y}_data.csv'.format(y=year)
if os.path.isfile(output_file):
os.remove(output_file)
df = pd.DataFrame()
for state in sorted(state_names):
logging.info('Adding {state}'.format(state=state))
temp = pd.read_csv(output_path + state + '.csv')
with open(output_file, mode = 'a') as f:
temp.to_csv(f, header=f.tell()==0,index = False)
os.remove(output_path + state + '.csv')
return
def acs_full(year, vars_file, output_path):
""" download all ACS data needed for model for a given year
:param year: string four-digit year
:param vars_file: string path and filename to list of variables
should have column header: variable_name
if doing transformations, headers should be (tab delimited):
variable_name, operator, argument1, argument2
:return: None
"""
state_names = sorted(STATE_CODES)
output_path = output_path.format(year=year)
# check to see which states have already been downloaded
if not pathlib.Path(output_path).exists():
os.mkdir(output_path)
completed_states = os.listdir(path=output_path)
completed_states = [state[:-4] for state in completed_states]
states = sorted(list(set(state_names) - set(completed_states)))
for state in states:
acs_main(state, year, vars_file, output_path)
return
def acs_main(state, year, vars_file, output_path):
""" download ACS data needed for model and do transformations
:param state: string state name
:param year: string four-digit year
:param vars_file: string path and filename to list of variables
should have column header: variable_name
if doing transformations, headers should be (tab delimited):
variable_name, operator, argument1, argument2
:param output_path: string path to write raw files to
:returns: None
"""
logging.info('downloading {st}'.format(st=state))
state_data = pd.DataFrame()
state_code = STATE_CODES[state][1]
output_path = output_path.format(year=year)
file_name = state + '.csv'
output_file = output_path + file_name
column_lookup = output_path + "acs_{y}_features_variables.csv".format(y=year)
if not pathlib.Path(output_path).exists():
os.mkdir(output_path)
# variables to be downloaded for transformations
transforms,vars = extract_vars(vars_file)
# create vars and transformation lookup file
if not pathlib.Path(column_lookup).exists():
logging.info('creating {cl}'.format(cl=column_lookup))
vars_lookup = get_vars_lookup(year, vars.copy(),transforms)
with open(column_lookup, 'w') as f:
f.write('\n'.join(vars_lookup))
# call api in batches of 50 vars (api limit) at a time
batch_vars = [vars[i:i+MAX_VARS] for i in range(0, len(vars), MAX_VARS)]
for vars in batch_vars:
# 50 cols of data for all rows
vars_data = get_vars_data(vars, state_code, year)
if vars_data is None:
return
if state_data.empty:
state_data = vars_data
else:
            state_data = pd.merge(state_data, vars_data, on='geoid')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
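    # Usage sketch (illustrative only; `df` is a modin DataFrame with a "city"
    # column -- a hypothetical example, not part of this module):
    #
    #     df.groupby("city").sum()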
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
            other: What to add to this DataFrame.
            axis: The axis to apply addition over. Only applicable to Series
                or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
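    # Usage sketch (illustrative only; `df` is a modin DataFrame with numeric
    # columns -- a hypothetical example, not part of this module):
    #
    #     col_ranges = df.apply(lambda col: col.max() - col.min(), axis=0)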
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
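    # Hedged usage sketch (illustrative only; "col_a" is a hypothetical column name):
    #   df.astype("float64")               # cast every column
    #   df.astype({"col_a": "float64"})    # cast only "col_a"; other columns keep their dtype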
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
            if not is_list_like(exclude):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
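    # Hedged usage sketch (illustrative only):
    #   df.describe(percentiles=[0.05, 0.95], include=[np.number])
    # restricts the summary to numeric columns and adds the 5th/95th percentiles.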
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
non_existant = [obj for obj in axes["index"] if obj not in self.index]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
non_existant = [
obj for obj in axes["columns"] if obj not in self.columns
]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
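    # Hedged usage sketch (illustrative only; "a" and "b" are hypothetical labels):
    #   df.drop(columns=["b"])        # drop a column by label
    #   df.drop(index=[0, 2])         # drop rows by index label
    #   df.drop(labels="a", axis=1)   # equivalent spelling via labels/axis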
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
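    # Hedged usage sketch (illustrative only; "a" and "b" are hypothetical numeric columns):
    #   df.eval("c = a + b")   # returns a new frame with a derived column "c"
    #   df.eval("a > b")       # returns a boolean result, one value per row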
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate the last valid observation forward
                    to the next valid observation.
                backfill / bfill: use the NEXT valid observation to fill
                    the gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = | validate_bool_kwarg(inplace, "inplace") | pandas.util._validators.validate_bool_kwarg |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy
import pandas
from numba import prange
from numba.extending import register_jitable
from numba.types import (float64, Boolean, Integer, NoneType, Number,
Omitted, StringLiteral, UnicodeType)
from sdc.datatypes.common_functions import TypeChecker
from sdc.datatypes.hpat_pandas_series_rolling_types import SeriesRollingType
from sdc.utils import sdc_overload_method
hpat_pandas_series_rolling_docstring_tmpl = """
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.{method_name}
{limitations_block}
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_{method_name}.py
:language: python
:lines: 27-
:caption: {example_caption}
:name: ex_series_rolling_{method_name}
.. command-output:: python ./series/rolling/series_rolling_{method_name}.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.{method_name} <pandas.Series.{method_name}>`
Similar method for Series.
:ref:`DataFrame.{method_name} <pandas.DataFrame.{method_name}>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.{method_name}()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_{method_name}
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg{extra_params}
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
@register_jitable
def arr_apply(arr, func):
"""Apply function for values"""
return func(arr)
@register_jitable
def arr_corr(x, y):
"""Calculate correlation of values"""
if len(x) == 0:
return numpy.nan
return numpy.corrcoef(x, y)[0, 1]
@register_jitable
def arr_nonnan_count(arr):
"""Count non-NaN values"""
return len(arr) - numpy.isnan(arr).sum()
@register_jitable
def arr_cov(x, y, ddof):
"""Calculate covariance of values 1D arrays x and y of the same size"""
if len(x) == 0:
return numpy.nan
return numpy.cov(x, y, ddof=ddof)[0, 1]
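# Hedged sanity check (not part of the library API; this helper is only
# illustrative): for identical inputs the pairwise covariance reduces to the
# unbiased sample variance.
def _example_arr_cov_check():
    x = numpy.array([1.0, 2.0, 3.0])
    return arr_cov(x, x, ddof=1)  # numpy.cov([1,2,3],[1,2,3],ddof=1)[0,1] == 1.0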
@register_jitable
def _moment(arr, moment):
mn = numpy.mean(arr)
s = numpy.power((arr - mn), moment)
return numpy.mean(s)
@register_jitable
def arr_kurt(arr):
"""Calculate unbiased kurtosis of values"""
n = len(arr)
if n < 4:
return numpy.nan
m2 = _moment(arr, 2)
m4 = _moment(arr, 4)
val = 0 if m2 == 0 else m4 / m2 ** 2.0
if (n > 2) & (m2 > 0):
val = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
return val
@register_jitable
def arr_max(arr):
"""Calculate maximum of values"""
if len(arr) == 0:
return numpy.nan
return arr.max()
@register_jitable
def arr_mean(arr):
"""Calculate mean of values"""
if len(arr) == 0:
return numpy.nan
return arr.mean()
@register_jitable
def arr_median(arr):
"""Calculate median of values"""
if len(arr) == 0:
return numpy.nan
return numpy.median(arr)
@register_jitable
def arr_min(arr):
"""Calculate minimum of values"""
if len(arr) == 0:
return numpy.nan
return arr.min()
@register_jitable
def arr_quantile(arr, q):
"""Calculate quantile of values"""
if len(arr) == 0:
return numpy.nan
return numpy.quantile(arr, q)
@register_jitable
def arr_skew(arr):
"""Calculate unbiased skewness of values"""
n = len(arr)
if n < 3:
return numpy.nan
m2 = _moment(arr, 2)
m3 = _moment(arr, 3)
val = 0 if m2 == 0 else m3 / m2 ** 1.5
if (n > 2) & (m2 > 0):
val = numpy.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2 ** 1.5
return val
@register_jitable
def arr_std(arr, ddof):
"""Calculate standard deviation of values"""
return arr_var(arr, ddof) ** 0.5
@register_jitable
def arr_sum(arr):
"""Calculate sum of values"""
return arr.sum()
@register_jitable
def arr_var(arr, ddof):
"""Calculate unbiased variance of values"""
length = len(arr)
if length in [0, ddof]:
return numpy.nan
return numpy.var(arr) * length / (length - ddof)
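# Hedged worked check (illustrative helper, not used by the implementations below):
def _example_arr_var_check():
    arr = numpy.array([1.0, 2.0, 3.0])
    # numpy.var(arr) is 2/3; scaling by length / (length - ddof) = 3/2 gives 1.0,
    # the unbiased sample variance.
    return arr_var(arr, 1)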
def gen_hpat_pandas_series_rolling_impl(rolling_func, output_type=None):
"""Generate series rolling methods implementations based on input func"""
nan_out_type = output_type is None
def impl(self):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
out_type = input_arr.dtype if nan_out_type == True else output_type # noqa
output_arr = numpy.empty(length, dtype=out_type)
def apply_minp(arr, minp):
finite_arr = arr[numpy.isfinite(arr)]
if len(finite_arr) < minp:
return numpy.nan
else:
return rolling_func(finite_arr)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = apply_minp(arr_range, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = apply_minp(arr_range, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return impl
def gen_hpat_pandas_series_rolling_zerominp_impl(rolling_func, output_type=None):
"""Generate series rolling methods implementations with zero min_periods"""
nan_out_type = output_type is None
def impl(self):
win = self._window
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
out_type = input_arr.dtype if nan_out_type == True else output_type # noqa
output_arr = numpy.empty(length, dtype=out_type)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = rolling_func(arr_range)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = rolling_func(arr_range)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return impl
hpat_pandas_rolling_series_count_impl = register_jitable(
gen_hpat_pandas_series_rolling_zerominp_impl(arr_nonnan_count, float64))
hpat_pandas_rolling_series_kurt_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_kurt, float64))
hpat_pandas_rolling_series_max_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_max, float64))
hpat_pandas_rolling_series_mean_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_mean, float64))
hpat_pandas_rolling_series_median_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_median, float64))
hpat_pandas_rolling_series_min_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_min, float64))
hpat_pandas_rolling_series_skew_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_skew, float64))
hpat_pandas_rolling_series_sum_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_sum, float64))
@sdc_overload_method(SeriesRollingType, 'apply')
def hpat_pandas_series_rolling_apply(self, func, raw=None):
ty_checker = TypeChecker('Method rolling.apply().')
ty_checker.check(self, SeriesRollingType)
raw_accepted = (Omitted, NoneType, Boolean)
if not isinstance(raw, raw_accepted) and raw is not None:
ty_checker.raise_exc(raw, 'bool', 'raw')
def hpat_pandas_rolling_series_apply_impl(self, func, raw=None):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
def culc_apply(arr, func, minp):
finite_arr = arr.copy()
finite_arr[numpy.isinf(arr)] = numpy.nan
if len(finite_arr) < minp:
return numpy.nan
else:
return arr_apply(finite_arr, func)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = culc_apply(arr_range, func, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = culc_apply(arr_range, func, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return hpat_pandas_rolling_series_apply_impl
@sdc_overload_method(SeriesRollingType, 'corr')
def hpat_pandas_series_rolling_corr(self, other=None, pairwise=None):
ty_checker = TypeChecker('Method rolling.corr().')
ty_checker.check(self, SeriesRollingType)
# TODO: check `other` is Series after a circular import of SeriesType fixed
# accepted_other = (bool, Omitted, NoneType, SeriesType)
# if not isinstance(other, accepted_other) and other is not None:
# ty_checker.raise_exc(other, 'Series', 'other')
accepted_pairwise = (bool, Boolean, Omitted, NoneType)
if not isinstance(pairwise, accepted_pairwise) and pairwise is not None:
ty_checker.raise_exc(pairwise, 'bool', 'pairwise')
nan_other = isinstance(other, (Omitted, NoneType)) or other is None
def hpat_pandas_rolling_series_corr_impl(self, other=None, pairwise=None):
win = self._window
minp = self._min_periods
main_series = self._data
main_arr = main_series._data
main_arr_length = len(main_arr)
if nan_other == True: # noqa
other_arr = main_arr
else:
other_arr = other._data
other_arr_length = len(other_arr)
length = max(main_arr_length, other_arr_length)
output_arr = numpy.empty(length, dtype=float64)
def calc_corr(main, other, minp):
# align arrays `main` and `other` by size and finiteness
min_length = min(len(main), len(other))
main_valid_indices = numpy.isfinite(main[:min_length])
other_valid_indices = numpy.isfinite(other[:min_length])
valid = main_valid_indices & other_valid_indices
if len(main[valid]) < minp:
return numpy.nan
else:
return arr_corr(main[valid], other[valid])
for i in prange(min(win, length)):
main_arr_range = main_arr[:i + 1]
other_arr_range = other_arr[:i + 1]
output_arr[i] = calc_corr(main_arr_range, other_arr_range, minp)
for i in prange(win, length):
main_arr_range = main_arr[i + 1 - win:i + 1]
other_arr_range = other_arr[i + 1 - win:i + 1]
output_arr[i] = calc_corr(main_arr_range, other_arr_range, minp)
return | pandas.Series(output_arr) | pandas.Series |
#%%
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from time import time
#%%
data = pd.read_csv('data/ml_requests.csv')
data.head()
#%%
items = data['items'].str.split(',')
features_names = set().union(*items)
#%%
orders = | pd.DataFrame(index=items.index, columns=features_names) | pandas.DataFrame |
from glob import glob
import pandas as pd
from os import path
datadir = '/Volumes/T7/BEST-AIR/data/ConcExpRisk_tract_poll_CA/'
parts = [path for path in glob(datadir + 'part*')]
csvs = [path for path in glob(datadir + 'part*/*.csv')]
datadict = {}
for pathname in csvs:
print(f"Reading '{pathname}'")
df = pd.read_csv(pathname, index_col=None)
basename = path.basename(pathname)
part = pathname.split('/')[-2]
fileroot = path.splitext(basename)[0]
key = (fileroot, part)
datadict[key] = df
basenames = {pair[0] for pair in datadict.keys()}
parts = sorted({pair[1] for pair in datadict.keys()})
print("Combining parts...")
data = {}
for basename in basenames:
df_parts = [datadict[(basename, part)] for part in parts]
data[basename] = | pd.concat(df_parts, axis='rows') | pandas.concat |
# Common functions for this project
import os, time, datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
from scipy.stats import zscore
from copy import deepcopy
def ctime():
t = time.time()
f = '%Y-%m-%d %H:%M:%S '
return datetime.datetime.fromtimestamp(t).strftime(f)
def load_data(drop_behavior=False):
data_path = os.path.abspath('./data/data_raw_labeled.pkl')
raw_data = np.load(data_path, allow_pickle=True)
if drop_behavior:
behavior_variables = ['distress_TQ', 'loudness_VAS10']
raw_data.drop(columns=behavior_variables, inplace=True)
return raw_data
def load_behavior_data(current_behavior_path=None):
if current_behavior_path is None:
current_behavior_path = './../data/features_nonEEG.xlsx'
sheets = ['vars_continuous', 'vars_categorical']
dfs = []
for sheet in sheets:
if 'categorical' in sheet:
dtype = 'category'
else:
dtype = 'float'
behavior_df = pd.read_excel(current_behavior_path, sheet_name=sheet, dtype=dtype)
dfs.append(behavior_df)
final = pd.concat(dfs, sort=False, axis=1)
final.dropna(inplace=True)
return final
def get_group_indices(full_sides=True):
def _preprocess_side_data(side_series):
# Convert asymmetrical side category to LR category
cleaned_side_data = deepcopy(side_series)
for s, subj_data in enumerate(side_data):
if subj_data < 0:
cleaned_side_data.iloc[s] = -1
elif subj_data == 0:
cleaned_side_data.iloc[s] = 0
else:
cleaned_side_data.iloc[s] = 1
return cleaned_side_data
behavior_df = load_behavior_data()
type_data = behavior_df['tinnitus_type']
tin_types = pd.unique(type_data)
side_data = behavior_df['tinnitus_side']
if full_sides:
tin_sides = pd.unique(side_data)
else:
new_side_data = _preprocess_side_data(side_data)
tin_sides = pd.unique(new_side_data)
side_data = new_side_data
type_1, type_2, type_3 = [], [], []
side_1, side_2, side_3, side_4, side_5 = [], [], [], [], []
for subj in range(len(behavior_df.index)):
if type_data.iloc[subj] == tin_types[0]:
type_1.append(subj)
elif type_data.iloc[subj] == tin_types[1]:
type_2.append(subj)
elif type_data.iloc[subj] == tin_types[2]:
type_3.append(subj)
else:
print('Subject %d did not have type data' % subj)
if side_data.iloc[subj] == tin_sides[0]:
side_1.append(subj)
elif side_data.iloc[subj] == tin_sides[1]:
side_2.append(subj)
elif side_data.iloc[subj] == tin_sides[2]:
side_3.append(subj)
else:
print('Subject %d did not have side data' % subj)
if full_sides:
if side_data.iloc[subj] == tin_sides[3]:
side_4.append(subj)
elif side_data.iloc[subj] == tin_sides[4]:
side_5.append(subj)
else:
print('Subject %d did not have side data' % subj)
res = {'type_%d_subj_indices' % tin_types[0]: type_1,
'type_%d_subj_indices' % tin_types[1]: type_2,
'type_%d_subj_indices' % tin_types[2]: type_3,
'side_%d_subj_indices' % tin_sides[0]: side_1,
'side_%d_subj_indices' % tin_sides[1]: side_2,
'side_%d_subj_indices' % tin_sides[2]: side_3}
if full_sides:
res['side_%d_subj_indices' % tin_sides[3]] = side_4
res['side_%d_subj_indices' % tin_sides[4]] = side_5
return res
def generate_test_df(n=100, c=10, normalize=True):
test_data = np.random.rand(n, c)
if normalize:
test_data = zscore(test_data, ddof=1)
column_names = ['Column_%d' % x for x in range(c)]
test_df = pd.DataFrame(test_data, columns=column_names)
return test_df
def clean_df_to_numpy(df):
# Dumb function to give networkx a numpy array
n_rows = len(df.index)
n_cols = len(list(df))
new_array = np.ndarray(shape=(n_rows, n_cols))
for x in range(n_rows):
for y in range(n_cols):
new_array[x, y] = df.iloc[x, y]
return new_array
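# Hedged usage sketch (illustrative only): for an all-numeric frame this loop is
# equivalent to DataFrame.to_numpy().
def _example_clean_df_to_numpy():
    df = generate_test_df(n=5, c=3, normalize=False)
    return clean_df_to_numpy(df)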
def load_data_full_subjects():
# Remove EEG subjects that don't have behavior data
behavior_df = load_behavior_data()
conn_df = load_connectivity_data()
filt_df = conn_df.filter(items=behavior_df.index, axis=0) # Remove EEG subjects with missing rowvals in behavior_df
return behavior_df, filt_df
def dummy_code_binary(categorical_series):
# Sex: 1M, -1F
string_categorical_series = pd.DataFrame(index=categorical_series.index, columns=list(categorical_series))
for colname in list(categorical_series):
string_series = []
for value in categorical_series[colname].values:
if value == 1:
if 'sex' in colname:
string_series.append('male')
else:
string_series.append('yes')
elif value == -1:
if 'sex' in colname:
string_series.append('female')
else:
string_series.append('no')
string_categorical_series[colname] = string_series
dummy_series = pd.get_dummies(string_categorical_series)
old_names = list(dummy_series)
return dummy_series.rename(columns=dict(zip(old_names, ['categorical_%s' % d for d in old_names])))
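# Hedged usage sketch (illustrative only; the 'sex' column and its 1/-1 coding are
# assumptions mirroring the comment above):
def _example_dummy_code_binary():
    coded = pd.DataFrame({'sex': [1, -1, 1]})
    # yields one-hot columns such as categorical_sex_male / categorical_sex_female
    return dummy_code_binary(coded)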
def convert_tin_to_str(tinnitus_data, data_type):
str_data = []
    if data_type == 'tinnitus_side':
for t in tinnitus_data:
if t == -1:
str_data.append('Left')
elif t == -0.5:
str_data.append('Left>Right')
elif t == 0.0:
str_data.append('Bilateral')
elif t == 0.5:
str_data.append('Right>Left')
elif t == 1.0:
str_data.append('Right')
if len(str_data) != len(tinnitus_data):
raise ValueError('Side data not parsed correctly')
    elif data_type == 'tinnitus_type':
        for t in tinnitus_data:
            if t == -1.0:
                str_data.append('PT')
elif t == 0.0:
str_data.append('PT_and_NBN')
elif t == 1.0:
str_data.append('NBN')
if len(str_data) != len(tinnitus_data):
raise ValueError('Type data not parsed correctly')
return str_data
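# Hedged usage sketch (illustrative values only):
def _example_convert_tin_to_str():
    # [-1, 0.0, 1.0] with 'tinnitus_side' maps to ['Left', 'Bilateral', 'Right']
    return convert_tin_to_str([-1, 0.0, 1.0], 'tinnitus_side')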
def save_xls(dict_df, path):
    # Save a dictionary of DataFrames to an Excel file, with each DataFrame on a separate sheet
writer = | pd.ExcelWriter(path) | pandas.ExcelWriter |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + trusted=true
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
import ast
from lib.functions_data import *
# + trusted=true
import sys
from pathlib import Path
import os
cwd = os.getcwd()
parent = str(Path(cwd).parents[0])
sys.path.append(parent)
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# To avoid pulling the full dataset down each time we re-run the notebook, a CSV of the cut-down dataset is saved for easier reloading.
# + trusted=true
#Checking for the cut of the full dataset and creating it if it doesn't exist:
try:
dec = pd.read_csv(parent + '/data/dec_euctr_extract.csv').drop('Unnamed: 0', axis=1)
except FileNotFoundError:
cols = ['eudract_number_with_country', 'date_of_competent_authority_decision',
'clinical_trial_type', 'national_competent_authority', 'eudract_number',
'date_on_which_this_record_was_first_entered_in_the_eudract_data',
'trial_status', 'date_of_the_global_end_of_the_trial', 'trial_results']
#You can use this URL if you want to download the full raw data
data_link = 'https://www.dropbox.com/s/4qt0msiipyn7crm/euctr_euctr_dump-2020-12-03-095517.csv.zip?dl=1'
dec = pd.read_csv(data_link, compression='zip', low_memory=False, usecols=cols)
dec.to_csv(parent + '/data/dec_euctr_extract.csv')
#This is additional data we collect from the results page we need for certain analyses
results_info = pd.read_csv(parent + '/data/euctr_data_quality_results_scrape_dec_2020.csv')
results_info['trial_start_date'] = pd.to_datetime(results_info.trial_start_date)
# + trusted=true
#Quick look at the spread of trial statuses on the EUCTR
dec.trial_status.value_counts(dropna=False)
# -
# The "date_of_competent_authority_decision" field has 2 nonsensical year values in which the correct value can reasonably be derived from context. We fix those below:
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2009-016759-22/DK
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2006-006947-30/FR
# + trusted=true
ind = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('210')].index
ind = ind.to_list()[0]
ind_2 = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('2077')].index
ind_2 = ind_2.to_list()[0]
dec.at[ind, 'date_of_competent_authority_decision'] = '2010-06-18'
dec.at[ind_2, 'date_of_competent_authority_decision'] = '2007-04-05'
# + trusted=true
#get rid of all protocols from non EU/EEA countries
dec_filt = dec[dec.clinical_trial_type != 'Outside EU/EEA'].reset_index(drop=True)
#let's see how many that is:
print(len(dec) - len(dec_filt))
# + trusted=true
dec_ctas = dec[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(dec_ctas)} registered trials and {dec_ctas.sum()} CTAs including non-EU/EEA CTAs')
# + trusted=true
decf_ctas = dec_filt[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(decf_ctas)} registered trials and {decf_ctas.sum()} CTAs excluding non-EU/EEA CTAs')
# + trusted=true
#Making dates into dates and adding a column of just the "Year" for relevant dates
dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'] = pd.to_datetime(dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'])
dec_filt['entered_year'] = dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'].dt.year
dec_filt['date_of_competent_authority_decision'] = pd.to_datetime(dec_filt['date_of_competent_authority_decision'])
dec_filt['approved_year'] = dec_filt['date_of_competent_authority_decision'].dt.year
# + trusted=true
#Creating a copy of the original dataset we can mess with and
#renaming columns to better variable names
analysis_df = dec_filt.copy()
analysis_df.columns = ['eudract_number_country',
'approved_date',
'clinical_trial_type',
'nca',
'eudract_number',
'date_entered',
'trial_status',
'completion_date',
'trial_results',
'entered_year',
'approved_year']
#And update the NCA names to the more accurate recent names
analysis_df['nca'] = analysis_df['nca'].replace(nca_name_mapping)
# + trusted=true
#Table 1
analysis_df[['nca', 'eudract_number_country']].groupby('nca').count()
# + trusted=true
#You can reproduce the data on the earliest registered protocol for each country by running this cell
#with the appropriate country abbreviation. For example, to get the date for Italy:
print(earliest_record_check(analysis_df, 'Italy - AIFA'))
#Uncomment this to get the date for all countries at once
#for abrev in country_abrevs.keys():
# print(f'Country: {abrev}\nEarliest record date: {earliest_record_check(dec_filt, abrev)}')
# + trusted=true
#lastly this is helpful to have the country names in various orders
ordered_countries_original = list(dec_filt.national_competent_authority.value_counts().index)
ordered_countries_new = list(analysis_df.nca.value_counts().index)
# -
# # Registrations Over Time
# + trusted=true
reg_df = analysis_df[['eudract_number', 'nca', 'date_entered', 'entered_year', 'approved_date', 'approved_year']].reset_index(drop=True)
reg_df.head()
# + trusted=true
#Data for Overall Trend in Registrations
grouped_overall = reg_df[['eudract_number']].groupby([reg_df.entered_year]).count()
earliest_entered = reg_df[['eudract_number', 'date_entered']].groupby('eudract_number', as_index=False).min()
earliest_entered['year'] = earliest_entered.date_entered.dt.year
unique_trials = earliest_entered[['eudract_number', 'year']].groupby('year').count()
# + trusted=true
fig, ax = plt.subplots(figsize = (12,6), dpi=400)
grouped_overall[(grouped_overall.index > 2004) & (grouped_overall.index < 2020)].plot(ax=ax, legend=False, lw=2,
marker='.', markersize=12)
unique_trials[(unique_trials.index > 2004) & (unique_trials.index < 2020)].plot(ax=ax, legend=False, grid=True,
lw=2, marker='^', markersize=10)
ax.legend(['Total CTAs', 'Unique Trials'], bbox_to_anchor = (1, 1))
ax.set_xticks(range(2005, 2020))
ax.set_yticks(range(0,7500, 500))
plt.xlabel('CTA Entry Year', labelpad=10)
plt.ylabel('Records Entered')
plt.title('Trend in new CTA and Trial Registration on the EUCTR', pad=10)
#fig.savefig(parent + '/data/Figures/fig_s1.jpg', bbox_inches='tight', dpi=400)
fig.show()
# -
# Now we're interested in breaking the data down a bit further. Here we will break it down into quarters and years for more detailed analysis. We graph the years for which we have full EUCTR data (2005-2019).
# + trusted=true
grouped = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Q')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Q')).count()
quarters = list(get_index.index)
# + trusted=true
grouped_2 = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Y')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Y')).count()
years = list(get_index.index)
# + trusted=true
grouped_year = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.entered_year]).count()
grouped_year_2 = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.approved_year]).count()
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
#fig.suptitle("Cumulative trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
| pd.set_option('mode.chained_assignment', None) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 15:07:04 2019
@author: ning
"""
import pandas as pd
import os
from glob import glob
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
from matplotlib import pyplot as plt
from utils import resample_ttest_2sample, MCPConverter
working_dir = '../results/all_vs_one'
working_data = glob(os.path.join(working_dir,'pos*.csv'))
df_pos = pd.concat([pd.read_csv(f) for f in working_data])
df_pos_all = df_pos[df_pos['feature'] == 'all']
df_pos_one = df_pos[df_pos['feature'] != 'all']
working_data = glob(os.path.join(working_dir,'att*.csv'))
df_att = pd.concat([ | pd.read_csv(f) | pandas.read_csv |
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
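# Hedged note on the pattern used throughout these tests: Reduce starts from []
# and append_return mutates-and-returns that list, so await_termination() hands
# back every event the flow processed, e.g.
#   Reduce([], lambda acc, x: append_return(acc, x))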
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
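    # 'col1' is emitted only once below, while 'col2' arrives every 25 minutes,
    # so the two aggregations accumulate over uneven sets of events (hence the NaN
    # avg/min/max expected for number_of_stuff2 on the first row).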
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': | pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC') | pandas.Timestamp |
"""analisis de malware y benigno con dataset y naive
preprocesado por chinos con binario
paper 244802 en df 293333"""
import pandas as pd
import matplotlib.pyplot as plt
"""matplotlib inline"""
plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.naive_bayes import GaussianNB
import os
import glob
# First we specify a file pattern and pass it as a parameter to the glob function
os.chdir("C:\\DatosChinos\\ransomware")
os.getcwd()
csv_files = glob.glob('*.csv')
# Show csv_files, which is a list of file names
print(csv_files)
list_data = []
# Write a loop that goes through each of the globbed file names; the end result will be the list of dataframes
for filename in csv_files:
data = | pd.read_csv(filename) | pandas.read_csv |
from collections import defaultdict
import numpy as np
import pandas as pd
import scipy.stats
from matplotlib import gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import cm
from conf import *
cmap = cm.get_cmap('tab10')
colors = [cmap(0), cmap(0), cmap(1), cmap(1), cmap(2), cmap(2), cmap(3), cmap(4), cmap(6), cmap(7)]
alphas = [1., 0.7, 1., 0.7, 1., 0.7, 1., 1., 1., 1.]
names = {'HotMaps_cat_1': '3D clusters tumor',
'HotMaps_cat_2': '3D clusters pan-cancer',
'CLUSTL_cat_1': 'Linear clusters tumor',
'CLUSTL_cat_2': 'Linear clusters pan-cancer',
'smRegions_cat_1': 'Domain enrichment tumor',
'smRegions_cat_2': 'Domain enrichment pan-cancer',
'PhyloP': 'Conservation',
'PTM': 'Post-translational modification',
'csqn_type_missense': 'Missense',
'csqn_type_nonsense': 'Nonsense'}
"""Utils"""
def get_PFAMs_per_transcript(PFAM_files, PFAM_info, transcript):
df_pfam = | pd.read_csv(PFAM_files, sep="\t", names=["ENSEMBL_GENE", "ENSEMBL_TRANSCRIPT", "START", "END", "DOMAIN"]) | pandas.read_csv |
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import copy
import logging
import pandas as pd
import multiprocessing as mp
from ..orbit import TestOrbit
from ..utils import Timeout
from ..utils import _initWorker
from ..utils import _checkParallel
logger = logging.getLogger(__name__)
__all__ = [
"Backend"
]
TIMEOUT = 30
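# Each backend call below is wrapped in a hard per-call timeout so a single stuck orbit or
# observation batch logs a critical message and returns an empty DataFrame instead of hanging the pool.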
def propagation_worker(orbits, t1, backend):
with Timeout(seconds=TIMEOUT):
try:
propagated = backend._propagateOrbits(orbits, t1)
except TimeoutError:
logger.critical("Propagation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
propagated = pd.DataFrame()
return propagated
def ephemeris_worker(orbits, observers, backend):
with Timeout(seconds=TIMEOUT):
try:
ephemeris = backend._generateEphemeris(orbits, observers)
except TimeoutError:
logger.critical("Ephemeris generation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
ephemeris = pd.DataFrame()
return ephemeris
def orbitDetermination_worker(observations, backend):
with Timeout(seconds=TIMEOUT):
try:
orbits = backend._orbitDetermination(observations)
except TimeoutError:
logger.critical("Orbit determination timed out on observations (showing first 5): {}".format(observations["obs_id"].values[:5]))
orbits = pd.DataFrame()
return orbits
def projectEphemeris_worker(ephemeris, test_orbit_ephemeris):
assert len(ephemeris["mjd_utc"].unique()) == 1
assert len(test_orbit_ephemeris["mjd_utc"].unique()) == 1
assert ephemeris["mjd_utc"].unique()[0] == test_orbit_ephemeris["mjd_utc"].unique()[0]
observation_time = ephemeris["mjd_utc"].unique()[0]
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
test_orbit_ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToEphemeris(ephemeris)
return ephemeris
class Backend:
def __init__(self, name="Backend", **kwargs):
self.__dict__.update(kwargs)
self.name = name
self.is_setup = False
return
def setup(self):
return
def _propagateOrbits(self, orbits, t1):
"""
Propagate orbits from t0 to t1.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have orbit propagation implemented."
)
raise NotImplementedError(err)
def propagateOrbits(
self,
orbits,
t1,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate each orbit in orbits to each time in t1.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits to propagate.
t1 : `~astropy.time.core.Time`
Times to which to propagate each orbit.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
propagated : `~pandas.DataFrame`
Propagated orbits with at least the following columns:
orbit_id : Input orbit ID.
mjd_tdb : Time at which state is defined in MJD TDB.
x, y, z, vx, vy, vz : Orbit as cartesian state vector with units
of au and au per day.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
t1_duplicated = [copy.deepcopy(t1) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
propagation_worker_ray = ray.remote(propagation_worker)
propagation_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, t1_duplicated, backend_duplicated):
p.append(propagation_worker_ray.remote(o, t, b))
propagated_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
propagated_dfs = p.starmap(
propagation_worker,
zip(
orbits_split,
t1_duplicated,
backend_duplicated,
)
)
p.close()
propagated = pd.concat(propagated_dfs)
propagated.reset_index(
drop=True,
inplace=True
)
else:
propagated = self._propagateOrbits(
orbits,
t1
)
return propagated
def _generateEphemeris(self, orbits, observers):
"""
Generate ephemerides for the given orbits as observed by
the observers.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have ephemeris generation implemented."
)
raise NotImplementedError(err)
def generateEphemeris(
self,
orbits,
observers,
test_orbit=None,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Generate ephemerides for each orbit in orbits as observed by each observer
in observers.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits for which to generate ephemerides.
observers : dict or `~pandas.DataFrame`
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
test_orbit : `~thor.orbits.orbits.Orbits`
Test orbit to use to generate projected coordinates.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
ephemeris : `~pandas.DataFrame`
Ephemerides with at least the following columns:
orbit_id : Input orbit ID
observatory_code : Observatory's MPC code.
mjd_utc : Observation time in MJD UTC.
RA : Right Ascension in decimal degrees.
Dec : Declination in decimal degrees.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
observers_duplicated = [copy.deepcopy(observers) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
ephemeris_worker_ray = ray.remote(ephemeris_worker)
ephemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, observers_duplicated, backend_duplicated):
p.append(ephemeris_worker_ray.remote(o, t, b))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
ephemeris_worker,
zip(
orbits_split,
observers_duplicated,
backend_duplicated,
)
)
p.close()
ephemeris = pd.concat(ephemeris_dfs)
ephemeris.reset_index(
drop=True,
inplace=True
)
else:
ephemeris = self._generateEphemeris(
orbits,
observers
)
if test_orbit is not None:
test_orbit_ephemeris = self._generateEphemeris(
test_orbit,
observers
)
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g).copy() for g in ephemeris_grouped.groups]
test_orbit_ephemeris_grouped = test_orbit_ephemeris.groupby(by=["observatory_code", "mjd_utc"])
test_orbit_ephemeris_split = [test_orbit_ephemeris_grouped.get_group(g) for g in test_orbit_ephemeris_grouped.groups]
if num_jobs > 1:
if parallel_backend == "ray":
projectEphemeris_worker_ray = ray.remote(projectEphemeris_worker)
projectEphemeris_worker_ray = projectEphemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
p.append(projectEphemeris_worker_ray.remote(e, te))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
projectEphemeris_worker,
zip(
ephemeris_split,
test_orbit_ephemeris_split
)
)
p.close()
else:
ephemeris_dfs = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
ephemeris_df = projectEphemeris_worker(e, te)
ephemeris_dfs.append(ephemeris_df)
ephemeris = | pd.concat(ephemeris_dfs) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#%%
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string
import gensim
from gensim import corpora, models
import pandas as pd
from nltk import FreqDist
import re
import spacy
# libraries for visualization
import pyLDAvis
import pyLDAvis.gensim
import matplotlib.pyplot as plt
import seaborn as sns
#%% loading data
# change this to reviews.csv
reviews_df = | pd.read_csv('Kindle_review.csv') | pandas.read_csv |
# MIT-License
#
# Copyright 2020 World Infectious Disease Monitoring Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load the Pandas libraries with alias 'pd'
import pandas as pd
#import matplotlib.pyplot as plt
#import seaborn as sns
#
# Concat the df's and rename the columns in the historic data to match the 2019 format
dfs = []
data_old1 = pd.read_csv("../data/2014w1-2018w52-1.csv")
data_old1.rename(columns={'weekendingdate':'week_ending_date',
'allcause':'all_cause',
'naturalcause':'natural_cause'}, inplace=True)
dfs.append(data_old1)
data_old2 = pd.read_csv("../data/2014w1-2018w52-2.csv")
data_old2.rename(columns={'weekendingdate':'week_ending_date',
'allcause':'all_cause',
'naturalcause':'natural_cause'}, inplace=True)
dfs.append(data_old2)
data_new = pd.read_csv("../data/2019w1-2020w20.csv")
dfs.append(data_new)
data_all = pd.concat(dfs, ignore_index=True)
#
#Clean up the years in new and historic data
data_new.drop(data_new[data_new.mmwryear == 2019].index, inplace=True) #Drop 2019 from the new data, keeping only the current year (2020)
data_all.drop(data_all[data_all.mmwryear == 2020].index, inplace=True) #Drop the current year (2020) from the combined historic data
#
#Convert the year/week columns to something pandas can deal with
data_new['formatted_date'] = data_new.mmwryear * 1000 + data_new.mmwrweek * 10 + 0
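# e.g. year 2020, week 5 -> 2020050, which '%Y%W%w' parses as year / week number (Monday-first) / weekday (0 = Sunday)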
data_new['date'] = | pd.to_datetime(data_new['formatted_date'], format='%Y%W%w') | pandas.to_datetime |
import json
import pandas as pd
pd.set_option('display.max_rows', 30)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1200)
import matplotlib.pyplot as plt
import seaborn as sns  # used for plotting interactive graphs
import warnings
warnings.filterwarnings('ignore')
def load_tmdb_movies(path):
df = | pd.read_csv(path) | pandas.read_csv |
"""
Custom excel types for pandas objects (eg dataframes).
For information about custom types in PyXLL see:
https://www.pyxll.com/docs/udfs.html#custom-types
For information about pandas see:
http://pandas.pydata.org/
Including this module in your pyxll config adds the following custom types that can
be used as return and argument types to your pyxll functions:
- dataframe
- series
- series_t
Dataframes with multi-index indexes or columns will be returned with the columns and
index values in the resulting array. For normal indexes, the index will only be
returned as part of the resulting array if the index is named.
eg::
from pyxll import xl_func
import pandas as pa
@xl_func("int rows, int cols, float value: dataframe")
def make_empty_dataframe(rows, cols, value):
# create an empty dataframe
df = pa.DataFrame({chr(c + ord('A')) : value for c in range(cols)}, index=range(rows))
# return it. The custom type will convert this to a 2d array that
# excel will understand when this function is called as an array
# function.
return df
@xl_func("dataframe df, string col: float")
def sum_column(df, col):
return df[col].sum()
In excel (use Ctrl+Shift+Enter to enter an array formula)::
=make_empty_dataframe(3, 3, 100)
>> A B C
>> 100 100 100
>> 100 100 100
>> 100 100 100
=sum_column(A1:C4, "A")
>> 300
"""
from pyxll import xl_return_type, xl_arg_type
import datetime as dt
import pandas as pa
import numpy as np
import pytz
try:
import pywintypes
except ImportError:
pywintypes = None
@xl_return_type("dataframe", "var")
def _dataframe_to_var(df):
"""return a list of lists that excel can understand"""
if not isinstance(df, pa.DataFrame):
return df
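    # Replace NaNs with exceptions so they surface as errors in Excel (same convention as _series_to_var below).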
df = df.applymap(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
index_header = [str(df.index.name)] if df.index.name is not None else []
if isinstance(df.index, pa.MultiIndex):
index_header = [str(x) or "" for x in df.index.names]
if isinstance(df.columns, pa.MultiIndex):
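        # One header row per column level; labels repeated from the column to the left are blanked
        # so the headers read like merged cells in Excel.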
result = [([""] * len(index_header)) + list(z) for z in zip(*list(df.columns))]
for header in result:
for i in range(1, len(header) - 1):
if header[-i] == header[-i-1]:
header[-i] = ""
if index_header:
column_names = [x or "" for x in df.columns.names]
for i, col_name in enumerate(column_names):
result[i][len(index_header)-1] = col_name
if column_names[-1]:
index_header[-1] += (" \ " if index_header[-1] else "") + str(column_names[-1])
num_levels = len(df.columns.levels)
result[num_levels-1][:len(index_header)] = index_header
else:
if index_header and df.columns.name:
index_header[-1] += (" \ " if index_header[-1] else "") + str(df.columns.name)
result = [index_header + list(df.columns)]
if isinstance(df.index, pa.MultiIndex):
prev_ix = None
for ix, row in df.iterrows():
header = list(ix)
if prev_ix:
header = [x if x != px else "" for (x, px) in zip(ix, prev_ix)]
result.append(header + list(row))
prev_ix = ix
elif index_header:
for ix, row in df.iterrows():
result.append([ix] + list(row))
else:
for ix, row in df.iterrows():
result.append(list(row))
return _normalize_dates(result)
@xl_return_type("series", "var")
def _series_to_var(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(s.index, s)))
return _normalize_dates(result)
@xl_return_type("series_t", "var")
def _series_to_var_transform(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(*zip(s.index, s))))
return _normalize_dates(result)
@xl_arg_type("dataframe", "var")
def _var_to_dataframe(x):
"""return a pandas DataFrame from a list of lists"""
if not isinstance(x, (list, tuple)):
raise TypeError("Expected a list of lists")
x = _fix_pywintypes(x)
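    # The first row is taken as the column header; the remaining rows become the data.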
columns = x[0]
rows = x[1:]
return pa.DataFrame(list(rows), columns=columns)
@xl_arg_type("series", "var")
def _var_to_series(s):
"""return a pandas Series from a list of lists (arranged vertically)"""
if not isinstance(s, (list, tuple)):
raise TypeError("Expected a list of lists")
s = _fix_pywintypes(s)
keys, values = [], []
for row in s:
if not isinstance(row, (list, tuple)):
raise TypeError("Expected a list of lists")
if len(row) < 2:
raise RuntimeError("Expected rows of length 2 to convert to a pandas Series")
key, value = row[:2]
# skip any empty rows
if key is None and value is None:
continue
keys.append(key)
values.append(value)
return | pa.Series(values, index=keys) | pandas.Series |
import pandas as pd
def clean_impex_dataset(file_location, sheet):
df = | pd.read_excel(file_location, skiprows=5, sheet_name=sheet) | pandas.read_excel |
# -*- coding:utf-8 -*-
import pandas as pd
import time,datetime
import matplotlib.pyplot as plt
import random
pd.set_option('display.height',1000)
pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',50)
pd.set_option('display.width',1000)
class report(object):
def __init__(self,df):
'''
        df should follow this format: "index (title is None)", "stock", "buy_date", "sell_date", "holddays", "profit"
'''
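        # Hypothetical example row matching the layout above (values are made up):
        #   0, "600036", "2015/01/05", "2015/01/20", 15, 0.08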
self.df = df
# print (sorted(df["buy_date"])[0])
# print (sorted(df["sell_date"])[-1])
# import sys
# sys.exit(0)
def formatdate(self,s):
try:
t = time.strptime(s, "%Y/%m/%d")
y,m,d = t[0:3]
rst = datetime.datetime(y,m,d).strftime('%Y-%m-%d')
except:
print (s)
rst = s
return rst
def positiongain(self,start="2011-01-01",end="2016-11-18"):
totalmoney = 100
leftmoney = 100
holds = []
datelist = [i.strftime('%Y-%m-%d') for i in pd.date_range(start, end)]
result = {d:[] for d in datelist}
gains = {d:0 for d in datelist}
df = self.df
for i in df.values:
i[2] = self.formatdate(i[2])
i[3] = self.formatdate(i[3])
result[i[2]].append(i)
for date in datelist:
currentholdnum = len(holds)
current_day_could_buy_num = len(result[date])
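            # Hold at most 10 concurrent positions and split the remaining cash evenly
            # across the free slots when new candidates appear.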
if current_day_could_buy_num >=1 and currentholdnum < 10:
buymoney = leftmoney/(10-currentholdnum)
if current_day_could_buy_num + currentholdnum <= 10:
leftmoney = leftmoney - buymoney*current_day_could_buy_num
holds.extend([(i,buymoney) for i in result[date]])
else:
leftmoney = 0
holds.extend([(i,buymoney) for i in random.sample(result[date],10-currentholdnum)])
for d in holds[:]:
                if d[0][3] <= date:
holds.remove(d)
leftmoney += d[1]*(d[0][5]+1)
totalmoney += d[1]*d[0][5]
gains[date] = totalmoney
newdf = pd.DataFrame(data=[gains[i] for i in datelist], index=datelist,columns=["a",])
newdf["date"] = newdf.index
newdf.plot(x="date", y="a", kind='area')
plt.savefig("positiongain_from_{}_to_{}.png".format(start,end))
plt.show()
def cumulative_graph(self,datafile="",start="2013-03-01",end="2016-11-18"):
date = [i.strftime('%Y-%m-%d') for i in pd.date_range(start, end)]
result = {d:[0,0] for d in date}
df = self.df
for i in df.values:
i[2] = self.formatdate(i[2])
i[3] = self.formatdate(i[3])
result[i[3]][0] += i[5]
result[i[3]][1] += 1
newdf = | pd.DataFrame(data=[[result[i][0],result[i][1]] for i in date], index=date,columns=["a","b"]) | pandas.DataFrame |
"""Visualizes burst data."""
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def to_pandas(ebursts, offsets, svo, unit='s'):
"""Exports burst and offset data to dataframes for a single term.
ebursts is an edgebust dict from the SVO object
offsets is an offsets dict from the SVO object
"""
svos = " | ".join(svo)
bdf = pd.DataFrame(ebursts)
bdf[1] = pd.to_datetime(bdf[1], unit=unit)
bdf[2] = pd.to_datetime(bdf[2], unit=unit)
bdf.columns = ['level', 'start', 'end']
bdf['svo'] = svos
odf = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from pathlib import Path
def load(path, dt=False, stats=False):
print("loading data from",path)
dataFrames = {}
dataFrames['gameLogs'] = pd.read_csv(path/'GameLogs.csv', index_col=False)
if dt:
dataFrames['gameLogs']['Date'] = pd.to_datetime(dataFrames['gameLogs']['Date'])
dataFrames['people'] = pd.read_csv(path/'People.csv', index_col=False)
dataFrames['teams'] = pd.read_csv(path/'Teams.csv', index_col=False)
dataFrames['managers'] = pd.read_csv(path/'Managers.csv', index_col=False)
dataFrames['fieldings'] = pd.read_csv(path/'Fielding.csv', index_col=False)
dataFrames['pitchings'] = pd.read_csv(path/'Pitching.csv', index_col=False)
dataFrames['battings'] = pd.read_csv(path/'Batting.csv', index_col=False)
if stats:
dataFrames['stats'] = pd.read_csv(path/'Stats.csv', index_col=False)
print("data loaded")
return dataFrames
def save(path, dataFrames, stats=False):
print("Saving data to",path)
dataFrames['gameLogs'].to_csv(path/'GameLogs.csv', index = False)
dataFrames['people'].to_csv(path/'People.csv', index = False)
dataFrames['teams'].to_csv(path/'Teams.csv', index = False)
dataFrames['managers'].to_csv(path/'Managers.csv', index = False)
dataFrames['fieldings'].to_csv(path/'Fielding.csv', index = False)
dataFrames['pitchings'].to_csv(path/'Pitching.csv', index = False)
dataFrames['battings'].to_csv(path/'Batting.csv', index = False)
if stats:
dataFrames['stats'].to_csv(path/'Stats.csv', index = False)
print("Data saved")
def filter(path, saveState=True):
def filterFrame(frame, columns, renames=None):
frame = frame[columns]
if(renames!=None):
frame = frame.rename(columns=renames)
return frame.reset_index(drop=True)
def filterGameLogs(gameLogs, people):
gameLogs['Date'] = pd.to_datetime(gameLogs['Date'], format="%Y%m%d")
gameLogs['Visiting league AL'] = gameLogs['Visiting league']=="AL"
gameLogs['Home league AL'] = gameLogs['Home league']=="AL"
gameLogs = gameLogs[gameLogs['Forfeit information'].isna()]
gameLogs = gameLogs[gameLogs['Protest information'].isna()]
generalColumns = [
'Date','Visiting: Team','Visiting league AL','Home: Team','Home league AL','Visiting: Score','Home: Score']
visitingStatsColumns = [
'Visiting at-bats','Visiting hits','Visiting doubles','Visiting triples','Visiting homeruns','Visiting RBI','Visiting sacrifice hits','Visiting sacrifice flies',
'Visiting hit-by-pitch','Visiting walks','Visiting intentional walks','Visiting strikeouts','Visiting stolen bases','Visiting caught stealing','Visiting grounded into double plays',
'Visiting left on base','Visiting pitchers used','Visiting individual earned runs','Visiting team earned runs','Visiting wild pitches',
'Visiting balks','Visiting putouts','Visiting assists','Visiting errors','Visiting passed balls','Visiting double plays','Visiting triple plays']
homeStatsColumns = [
'Home at-bats','Home hits','Home doubles','Home triples','Home homeruns','Home RBI','Home sacrifice hits','Home sacrifice flies',
'Home hit-by-pitch','Home walks','Home intentional walks','Home strikeouts','Home stolen bases','Home caught stealing','Home grounded into double plays',
'Home left on base','Home pitchers used','Home individual earned runs','Home team earned runs','Home wild pitches',
'Home balks','Home putouts','Home assists','Home errors','Home passed balls','Home double plays','Home triple plays']
visitingIDColumns = [
'Visiting team manager ID','Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID']
homeIDColumns = [
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
identifier = people[['playerID','retroID']].drop_duplicates(subset=['retroID']).dropna()
for column in visitingIDColumns+homeIDColumns:
merged = pd.merge(gameLogs[column], identifier, left_on=column, right_on='retroID', how="left")
gameLogs[column] = merged['playerID']
gameLogs = filterFrame(gameLogs, generalColumns+visitingStatsColumns+homeStatsColumns+visitingIDColumns+homeIDColumns)
gameLogs = gameLogs.dropna(subset=generalColumns)
for column in visitingStatsColumns+homeStatsColumns:
gameLogs = gameLogs[(gameLogs[column]>=0) | (gameLogs[column].isna())]
return gameLogs.reset_index(drop=True)
def filterPeople(people):
people['yearID'] = people['birthYear']
people['weight'] = 0.453592*people['weight']
people['height'] = 0.0254*people['height']
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = filterFrame(people, ['yearID','playerID','weight','height','bats right', 'bats left', 'throws right'])
return people.reset_index(drop=True)
def filterTeams(teams):
teams = filterFrame(teams,
['yearID','teamIDretro','divID','Rank','G','W','L','DivWin','LgWin','WSWin','R','AB','H','2B','3B','HR','BB','SO','SB','CS','HBP','SF','RA','ER','ERA','SHO','SV','HA','HRA','BBA','SOA','E','DP','FP'],
{"teamIDretro":"teamID","divID":"Division","G":"Games","W":"Wins","L":"Losses","DivWin":"Division winner","LgWin":"League winner","WSWin":"World series winner","R":"Runs scored","AB":"At bats"
,"H":"Hits by batters","2B":"Doubles","3B":"Triples","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","SB":"Stolen bases","CS":"Cought stealing","HBP":"Batters hit by pitch"
,"SF":"Sacrifice flies","RA":"Opponents runs scored","ER":"Earned runs allowed","ERA":"Earned runs average","SHO":"Shutouts","SV":"Saves","HA":"Hits allowed"
,"HRA":"Homeruns allowed","BBA":"Walks allowed","SOA":"Strikeouts allowed","E":"Errors","DP":"Double plays","FP":"Fielding percentage"})
teams['division C'] = (teams['Division']=="C")
teams['division E'] = (teams['Division']=="E")
teams['division W'] = (teams['Division']=="W")
teams = teams.drop(columns=['Division'])
teams['Division winner'] = (teams['Division winner']=='Y')
teams['League winner'] = (teams['League winner']=='Y')
teams['World series winner']= (teams['World series winner']=='Y')
return teams.reset_index(drop=True)
print("start filtering")
dataFrames = load(path/'Input')
print("filter gameLogs")
dataFrames['gameLogs'] = filterGameLogs(dataFrames['gameLogs'], dataFrames['people'])
print("filter people")
dataFrames['people'] = filterPeople(dataFrames['people'])
print("filter teams")
dataFrames['teams'] = filterTeams(dataFrames['teams'])
print("filter managers")
dataFrames['managers'] = filterFrame(dataFrames['managers'],
['yearID','playerID','G','W','L'],
{"G":"Games","W":"Wins","L":"Losses"})
print("filter fieldings")
dataFrames['fieldings'] = filterFrame(dataFrames['fieldings'],
['yearID','playerID','PO','A','E','DP','PB','WP','SB','CS'],
{"PO":"Putouts","A":"Assists","E":"Error","DP":"Double plays","PB":"Passed Balls","WP":"Wild Pitches","SB":"Opponent Stolen Bases","CS":"Opponents Caught Stealing"})
print("filter pitchings")
dataFrames['pitchings'] = filterFrame(dataFrames['pitchings'],
['yearID','playerID','W','L','G','H','ER','HR','BB','SO','BAOpp','ERA','IBB','WP','HBP','BK','BFP','R','SH','SF','GIDP','SV','SHO'],
{"G":"Games","W":"Wins","L":"Losses","H":"Hits","ER":"Earned Runs","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","BAOpp":"Opponent batting average","ERA":"ERA"
,"IBB":"Intentional walks","WP":"Wild pitches","HBP":"Batters hit by pitch","BK":"Balks","BFP":"Batters faced","R":"Runs allowed","SH":"Batters sacrifices"
,"SF":"Batters sacrifice flies","GIDP":"Grounded into double plays","SV":"Saves","SHO":"Shutouts"})
print("filter battings")
dataFrames['battings'] = filterFrame(dataFrames['battings'],
['yearID','playerID','AB','R','H','2B','3B','HR','RBI','SB','CS','BB','SO','IBB','HBP','SH','SF','GIDP'],
{"AB":"At bats","R":"Runs","H":"Hits","2B":"Doubles","3B":"Triples","HR":"Homeruns","RBI":"Runs batted in","SB":"Stolen bases","CS":"Caught stealing"
,"BB":"Base on balls","SO":"Strikeouts","IBB":"Intentional walks","HBP":"Hit by pitch","SH":"Sacrifice hits","SF":"Sacrifice flies","GIDP":"Grounded into double plays"})
print("data filtered")
if saveState:
save(path/'Filtered', dataFrames)
return dataFrames
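# Illustrative helper (not part of the original pipeline): filter() reads its raw data via
# load(path/'Input'), so the CSV names below -- taken from load() above -- are assumed to be
# present under <base_path>/Input before the pipeline is run.
def _check_input_layout(base_path):
    """Sketch: report which of the expected input CSVs are missing under base_path/Input."""
    expected = ['GameLogs.csv', 'People.csv', 'Teams.csv', 'Managers.csv',
                'Fielding.csv', 'Pitching.csv', 'Batting.csv']
    missing = [name for name in expected if not (base_path/'Input'/name).exists()]
    if missing:
        print("missing input files:", missing)
    return not missing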
def replace(path, dataFrames, default="mean", lastKnownState=True, saveState=True, inpurity=0.5):
def replaceFrame(frame, targets, gameLogs, default, lastKnownState, inpurity):
#define ID column
mID = 'playerID'
for column in frame.columns:
if column=='teamID':
mID = 'teamID'
break
if column=='playerID':
break
#drop inpure columns
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
frame = frame[nanFrame[nanFrame['inpurity']<=inpurity]['index'].tolist()]
#creating frame containing only usefull data
onlyFrame = None
for column in targets:
temp = gameLogs[['Date',column]]
temp['yearID'] = temp['Date'].dt.year-1
temp = temp.rename(columns={column:mID})
onlyFrame = pd.concat([onlyFrame, temp]).drop(columns=['Date']).drop_duplicates().dropna().reset_index(drop=True)
#combining duplicates
aggregators = {}
for column in frame.drop(columns=['yearID',mID]).columns:
if (column.find("average")>-1) or (column.find("percentage")>-1):
aggregators[column] = 'mean'
elif (column.find("winner")>-1) or (column.find("division")>-1) or (column.find("Rank")>-1):
aggregators[column] = 'max'
else:
aggregators[column] = 'sum'
temp = frame[frame.duplicated(keep=False, subset=['yearID',mID])]
temp2 = pd.merge(temp[['yearID',mID]],temp.drop(columns=['yearID',mID]).notna(), left_index=True, right_index=True).groupby(['yearID',mID], as_index=False).sum()
temp = temp.groupby(['yearID',mID], as_index=False).agg(aggregators)
for column in temp.columns:
vec = temp2[column]==0
col = temp[column]
col[vec] = None
temp[column] = col
frame = frame.drop_duplicates(keep=False, subset=['yearID',mID])
frame = pd.concat([frame, temp])
mIDs = np.array(list(dict.fromkeys(frame[mID].unique().tolist()+onlyFrame[mID].unique().tolist())))
years = np.array(list(dict.fromkeys(frame['yearID'].unique().tolist()+onlyFrame['yearID'].unique().tolist())))
fullFrame = pd.DataFrame(np.array(np.meshgrid(years, mIDs)).T.reshape(-1,2), columns=['yearID',mID])
fullFrame['yearID'] = pd.to_numeric(fullFrame['yearID'])
fullFrame = pd.merge(fullFrame, frame, on=['yearID',mID], how="left")
if lastKnownState:
fullFrame = pd.merge(fullFrame[['yearID',mID]], fullFrame.groupby([mID]).ffill().drop(columns=['yearID']), left_index=True, right_index=True)
frame = pd.merge(onlyFrame, fullFrame, on=['yearID',mID], how="left")
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
        while (nanFrame[nanFrame['inpurity']>inpurity/3])['index'].tolist():
frame = frame[frame[nanFrame.at[nanFrame['inpurity'].idxmax(), 'index']].notna()]
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
if default!=None:
for column in frame.columns:
if frame[column].dtype=="bool":
                    frame[column] = frame[column].fillna(False)
continue
if default=="mean":
if (frame[column].dtype=="float64") | (frame[column].dtype=="int64"):
frame[column] = frame[column].fillna(frame[column].mean())
elif default=="zero":
if (frame[column].dtype=="float64") | (frame[column].dtype=="int64"):
frame[column] = frame[column].fillna(0)
#nanFrame = frame.isna().sum().reset_index()
#nanFrame['inpurity'] = nanFrame[0]/frame.index.size
#print(nanFrame[nanFrame['inpurity']>0])
return frame.dropna().reset_index(drop=True)
def replaceGameLogs(gameLogs):
return gameLogs.dropna().reset_index(drop=True)
def replacePeople(people, gameLogs, default):
columns = ['Visiting team manager ID','Visiting starting pitcher ID'
,'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID'
,'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID'
,'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'
,'Home team manager ID','Home starting pitcher ID'
,'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID'
,'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID'
,'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
onlyPeople = None
for column in columns:
temp = gameLogs[[column]]
temp = temp.rename(columns={column:'playerID'})
onlyPeople = pd.concat([onlyPeople, temp]).drop_duplicates().dropna().reset_index(drop=True)
people = pd.merge(onlyPeople, people, on='playerID', how="left")
nanFrame = people.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/people.index.size
        while (nanFrame[nanFrame['inpurity']>inpurity/4])['index'].tolist():
people = people[people[nanFrame.at[nanFrame['inpurity'].idxmax(), 'index']].notna()]
nanFrame = people.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/people.index.size
if default!=None:
for column in people.columns:
if people[column].dtype=="bool":
                    people[column] = people[column].fillna(False)
continue
if default=="mean":
if (people[column].dtype=="float64") | (people[column].dtype=="int64"):
people[column] = people[column].fillna(people[column].mean())
elif default=="zero":
if (people[column].dtype=="float64") | (people[column].dtype=="int64"):
people[column] = people[column].fillna(0)
return people.dropna().reset_index(drop=True)
print("start handeling NAs")
print("handeling NA in gameLogs")
dataFrames['gameLogs'] = replaceGameLogs(dataFrames['gameLogs'])
print("handeling NA in people")
dataFrames['people'] = replacePeople(dataFrames['people'], dataFrames['gameLogs'], default)
print("handeling NA in teams")
dataFrames['teams'] = replaceFrame(dataFrames['teams'],
['Home: Team', 'Visiting: Team']
, dataFrames['gameLogs'], default, lastKnownState, inpurity)
print("handeling NA in managers")
dataFrames['managers'] = replaceFrame(dataFrames['managers'],
['Home team manager ID', 'Visiting team manager ID']
, dataFrames['gameLogs'], default, lastKnownState, inpurity)
print("handeling NA in fieldings")
dataFrames['fieldings'] = replaceFrame(dataFrames['fieldings'],
['Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID'
,'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID'
,'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'
,'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID'
,'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID'
,'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
, dataFrames['gameLogs'], default, lastKnownState, inpurity)
print("handeling NA in pitchings")
dataFrames['pitchings'] = replaceFrame(dataFrames['pitchings'],
['Home starting pitcher ID', 'Visiting starting pitcher ID']
, dataFrames['gameLogs'], default, lastKnownState, inpurity)
print("handeling NA in battings")
dataFrames['battings'] = replaceFrame(dataFrames['battings'],
['Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID'
,'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID'
,'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'
,'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID'
,'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID'
,'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
, dataFrames['gameLogs'], default, lastKnownState, inpurity)
print("NAs handeled")
if saveState:
save(path/'Replaced', dataFrames)
return dataFrames
def asPerformance(path, dataFrames, saveState=True):
def asPerformanceGameLogs(gameLogs):
gameLogs['Row'] = range(0,gameLogs.index.size)
gameLogs['Visiting: Win'] = gameLogs['Visiting: Score']>gameLogs['Home: Score']
gameLogs['Home: Win'] = gameLogs['Visiting: Score']<gameLogs['Home: Score']
gameLogs['Visiting: Fielding performance'] = (0
+1.5*gameLogs['Visiting putouts']
+1.25*gameLogs['Visiting assists']
+2.25*gameLogs['Visiting double plays']
-2*gameLogs['Visiting errors']
#-0.75*gameLogs['Visiting passed balls']
#-0.75*gameLogs['Visiting wild pitches']
#-1.5*gameLogs['Home stolen bases']
#+3.5*gameLogs['Visiting caught stealing']
)
gameLogs['Home: Fielding performance'] = (0
+1.5*gameLogs['Home putouts']
+1.25*gameLogs['Home assists']
+2.25*gameLogs['Home double plays']
-2*gameLogs['Home errors']
#-0.75*gameLogs['Home passed balls']
#-0.75*gameLogs['Home wild pitches']
#-1.5*gameLogs['Visiting stolen bases']
#+3.5*gameLogs['Home caught stealing']
)
gameLogs['Visiting: Pitching performance'] = (0
-1*gameLogs['Home hits']
-2*gameLogs['Home team earned runs']
-3*gameLogs['Home homeruns']
-1*gameLogs['Home walks']
+5*gameLogs['Visiting strikeouts']
+2*gameLogs['Visiting intentional walks']
-0.5*gameLogs['Visiting wild pitches']
-0.25*gameLogs['Home hit-by-pitch']
-0.25*gameLogs['Visiting balks']
-2*gameLogs['Home: Score']
-0.75*gameLogs['Home sacrifice hits']
-0.75*gameLogs['Home sacrifice flies']
+3*gameLogs['Visiting grounded into double plays'])
gameLogs['Home: Pitching performance'] = (0
-1*gameLogs['Visiting hits']
-2*gameLogs['Visiting team earned runs']
-3*gameLogs['Visiting homeruns']
-1*gameLogs['Visiting walks']
+5*gameLogs['Home strikeouts']
+2*gameLogs['Home intentional walks']
-0.5*gameLogs['Home wild pitches']
-0.25*gameLogs['Visiting hit-by-pitch']
-0.25*gameLogs['Home balks']
-2*gameLogs['Visiting: Score']
-0.75*gameLogs['Visiting sacrifice hits']
-0.75*gameLogs['Visiting sacrifice flies']
+3*gameLogs['Home grounded into double plays'])
gameLogs['Visiting: Batting performance'] = (0
+0.5*gameLogs['Visiting at-bats']
+2*gameLogs['Visiting: Score']
+1*gameLogs['Visiting hits']
+2*gameLogs['Visiting doubles']
+3*gameLogs['Visiting triples']
+3*gameLogs['Visiting homeruns']
+0.5*gameLogs['Visiting RBI']
+1.25*gameLogs['Visiting stolen bases']
-1*gameLogs['Home caught stealing']
+0.25*gameLogs['Visiting walks']
-2*gameLogs['Home strikeouts']
+0.75*gameLogs['Visiting intentional walks']
+0.25*gameLogs['Visiting hit-by-pitch']
+0.75*gameLogs['Visiting sacrifice hits']
+0.75*gameLogs['Visiting sacrifice flies']
-3*gameLogs['Home grounded into double plays'])
gameLogs['Home: Batting performance'] = (0
+0.5*gameLogs['Home at-bats']
+2*gameLogs['Home: Score']
+1*gameLogs['Home hits']
+2*gameLogs['Home doubles']
+3*gameLogs['Home triples']
+3*gameLogs['Home homeruns']
+0.5*gameLogs['Home RBI']
+1.25*gameLogs['Home stolen bases']
-1*gameLogs['Visiting caught stealing']
+0.25*gameLogs['Home walks']
-2*gameLogs['Visiting strikeouts']
+0.75*gameLogs['Home intentional walks']
+0.25*gameLogs['Home hit-by-pitch']
+0.75*gameLogs['Home sacrifice hits']
+0.75*gameLogs['Home sacrifice flies']
-3*gameLogs['Visiting grounded into double plays'])
gameLogs['Visiting: Pythagorean expectation'] = (
gameLogs['Visiting: Score']**1.83)/(gameLogs['Visiting: Score']**1.83+gameLogs['Home: Score']**1.83)
gameLogs['Home: Pythagorean expectation'] = (
gameLogs['Home: Score']**1.83)/(gameLogs['Home: Score']**1.83+gameLogs['Visiting: Score']**1.83)
#gameLogs['Visiting: BABIP'] = (
# (gameLogs['Visiting hits']-gameLogs['Visiting homeruns'])/(gameLogs['Visiting at-bats']-gameLogs['Visiting strikeouts']-gameLogs['Visiting homeruns']+gameLogs['Visiting sacrifice flies']))
#gameLogs['Home: BABIP'] = (
# (gameLogs['Home hits']-gameLogs['Home homeruns'])/(gameLogs['Home at-bats']-gameLogs['Home strikeouts']-gameLogs['Home homeruns']+gameLogs['Home sacrifice flies']))
gameLogs['League Diffrence'] = gameLogs['Home league AL'].astype('int32')-gameLogs['Visiting league AL'].astype('int32')
return gameLogs[['Row','Date','Visiting: Team','Home: Team','Visiting: Score','Home: Score','Visiting: Win','Home: Win','League Diffrence',
'Visiting: Fielding performance','Home: Fielding performance','Visiting: Pitching performance','Home: Pitching performance',
'Visiting: Batting performance','Home: Batting performance','Visiting: Pythagorean expectation','Home: Pythagorean expectation',
#'Visiting: BABIP','Home: BABIP',
'Visiting team manager ID','Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID',
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']]
def asPerformancePeople(people):
people['BMI'] = people['weight']/(people['height']**2)
return people[['yearID','playerID','BMI','bats right','bats left','throws right']]
def asPerformanceTeams(teams):
teams['Win rate'] = teams['Wins']/teams['Games']
teams['Season Performance'] = teams[['Division winner','League winner','World series winner']].mean(axis=1)+1/teams['Rank']
teams['Pythagorean expectation'] = (teams['Runs scored']**1.83)/(teams['Runs scored']**1.83+teams['Opponents runs scored']**1.83)
#teams['BABIP'] = ((teams['Hits by batters']-teams['Homeruns'])/
# (teams['At bats']-teams['Strikeouts']-teams['Homeruns']+teams['Sacrifice flies']))
#return teams[['yearID','teamID','Win rate','Season Performance','Pythagorean expectation','BABIP']]
return teams[['yearID','teamID','Win rate','Season Performance','Pythagorean expectation']]
def asPerformanceManagers(managers):
managers['Win rate'] = managers['Wins']/managers['Games']
return managers[['yearID','playerID','Win rate']]
def asPerformanceFieldings(fieldings):
fieldings['Fielding performance'] = (0
+1.5*fieldings['Putouts']
+1.25*fieldings['Assists']
+2.25*fieldings['Double plays']
-2*fieldings['Error']
#-0.75*fieldings['Passed Balls']
#-0.75*fieldings['Wild Pitches']
#-1.5*fieldings['Opponent Stolen Bases']
#+3.5*fieldings['Opponents Caught Stealing']
)
return fieldings[['yearID','playerID','Fielding performance']]
def asPerformancePitchings(pitchings):
pitchings['Pitching performance'] = (0
-1*pitchings['Hits']
-2*pitchings['Earned Runs']
-3*pitchings['Homeruns']
-1*pitchings['Walks']
+5*pitchings['Strikeouts']
+2*pitchings['Intentional walks']
-0.5*pitchings['Wild pitches']
-0.25*pitchings['Batters hit by pitch']
-0.25*pitchings['Balks']
-2*pitchings['Runs allowed']
-0.75*pitchings['Batters sacrifices']
-0.75*pitchings['Batters sacrifice flies']
+3*pitchings['Grounded into double plays'])
pitchings['Strikeouts per walk'] = pitchings['Strikeouts']/pitchings['Walks'].replace(0,1)
pitchings['Win rate'] = pitchings['Wins']/pitchings['Games']
pitchings['Homeruns per game'] = pitchings['Homeruns']/pitchings['Games']
pitchings['Shutouts per game'] = pitchings['Shutouts']/pitchings['Games']
pitchings['Saves per game'] = pitchings['Saves']/pitchings['Games']
return pitchings[['yearID','playerID','Win rate','Pitching performance','Strikeouts per walk','Homeruns per game','Shutouts per game','Saves per game','ERA']]
def asPerformanceBattings(battings):
battings['Batting performance'] = (0
+0.5*battings['At bats']
+2*battings['Runs']
+1*battings['Hits']
+2*battings['Doubles']
+3*battings['Triples']
+3*battings['Homeruns']
+0.5*battings['Runs batted in']
+1.25*battings['Stolen bases']
-1*battings['Caught stealing']
+0.25*battings['Base on balls']
-2*battings['Strikeouts']
+0.75*battings['Intentional walks']
+0.25*battings['Hit by pitch']
+0.75*battings['Sacrifice hits']
+0.75*battings['Sacrifice flies']
-3*battings['Grounded into double plays'])
return battings[['yearID','playerID','Batting performance']]
print("creating performances")
print("evaluating performance in gameLogs")
dataFrames['gameLogs'] = asPerformanceGameLogs(dataFrames['gameLogs'])
print("evaluating performance in people")
dataFrames['people'] = asPerformancePeople(dataFrames['people'])
print("evaluating performance in teams")
dataFrames['teams'] = asPerformanceTeams(dataFrames['teams'])
print("evaluating performance in managers")
dataFrames['managers'] = asPerformanceManagers(dataFrames['managers'])
print("evaluating performance in fieldings")
dataFrames['fieldings'] = asPerformanceFieldings(dataFrames['fieldings'])
print("evaluating performance in pitchings")
dataFrames['pitchings'] = asPerformancePitchings(dataFrames['pitchings'])
print("evaluating performance in battings")
dataFrames['battings'] = asPerformanceBattings(dataFrames['battings'])
print("performances created")
if saveState:
save(path/'Performance', dataFrames)
return dataFrames
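# Quick worked example of the Pythagorean expectation used above (the 1.83 exponent matches
# the formulas in asPerformanceGameLogs/asPerformanceTeams): a team scoring 800 runs while
# allowing 700 is expected to win about 800**1.83 / (800**1.83 + 700**1.83) ~= 0.56 of its
# games. The helper below is only an illustrative sketch, not used by the pipeline.
def _pythagorean_expectation(runs_scored, runs_allowed, exponent=1.83):
    """Return the expected win share implied by runs scored and runs allowed."""
    return runs_scored**exponent / (runs_scored**exponent + runs_allowed**exponent)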
def merge(path, dataFrames, saveState=True):
def mergeFrame(frame, gameLogs, visitingColumns, homeColumns):
def mergeColumns(columns, gameLogs, frameColumn):
temp = gameLogs[columns]
temp['yearID'] = gameLogs['Date'].dt.year-1
for column in columns:
temp = pd.merge(temp, frame[['playerID', 'yearID', frameColumn]], left_on=[column,'yearID'], right_on=['playerID','yearID'], how="left").drop(columns=['playerID',column])
return temp.drop(columns=['yearID']).mean(axis=1)
merged = gameLogs[['Row']]
for frameColumn in frame.drop(columns=['playerID','yearID']).columns:
merged['Visiting: Average '+frameColumn] = mergeColumns(visitingColumns, gameLogs, frameColumn)
merged['Home: Average '+frameColumn] = mergeColumns(homeColumns, gameLogs, frameColumn)
return merged
def mergePeople(people, gameLogs):
def getAges(yearIDs, teamLogs):
teamLogs = gameLogs[teamLogs]
teamLogs['year'] = gameLogs['Date'].dt.year-1
for column in teamLogs.drop(columns=['year']):
teamLogs = pd.merge(teamLogs, yearIDs, left_on=column ,right_on='playerID').drop(columns=['playerID'])
return teamLogs['year']-teamLogs.drop(columns=['year']).mean(axis=1)
def getSide(sideIDs, teamLogs):
teamLogs = gameLogs[teamLogs]
merged = pd.DataFrame()
for sideColumn in sideIDs.drop(columns=['playerID']):
temp = teamLogs
for column in teamLogs.columns:
temp = pd.merge(temp, sideIDs[['playerID', sideColumn]], left_on=column ,right_on='playerID').drop(columns=['playerID'])
merged[sideColumn] = temp.sum(axis=1)
merged['Batting side'] = merged['bats right']-merged['bats left']
merged['Throwing side'] = merged['throws right']-(10-merged['throws right'])
return merged[['Batting side','Throwing side']]
def getBMI(BMIids, teamLogs):
teamLogs = gameLogs[teamLogs]
for column in teamLogs.columns:
teamLogs = pd.merge(teamLogs, BMIids, left_on=column, right_on='playerID').drop(columns=['playerID'])
return teamLogs.mean(axis=1)
visiting = [
'Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID']
home = [
'Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
merged = gameLogs[['Row']]
merged['Visiting: Average age'] = getAges(people[['playerID', 'yearID']], visiting)
sides = getSide(people[['bats right','bats left','throws right','playerID']], visiting)
merged['Visiting: Batting side'] = sides['Batting side']
merged['Visiting: Throwing side'] = sides['Throwing side']
merged['Visiting: Average BMI'] = getBMI(people[['BMI','playerID']], visiting)
merged['Home: Average age'] = getAges(people[['playerID', 'yearID']], home)
sides = getSide(people[['bats right','bats left','throws right','playerID']], home)
merged['Home: Batting side'] = sides['Batting side']
merged['Home: Throwing side'] = sides['Throwing side']
merged['Home: Average BMI'] = getBMI(people[['BMI','playerID']], home)
return merged
def mergeTeams(teams, gameLogs):
teamLogs = gameLogs[['Visiting: Team','Home: Team']]
teamLogs['yearID'] = gameLogs['Date'].dt.year-1
merged = gameLogs[['Row']]
for teamColumn in teams.drop(columns=['teamID','yearID']):
merged['Visiting: Team - '+teamColumn] = pd.merge(teamLogs, teams[['teamID','yearID',teamColumn]], left_on=['yearID','Visiting: Team'], right_on=['yearID', 'teamID'], how="left")[teamColumn]
merged['Home: Team - '+teamColumn] = pd.merge(teamLogs, teams[['teamID','yearID',teamColumn]], left_on=['yearID','Home: Team'], right_on=['yearID', 'teamID'], how="left")[teamColumn]
return merged
def mergePitchings(pitchings, gameLogs):
teamLogs = gameLogs[['Visiting starting pitcher ID','Home starting pitcher ID']]
teamLogs['yearID'] = gameLogs['Date'].dt.year-1
merged = gameLogs[['Row']]
for pitchColumn in pitchings.drop(columns=['playerID','yearID']):
merged['Visiting: Pitcher - '+pitchColumn] = pd.merge(teamLogs, pitchings[['playerID','yearID',pitchColumn]], left_on=['yearID','Visiting starting pitcher ID'], right_on=['yearID', 'playerID'], how="left")[pitchColumn]
merged['Home: Pitcher - '+pitchColumn] = pd.merge(teamLogs, pitchings[['playerID','yearID',pitchColumn]], left_on=['yearID','Home starting pitcher ID'], right_on=['yearID', 'playerID'], how="left")[pitchColumn]
return merged
def mergeManagers(managers, gameLogs):
teamLogs = gameLogs[['Visiting team manager ID','Home team manager ID']]
teamLogs['yearID'] = gameLogs['Date'].dt.year-1
merged = gameLogs[['Row']]
for managerColumn in managers.drop(columns=['playerID','yearID']):
merged['Visiting: Manager - '+managerColumn] = pd.merge(teamLogs, managers[['playerID','yearID',managerColumn]], left_on=['yearID','Visiting team manager ID'], right_on=['yearID', 'playerID'], how="left")[managerColumn]
merged['Home: Manager - '+managerColumn] = pd.merge(teamLogs, managers[['playerID','yearID',managerColumn]], left_on=['yearID','Home team manager ID'], right_on=['yearID', 'playerID'], how="left")[managerColumn]
return merged
def createRollingStats(gameLogs, toRollVisiting=[], toRollHome=[], vsWindow=5, generalWindow=10, replaceNA="mean"):
def getTeamPart(teamColumn ,fromFrame):
teams = []
for team in fromFrame[teamColumn].unique():
teams.append(fromFrame[fromFrame[teamColumn]==team])
return teams
def getRollingMean(target ,team, window):
return team.loc[:,target].rolling(window).mean().shift(1)
def getValues(targetTeamType, teams, columns, vsWindow, generalWindow):
teamColumns = []
versusColumns = []
versus = []
for team in teams:
for column in columns:
newColumn = column + ' ratio'
teamColumns.append(newColumn)
team[newColumn] = getRollingMean(column, team, generalWindow)
tempvs = getTeamPart(targetTeamType, team)
for vsteam in tempvs:
for column in columns:
newColumn = column + ' versus ratio'
versusColumns.append(newColumn)
vsteam[newColumn] = getRollingMean(column, vsteam, vsWindow)
versus.append(pd.concat(tempvs))
return pd.merge(pd.concat(teams)[['Row']+list(dict.fromkeys(teamColumns))], pd.concat(versus)[['Row']+list(dict.fromkeys(versusColumns))], on='Row')
gameLogs = gameLogs[['Row','Visiting: Team','Home: Team','Visiting: Score','Home: Score','Visiting: Win','Home: Win','League Diffrence']+toRollVisiting+toRollHome]
gameLogs['Home: Odd'] = gameLogs['Home: Score']/(gameLogs['Home: Score']+gameLogs['Visiting: Score']).replace(0,1)
gameLogs['Visiting: Odd'] = gameLogs['Visiting: Score']/(gameLogs['Visiting: Score']+gameLogs['Home: Score']).replace(0,1)
toRollVisiting = ['Visiting: Score','Visiting: Win','Visiting: Odd']+toRollVisiting
toRollHome = ['Home: Score','Home: Win','Home: Odd']+toRollHome
visiting = gameLogs[['Row','Visiting: Team','Home: Team']+toRollVisiting]
home = gameLogs[['Row','Visiting: Team','Home: Team']+toRollHome]
visitings = getValues('Home: Team', getTeamPart('Visiting: Team', visiting), toRollVisiting, vsWindow, generalWindow)
homes = getValues('Visiting: Team', getTeamPart('Home: Team', home), toRollHome, vsWindow, generalWindow)
teams = pd.merge(visitings, homes, on='Row').sort_values('Row').reset_index(drop=True)
teams = pd.merge(gameLogs[['Row','League Diffrence']], teams, on='Row')
if replaceNA=="mean":
for column in teams.columns:
teams[column] = teams[column].fillna(teams[column].mean())
elif replaceNA=="drop":
teams = teams.dropna().reset_index()
return teams
print("start merging")
print("merge people")
dataFrames['people'] = mergePeople(dataFrames['people'], dataFrames['gameLogs'])
print("merge teams")
dataFrames['teams'] = mergeTeams(dataFrames['teams'], dataFrames['gameLogs'])
print("merge managers")
dataFrames['managers'] = mergeManagers(dataFrames['managers'], dataFrames['gameLogs'])
print("merge fieldings")
dataFrames['fieldings'] = mergeFrame(dataFrames['fieldings'], dataFrames['gameLogs'],[
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'],[
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID'])
print("merge pitchings")
dataFrames['pitchings'] = mergePitchings(dataFrames['pitchings'],dataFrames['gameLogs'])
print("merge battings")
dataFrames['battings'] = mergeFrame(dataFrames['battings'], dataFrames['gameLogs'],[
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID'],[
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID'])
print("merging complete")
print("creating rolling stats")
dataFrames['stats'] = createRollingStats(dataFrames['gameLogs'],
['Visiting: Fielding performance','Visiting: Pitching performance','Visiting: Batting performance','Visiting: Pythagorean expectation'#,'Visiting: BABIP'
],
['Home: Fielding performance','Home: Pitching performance','Home: Batting performance','Home: Pythagorean expectation'#,'Home: BABIP'
])
print("dropping ID columns")
dataFrames['gameLogs'] = dataFrames['gameLogs'].drop(columns=[
'Visiting team manager ID','Visiting starting pitcher ID','League Diffrence',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID',
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID'])
if saveState:
save(path/'Merged', dataFrames, stats=True)
return dataFrames
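# Illustrative end-to-end sketch of the preprocessing chain defined above. The base path is
# an assumption; saveState is switched off here so nothing is written to disk, but each
# stage can also persist its output to its own subfolder (Filtered, Replaced, Performance,
# Merged) when saveState=True.
def _example_preprocessing_pipeline(base_path=Path('data')):
    """Sketch only: filter -> replace -> asPerformance -> merge."""
    frames = filter(base_path, saveState=False)
    frames = replace(base_path, frames, default="mean", saveState=False)
    frames = asPerformance(base_path, frames, saveState=False)
    frames = merge(base_path, frames, saveState=False)
    return frames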
def createLearningData(data, path, excludes=[], operator="-", dropRowIndex=True):
    def createDiffrence(dataFrame, operator, outcast=None):
        homes = []
        visitings = {}
        # avoid a shared mutable default argument; start from an empty frame if none is given
        temp = outcast if outcast is not None else pd.DataFrame()
for column in dataFrame.drop(columns=temp.columns).columns:
if column.find("Visiting")>-1:
visitings[column.replace("Visiting: ","")] = column.replace("Visiting: ","")
elif column.find("Home")>-1:
homes.append(column.replace("Home: ",""))
else:
temp[column] = dataFrame[column]
for homeCol in homes:
visitingCol = visitings.pop(homeCol)
if operator=="-":
temp[homeCol+" diffrence"] = dataFrame['Home: '+homeCol]-dataFrame['Visiting: '+visitingCol]
elif operator=="/":
temp[homeCol+" diffrence"] = dataFrame['Home: '+homeCol]/dataFrame['Visiting: '+visitingCol].replace(0,1)
elif operator=="/sum":
temp[homeCol+" diffrence"] = dataFrame['Home: '+homeCol]/(dataFrame['Home: '+homeCol]+dataFrame['Visiting: '+visitingCol]).replace(0,1)
else:
temp[homeCol+" diffrence"] = dataFrame['Home: '+homeCol]>dataFrame['Visiting: '+visitingCol]
return temp
for exc in excludes:
data.pop(exc)
path = path/'Learning'
gameLogs = data.pop('gameLogs')
predictors = gameLogs[['Row']]
for frame in data:
predictors = pd.merge(predictors, data[frame], on='Row', how="left")
predictors = predictors.dropna()
targets = | pd.merge(predictors[['Row']], gameLogs, on='Row', how="left") | pandas.merge |
#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health
### Neville Subproject: Using Random Forests, Factor Analysis, and Recursive Feature Selection to Screen Variables for Impacts on Public Health
## Florida Charts Diabetes Mortality by Census Tract: Python Computing Language Code Script by DrewC!
### Section 1: Import Libraries, Import Dataset, Prepare for Classification
## Import Standard Libraries
import os # Included in every script DC!
import numpy as np # Included in every code script DC!
import pandas as pd # Included in every code script for DC!
## Import Specific Libraries and Packages
import sklearn.ensemble # SciKit Learn package contains many classification options beyond those used below
from sklearn.ensemble import RandomForestClassifier as rfc # Random Forest classification component
from sklearn.feature_selection import RFE as rfe # Recursive Feature selection component
from sklearn.svm import SVR as svr # Linear model for RFE
import statsmodels.api as sm # Multiple regression model
## Import Dataset
os.chdir("C:/Users/drewc/GitHub/Healthy_Neighborhoods") # Set wd to project repository
df_nev = | pd.read_csv("_data/neville_dm2_acs.csv", encoding = "ISO-8859-1", low_memory= False) | pandas.read_csv |
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from rumi.processing import demand
from rumi.io import loaders
from rumi.io import demand as demandio
import pytest
def get_GDP1(dummy):
index = pd.MultiIndex.from_product([['INDIA'],
['ER', 'WR'],
[11, 12, 13, 14, 15]],
names=['ModelGeography',
'SubGeography1',
'Year'])
return | pd.DataFrame({'GDP': [1, 2, 4, 8, 4]*2}, index=index) | pandas.DataFrame |
'''
Number: 4
This file models sequences of words using the statistical properties of n-grams.
I follow the Markov assumption (or independence assumption).
As for probabilities, I use and implement the Kneser-Ney Smoothing method.
'''
import pandas as pd
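# Reference sketch of the interpolated Kneser-Ney estimate this module builds towards; the
# argument names are illustrative and do not correspond directly to the DTM columns below.
def _kneser_ney_bigram_prob(count_w1w2, count_w1, distinct_followers_w1,
                            continuation_prob_w2, discount=0.75):
    """P_KN(w2|w1) = max(c(w1,w2)-d, 0)/c(w1) + lambda(w1) * P_continuation(w2)."""
    lam = discount * distinct_followers_w1 / count_w1   # back-off weight lambda(w1)
    return max(count_w1w2 - discount, 0) / count_w1 + lam * continuation_prob_w2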
# --> Unigrams Probabilities (something wrong with this)
def kneserNey_prob_uni(profanity=False, pickled=True):
'''
    profanity: bool - if True, it works on the dtm with profanities filtered out
    pickled: bool - if True the result is pickled
'''
# read the DTM for bigrams
path = ''
if profanity:
path = 'pickles/DTMs/dtm-bi/integrated/dtm_bi_badnot.pkl'
else:
path = 'pickles/DTMs/dtm-bi/integrated/dtm_bi.pkl'
dtm_bi = pd.read_pickle(path)
# read the DTM for unigrams
path = ''
if profanity:
path = 'pickles/DTMs/dtm-uni/integrated/dtm_uni_badnot.pkl'
else:
path = 'pickles/DTMs/dtm-uni/integrated/dtm_uni.pkl'
dtm_uni = pd.read_pickle(path)
# compute probabilities
prob_uni = ((dtm_bi.groupby('Word2')[['Count']].count() / len(dtm_bi))).rename(columns={'Count':'Prob'})
#prob_uni.sort_values(by='Prob', ascending=False, inplace=True)
# merge with unigrams DTM
count_prob_uni = pd.merge(dtm_uni, prob_uni, how='left', left_on='Word1', right_on='Word2')
count_prob_uni.sort_values('Prob', ascending=False, inplace=True)
count_prob_uni.dropna(inplace=True)
if pickled:
path = ''
if profanity:
path = 'pickles/Probs/profanity-not/prob_uni_np.pkl'
else:
path = 'pickles/Probs/general/prob_uni.pkl'
count_prob_uni.to_pickle(path)
return count_prob_uni
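# Illustrative call (assumes the pickled DTMs referenced above already exist on disk);
# pickled=False keeps the run side-effect free.
def _example_unigram_probs():
    """Sketch only: top unigrams by Kneser-Ney continuation probability."""
    prob_uni = kneserNey_prob_uni(profanity=False, pickled=False)
    return prob_uni.head(10)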
# --> N-gram Probabilities (Kneser-Ney)
def kneserNey_ngrams(typ, discount_weight=0.75, profanity=False, pickled=True):
'''
typ: type of n-grams which you want to get the probability for
discount_weight: discount weight/value
    profanity: bool - if True, it works on the dtm with profanities filtered out
pickled: bool - if True the result is pickled
'''
valid_typ = ['uni', 'bi', 'tri', 'quad', 'penta', 'hexa', 'hepta', 'octa', 'nona', 'deca']
if typ not in valid_typ[1:]:
raise ValueError("dtm: 'typ' must be one of %r." % valid_typ[1:])
n = valid_typ.index(typ) + 1
# read the DTM for bigrams and unigrams probabilities
path_ngrams = ''
path_prob = ''
if profanity:
path_ngrams = 'pickles/DTMs/dtm-' + typ + '/integrated/dtm_' + typ + '_badnot.pkl'
path_prob = 'pickles/Probs/profanity-not/prob_' + valid_typ[valid_typ.index(typ)-1] + '_np.pkl'
else:
path_ngrams = 'pickles/DTMs/dtm-' + typ + '/integrated/dtm_' + typ + '.pkl'
path_prob = 'pickles/Probs/general/prob_' + valid_typ[valid_typ.index(typ)-1] + '.pkl'
dtm = pd.read_pickle(path_ngrams)
probs = pd.read_pickle(path_prob)
probs.drop(columns=['Count'], inplace=True)
cols = ['Word' + format(i + 2) for i in range(n - 1)]
cols.append('Prob_w2_wn')
probs.columns = cols
count_prob = dtm.copy()
# get the n-1_grams probability of 'Word2 ... Wordn' & add to count_prob
merge_on = ['Word'+format(i+2) for i in range(n-1)]
count_prob = pd.merge(count_prob, probs, how='left', left_on=merge_on, right_on=merge_on)
# compute count sum of 'Word1....Wordn-1' in ngrams & add to count_prob
aggregate_on = ['Word'+format(i+1) for i in range(n-1)]
count_w1_wn__1 = dtm.groupby(aggregate_on)[['Count']].sum().rename(columns={'Count': 'Count_w1_wn__1'})
count_prob = pd.merge(count_prob, count_w1_wn__1, how='left', left_on=aggregate_on, right_on=aggregate_on)
# compute number of ngrams with 'Word1...Wordn-1' as the first n-1 words & add to count_prob
num_w1_wn__1 = dtm.groupby(aggregate_on)[['Count']].count().rename(columns={'Count': 'Num_w1_wn__1'})
count_prob = | pd.merge(count_prob, num_w1_wn__1, how='left', left_on=aggregate_on, right_on=aggregate_on) | pandas.merge |
import matplotlib
import pandas as pd
CSV_FILE = 'data.csv'
class DataProcessing:
def __init__(self):
self.df = pd.read_csv(CSV_FILE, parse_dates=['Data'])
self.last_date = self.df['Data'].max().date()
self.today = | pd.Timestamp.today() | pandas.Timestamp.today |
import pandas as pd
import textacy
import textblob
import en_core_web_sm
from spacy.symbols import CONJ, DET  # needed by entity_statements() below
nlp = en_core_web_sm.load()
# Multiprocessing Imports
from dask import dataframe as dd
from dask.multiprocessing import get
from multiprocessing import cpu_count
# Sentiment Imports
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Local Imports
from src.utils.pandas_utils import pivot_df_to_row
##
def text_vectorize_and_cluster(text, df=None, vectorizer=None, clusterer=None,
vector_params=None, clusterer_params=None,
outlier_scores=False, one_hot_labels=False, return_df=False,
return_type='clusters'):
""" Given processed text, vectorize and cluster it. Return cluster labels or cluster labels
along with fitted vectorizer and clusterer.
Parameters
----------
text : object
Object which contains text that will be passed to the transformer's .fit_transform() method
As such, text must already be processed and in correct format.
df : Pandas DataFrame
Optional dataframe attach clustering results to
vectorizer: object
Class for text vectorization. Must follow sklearn transformer convention and
implement .fit_transform() method
E.g. CountVectorizer from sklearn
vector_params: dict[str:obj]
Dictionary to pass to vectorizer as parameters
clusterer: object
Class for clustering. Must follow sklearn estimator convention and
implement .fit_predict() method for implementing cluster assignment
clusterer_params: dict[str:obj]
Dictionary to pass to clusterer as parameters
outlier_scores: boolean
Flag to indicate outlier scores computed by clusterer. Accessed
from clusterer.outlier_scores_ attribute
one_hot_labels: boolean
Flag to indicate if cluster labels should be one hot encoded
instead of returns as a one dimensional array of ordinal
integer labels
return_df: boolean
Flag to indicate if results should be returned concatenated
with the dataframe passed to 'df' kword arg
    return_type: str in ['clusters', 'all']
        String indicating return type. Must be one of ['clusters', 'all']
clusters: Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
all: Return the fitted vectorizer, clusterer and cluster label results
Returns
-------
clusters: pd.Series or pd.DataFrame
Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
clusters, vectorizer, clusterer: object, object, pd.Series or pd.DataFrame
Return the fitted vectorizer, clusterer and cluster label results
"""
# Check vectorizer and clusterer for correct methods
assert "fit_transform" in dir(vectorizer), "vectorizer has no 'fit_transform' method"
assert "fit_predict" in dir(clusterer), "clusterer has no 'fit_predict' method"
if return_df:
assert isinstance(df, pd.DataFrame), "If specifying 'return_df', data must be passed to argument 'df'"
# Instantiate vectorizer with params if specified
if vector_params:
vectorizer = vectorizer(**vector_params)
# Else instantiate the vectorizer
elif vectorizer:
vectorizer = vectorizer()
    # Fit and transform text to vectors
vectors = vectorizer.fit_transform(text)
# Instantiate vectorizer with params if specified
if clusterer_params:
clusterer = clusterer(**clusterer_params)
elif clusterer:
clusterer = clusterer()
    # Fit and transform vectors to clusters
cluster_labels = clusterer.fit_predict(vectors)
if len(set(clusterer.labels_)) <= 1:
return print('Clusterer could not find any meaningful labels. All data would fall under one cluster')
# Create DataFrame of Cluster Labels
results = pd.DataFrame(cluster_labels, columns=['Cluster_Label'])
# Add Outlier Score if specified
if outlier_scores:
results['Outlier_Score'] = clusterer.outlier_scores_
# Add labels as dummy variables
if one_hot_labels:
one_hot_cols = pd.get_dummies(results['Cluster_Label'], prefix='Cluster_Label')
one_hot_col_names = one_hot_cols.columns.values.tolist()
results = pd.merge(results, one_hot_cols, left_index=True, right_index=True)
# Attach to data if specified
if return_df:
results = pd.merge(df, results, left_index=True, right_index=True)
# Return all or just cluster results
if return_type == 'all':
return results, vectorizer, clusterer
elif return_type == 'clusters':
return results
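# Illustrative usage sketch: pairs the function above with a TF-IDF vectorizer and KMeans.
# Both choices and their parameters are assumptions for demonstration; any transformer with
# fit_transform() and any estimator with fit_predict() can be substituted.
def _example_vectorize_and_cluster(texts):
    """Sketch: cluster an iterable of preprocessed strings, returning ordinal labels."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.cluster import KMeans
    return text_vectorize_and_cluster(
        texts,
        vectorizer=TfidfVectorizer,
        clusterer=KMeans,
        vector_params={'max_features': 5000},
        clusterer_params={'n_clusters': 8, 'random_state': 0},
        return_type='clusters')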
def dask_df_textacy_apply(df, text_col, textacy_col_name='textacy_doc', ncores=None, inplace=False):
"""
Use dask to parallelize apply textacy Doc object creation from a dataframe
Parameters
----------
df : DataFrame
Dataframe which holds the text
text_col : str
The name of the text column in the df
textacy_col_name : str
The name to give to the column with the textacy doc objects
ncores : int
Number of cores to use for multiprocessing. Defaults to all cores in cpu minus one.
inplace : bool
Whether to return the entire df with the textacy doc series concatenated
or only textacy doc series.
Default is False
Returns
-------
DataFrame / Series
Either the dataframe passed as arg with the textacy series as last column or
just the textacy column
"""
    # If no number of cores specified, default to all cores minus one
    if not ncores:
        ncores = cpu_count() - 1
# Partition dask dataframe and map textacy doc apply
# Sometimes this fails because it can't infer the dtypes correctly
# meta=pd.Series(name=0, dtype='object') is a start
# This is also a start https://stackoverflow.com/questions/40019905/how-to-map-a-column-with-dask?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# Possibly both the inner lambda apply and outer lambda df both need metadata?
    textacy_series = dd.from_pandas(df, npartitions=ncores).map_partitions(
lambda df : df[text_col].apply(lambda x : textacy.doc.Doc(x, lang=nlp))).compute(get=get)
# Name the series
textacy_series.name = textacy_col_name
# If inplace return the dataframe and textacy Series
if inplace:
return pd.concat([df, textacy_series], axis=1)
# Else return just the Textacy series
else:
return textacy_series
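# Illustrative call (the 'text' column name is an assumption): parallelise textacy Doc
# creation over all cores but one and keep the original columns alongside the new one.
def _example_parallel_docs(df):
    """Sketch: returns df with an extra 'textacy_doc' column of parsed documents."""
    return dask_df_textacy_apply(df, text_col='text', textacy_col_name='textacy_doc',
                                 inplace=True)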
def load_textacy_corpus(df, text_col, metadata=True, metadata_columns=None):
# Fill text columns nulls with empty strings
df[text_col] = df[text_col].fillna('')
if metadata:
        # Default to using every dataframe column (text column included) as metadata
        metadata_cols = list(df.columns)
        # If a list is provided, use those columns plus the text column
        # (these constitute all the information held in the textacy corpus);
        # build a new list instead of appending so the caller's list is not mutated
        if metadata_columns:
            metadata_cols = metadata_columns + [text_col]
        # Subset to these
        df = df[metadata_cols]
# Convert to nested dict of records
records = df.to_dict(orient='records')
# Split into text and metadata stream
text_stream, metadata_stream = textacy.io.split_records(records, text_col)
# Create Corpus
return textacy.corpus.Corpus(lang='en', texts=text_stream, metadatas=metadata_stream)
# With no metadata
else:
text_stream = (text for text in df[text_col].values)
return textacy.corpus.Corpus(lang='en', texts=text_stream)
# Entity Extraction
def corpus_entity_counts(corpus, include=None, exclude=None):
"""
Given a textacy corpus, return a dataframe of entities and their respective counts.
Parameters
----------
    corpus : textacy.Corpus
        The corpus whose documents are scanned for named entities
include : str or Set[str]
Remove named entities whose type IS NOT in this param;
if “NUMERIC”, all numeric entity types (“DATE”, “MONEY”, “ORDINAL”, etc.) are included
exclude : str or Set[str]
remove named entities whose type IS in this param; if “NUMERIC”,
all numeric entity types (“DATE”, “MONEY”, “ORDINAL”, etc.) are excluded
Returns
-------
Dataframe
A pandas dataframe with entities and their respective counts, sorted by highest count
"""
from collections import Counter
# Extract all entities
entities = [list(textacy.extract.named_entities(doc, include_types=include, exclude_types=exclude))
for doc in
corpus]
# Pull all non-null entities to flattened list
non_null_entities = []
for entity in entities:
if entity:
non_null_entities.extend(entity)
# Change dtype to string so counter can distinguish
non_null_entities = [str(x) for x in non_null_entities]
# Count entities
entity_counts = Counter(non_null_entities)
# Entity Dataframe
df = (pd.DataFrame.from_dict(entity_counts, orient='index')
.reset_index()
.rename(columns={'index':'Entity', 0:'Count'})
.sort_values(by='Count', ascending=False)
.reset_index(drop=True))
return df
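# Illustrative usage sketch: build a corpus with load_textacy_corpus() above and count
# person/organisation entities. The 'text' column name and entity types are assumptions.
def _example_entity_counts(df):
    """Sketch: top 20 PERSON/ORG entities in an assumed 'text' column of df."""
    corpus = load_textacy_corpus(df, text_col='text', metadata=False)
    return corpus_entity_counts(corpus, include={'PERSON', 'ORG'}, exclude='NUMERIC').head(20)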
def entity_statements(doc, entity, ignore_entity_case=True,
min_n_words=1, max_n_words=300, return_entity=False):
"""
Extract sentences with a specified entity present in it
Modified from source code of Textacy's textacy.extract.semistructured_statements()
Args:
doc (``textacy.Doc`` or ``spacy.Doc``)
entity (str): a noun or noun phrase of some sort (e.g. "President Obama",
"global warming", "Python")
ignore_entity_case (bool): if True, entity matching is case-independent
min_n_words (int): min number of tokens allowed in a matching fragment
max_n_words (int): max number of tokens allowed in a matching fragment
Yields:
(``spacy.Span`` or ``spacy.Token``) or (``spacy.Span`` or ``spacy.Token``, ``spacy.Span`` or ``spacy.Token``):
        depending on whether return_entity is enabled or not
Notes:
Inspired by <NAME>, <NAME>, <NAME>. Visual Analytics of
Media Frames in Online News and Blogs. IEEE InfoVis Workshop on Text
Visualization. October, 2013.
Which itself was inspired by by <NAME>.; <NAME>.; <NAME>.; and
<NAME>. 2010. Portable Extraction of Partially Structured Facts from
the Web. In Proc. ICETAL 2010, LNAI 6233, 345-356. Heidelberg, Springer.
"""
if ignore_entity_case is True:
entity_toks = entity.lower().split(' ')
get_tok_text = lambda x: x.lower_
else:
entity_toks = entity.split(' ')
get_tok_text = lambda x: x.text
first_entity_tok = entity_toks[0]
n_entity_toks = len(entity_toks)
#cue = cue.lower()
#cue_toks = cue.split(' ')
#n_cue_toks = len(cue_toks)
def is_good_last_tok(tok):
if tok.is_punct:
return False
if tok.pos in {CONJ, DET}:
return False
return True
for sent in doc.sents:
for tok in sent:
# filter by entity
if get_tok_text(tok) != first_entity_tok:
continue
if n_entity_toks == 1:
the_entity = tok
the_entity_root = the_entity
elif all(get_tok_text(tok.nbor(i=i + 1)) == et for i, et in enumerate(entity_toks[1:])):
the_entity = doc[tok.i: tok.i + n_entity_toks]
the_entity_root = the_entity.root
else:
continue
if return_entity:
yield (the_entity, sent.orth_)
else:
yield (sent.orth_)
break
def list_of_entity_statements(corpus, entity):
"""
Given an entity and a textacy corpus, return a list of all the sentences in which this entity occurs
Parameters
----------
corpus : textacy Corpus object
entity : str
The entity for which to search all the sentences within the corpus
Returns
-------
entity_sentences
A list of strings, each being a sentence which contains the entity search
"""
entity_sentences = [list(entity_statements(doc, entity=entity))
for doc
in corpus
if list(entity_statements(doc, entity=entity))] # If statement that removes null sentences
entity_sentences = [item for sublist in entity_sentences for item in sublist]
return entity_sentences
# Entity Sentiment extractions
def vader_entity_sentiment(df,
textacy_col,
entity,
inplace=True,
vader_sent_types=['neg', 'neu', 'pos', 'compound'],
keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
"""
Pull the descriptive sentiment stats of text sentence with a specified entity in it.
Parameters
----------
df : DataFrame
Dataframe which holds the text
textacy_col : str
The name to give to the column with the textacy doc objects
entity : str
The entity to search the textacy Doc object for
inplace : bool
Whether to return the entire df with the sentiment info or the sentiment info alone
        Default is True
vader_sent_types : list
The type of sentiment to extract. neg: negative, pos: positive, neu: neutral, compound is
comination of all three types of all
keep_stats : list
A list of the summary statistics to keep. Default is all returned by pandas DataFrame.describe() method
Returns
-------
DataFrame
Either the dataframe passed as arg with the sentiment info as trailing columns
or the sentiment descriptive stats by itself
"""
vader_analyzer = SentimentIntensityAnalyzer()
sentiment_rows = []
for text in df[textacy_col].values:
text_entities = list(entity_statements(text, entity))
# Iterate through all sentences and get sentiment analysis
entity_sentiment_info = [vader_analyzer.polarity_scores(sentence)
for
sentence
in
text_entities]
# After taking sentiments, turn into a dataframe and describe
try:
# Indices and columns to keep
keep_stats = keep_stats
keep_cols = vader_sent_types
# Describe those columns
summary_stats = pd.DataFrame(entity_sentiment_info).describe().loc[keep_stats, keep_cols]
# Add row to list
sentiment_rows.append(pivot_df_to_row(summary_stats))
# If there's nothing to describe
except ValueError as e:
# Create a summary stats with nulls
summary_stats = pd.DataFrame(index=keep_stats, columns=keep_cols)
# Add to list of rows
sentiment_rows.append(pivot_df_to_row(summary_stats))
# Concatenate All rows together into one dataframe
sentiment_df = pd.concat(sentiment_rows).add_prefix(entity+'_')
if not inplace:
return sentiment_df.reset_index(drop=True)
else:
# Return original df with new sentiment attached
return pd.concat([df, sentiment_df], axis=1)
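# Illustrative usage sketch: attach VADER sentiment summary statistics for sentences that
# mention a given entity. The column name and the entity string are assumptions; df must
# already carry textacy/spacy Doc objects (e.g. from dask_df_textacy_apply above).
def _example_vader_entity_sentiment(df, entity='Python'):
    """Sketch: df gains <entity>_-prefixed descriptive sentiment columns."""
    return vader_entity_sentiment(df, textacy_col='textacy_doc', entity=entity,
                                  inplace=True)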
def textblob_entity_sentiment(df,
textacy_col,
entity,
inplace=True,
subjectivity=False,
keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
"""
Pull the descriptive sentiment stats of text sentence with a specified entity in it.
Parameters
----------
df : DataFrame
Dataframe which holds the text
textacy_col : str
The name to give to the column with the textacy doc objects
entity : str
The entity to search the textacy Doc object for
inplace : bool
Whether to return the entire df with the sentiment info or the sentiment info alone
        Default is True
subjectivity : bool
Whether to include the subjectivity of the sentiment. Defaults to False.
keep_stats : list
A list of the summary statistics to keep. Default is all returned by pandas DataFrame.describe() method
Returns
-------
DataFrame
Either the dataframe passed as arg with the sentiment info as trailing columns
or the sentiment descriptive stats by itself
"""
sentiment_rows = []
for text in df[textacy_col].values:
text_entities = list(entity_statements(text, entity))
# Iterate through all sentences and get sentiment analysis
entity_sentiment_info = [textblob.TextBlob(sentence).sentiment_assessments
for
sentence
in
text_entities]
# After taking sentiments, turn into a dataframe and describe
try:
# Indices and columns to keep
#keep_stats = ['count', 'mean', 'min', '25%', '50%', '75%', 'max']
keep_cols = ['polarity']
# If subjectivity is set to true, values for it will also be captured
if subjectivity:
keep_cols.append('subjectivity')
# Describe those columns
summary_stats = pd.DataFrame(entity_sentiment_info).describe().loc[keep_stats, keep_cols]
# Add row to list
sentiment_rows.append(pivot_df_to_row(summary_stats))
# If there's nothing to describe
except ValueError as e:
# Create a summary stats with nulls
summary_stats = pd.DataFrame(index=keep_stats, columns=keep_cols)
# Add to list of rows
sentiment_rows.append(pivot_df_to_row(summary_stats))
# Concatenate All rows together into one dataframe
    sentiment_df = pd.concat(sentiment_rows).add_prefix(entity+'_')
    if not inplace:
        return sentiment_df.reset_index(drop=True)
    else:
        # Return original df with new sentiment attached
        return pd.concat([df, sentiment_df], axis=1)
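# Illustrative usage sketch (added for clarity; not part of the original module).
# The dataframe, column name and entity string below are hypothetical.
def _example_textblob_entity_sentiment(df):
    # df is assumed to hold parsed textacy Docs in a 'textacy_doc' column
    stats = textblob_entity_sentiment(df,
                                      textacy_col='textacy_doc',
                                      entity='Acme',
                                      inplace=False,
                                      subjectivity=True)
    # One row of summary statistics per document, with columns prefixed 'Acme_'
    return stats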
from codonPython.tolerance import check_tolerance
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
## TODO migrate from numpy arrays to pandas series/dataframes
testdata = [
pd.Series([1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242]),
    pd.Series([1, 2, 3, 4, 5, 5.5, 6, 6.5, 7]),
]
"""
Calculate transition matrix for each section
in the supermarket
"""
import datetime
import pandas as pd
# correct data (customers with no marked checkout)
def missing_checkout(data):
"""fixes data quality issue:
last customers of the day are missing from checkout """
data["timestamp"] = | pd.to_datetime(data["timestamp"]) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for visualizing the results
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import os
import seaborn
import pandas as pd
from matplotlib import colors as mcolors
seaborn.set_style("darkgrid")
def draw_embedding(embs, names, resultpath, algos, show_label):
"""Function to draw the embedding.
Args:
        embs (matrix): Two-dimensional embeddings.
        names (list): List of string names.
        resultpath (str): Path where the result will be saved.
        algos (str): Name of the algorithm which generated the embeddings.
show_label (bool): If True, prints the string names of the entities and relations.
"""
print("\t drawing figure!")
pos = {}
node_color_mp = {}
unique_ent = set(names)
colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
tot_col = len(colors)
j = 0
for i, e in enumerate(unique_ent):
node_color_mp[e] = colors[j]
j += 1
if j >= tot_col:
j = 0
G = nx.Graph()
hm_ent = {}
for i, ent in enumerate(names):
hm_ent[i] = ent
G.add_node(i)
pos[i] = embs[i]
colors = []
for n in list(G.nodes):
colors.append(node_color_mp[hm_ent[n]])
plt.figure()
nodes_draw = nx.draw_networkx_nodes(G,
pos,
node_color=colors,
node_size=50)
nodes_draw.set_edgecolor('k')
if show_label:
nx.draw_networkx_labels(G, pos, font_size=8)
if not os.path.exists(resultpath):
os.mkdir(resultpath)
files = os.listdir(resultpath)
file_no = len(
[c for c in files if algos + '_embedding_plot' in c])
filename = algos + '_embedding_plot_' + str(file_no) + '.png'
plt.savefig(str(resultpath / filename), bbox_inches='tight', dpi=300)
# plt.show()
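# Illustrative usage sketch (added): draw_embedding expects 2-D coordinates, e.g. the
# output of the TSNE reduction performed in Visualization.plot_embedding. The points,
# names and output directory below are made up for the example.
def _example_draw_embedding():
    from pathlib import Path
    embs = np.random.rand(10, 2)                          # ten fake 2-D points
    names = ['ent_%d' % (i % 3) for i in range(10)]       # repeated names share a colour
    draw_embedding(embs, names, Path('./figures'), 'TransE_entity', show_label=True)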
def draw_embedding_rel_space(h_emb,
r_emb,
t_emb,
h_name,
r_name,
t_name,
resultpath,
algos,
show_label):
"""Function to draw the embedding in relation space.
Args:
        h_emb (matrix): Two-dimensional embeddings of the head.
        r_emb (matrix): Two-dimensional embeddings of the relation.
        t_emb (matrix): Two-dimensional embeddings of the tail.
        h_name (list): List of string names of the head.
        r_name (list): List of string names of the relation.
        t_name (list): List of string names of the tail.
        resultpath (str): Path where the result will be saved.
        algos (str): Name of the algorithm which generated the embeddings.
show_label (bool): If True, prints the string names of the entities and relations.
"""
print("\t drawing figure!")
pos = {}
node_color_mp_ent = {}
node_color_mp_rel = {}
unique_ent = set(h_name) | set(t_name)
unique_rel = set(r_name)
colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())
tot_col = len(colors)
j = 0
for i, e in enumerate(unique_ent):
node_color_mp_ent[e] = colors[j]
j += 1
if j >= tot_col:
j = 0
tot_col = len(colors)
j = 0
for i, r in enumerate(unique_rel):
node_color_mp_rel[r] = colors[j]
j += 1
if j >= tot_col:
j = 0
G = nx.DiGraph()
idx = 0
head_colors = []
rel_colors = []
tail_colors = []
head_nodes = []
tail_nodes = []
rel_nodes = []
for i in range(len(h_name)):
G.add_edge(idx, idx + 1)
G.add_edge(idx + 1, idx + 2)
head_nodes.append(idx)
rel_nodes.append(idx + 1)
tail_nodes.append(idx + 2)
head_colors.append(node_color_mp_ent[h_name[i]])
rel_colors.append(node_color_mp_rel[r_name[i]])
tail_colors.append(node_color_mp_ent[t_name[i]])
pos[idx] = h_emb[i]
pos[idx + 1] = r_emb[i]
pos[idx + 2] = t_emb[i]
idx += 3
plt.figure()
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=head_nodes,
node_color=head_colors,
node_shape='o',
node_size=50)
nodes_draw.set_edgecolor('k')
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=rel_nodes,
node_color=rel_colors,
node_size=50,
node_shape='D',
with_labels=show_label)
nodes_draw.set_edgecolor('k')
nodes_draw = nx.draw_networkx_nodes(G,
pos,
nodelist=tail_nodes,
node_color=tail_colors,
node_shape='*',
node_size=50)
nodes_draw.set_edgecolor('k')
if show_label:
nx.draw_networkx_labels(G, pos, font_size=8)
nx.draw_networkx_edges(G, pos, arrows=True, width=0.5, alpha=0.5)
if not os.path.exists(resultpath):
os.mkdir(resultpath)
files = os.listdir(resultpath)
file_no = len(
[c for c in files if algos + '_embedding_plot' in c])
plt.savefig(str(resultpath / (algos + '_embedding_plot_' + str(file_no) + '.png')), bbox_inches='tight', dpi=300)
# plt.show()
class Visualization(object):
"""Class to aid in visualizing the results and embddings.
Args:
model (object): Model object
vis_opts (list): Options for visualization.
sess (object): TensorFlow session object, initialized by the trainer.
Examples:
>>> from pykg2vec.utils.visualization import Visualization
>>> from pykg2vec.utils.trainer import Trainer
>>> from pykg2vec.core.TransE import TransE
>>> model = TransE()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
>>> viz = Visualization(model=model)
>>> viz.plot_train_result()
"""
def __init__(self, model=None, vis_opts=None, sess=None):
self.sess = sess
if vis_opts:
self.ent_only_plot = vis_opts["ent_only_plot"]
self.rel_only_plot = vis_opts["rel_only_plot"]
self.ent_and_rel_plot = vis_opts["ent_and_rel_plot"]
else:
self.ent_only_plot = False
self.rel_only_plot = False
self.ent_and_rel_plot = False
self.model = model
self.algo_list = ['Complex', 'ConvE','HoLE', 'DistMult', 'DistMult2', 'KG2E_EL','KG2E_KL',
'KGMeta', 'NTN', 'ProjE_pointwise', 'Rescal',
'RotatE', 'SLM', 'SME_Bilinear','SME_Linear', 'TransD', 'TransE', 'TransH',
'TransM', 'TransR', 'TuckER']
self.h_name = []
self.r_name = []
self.t_name = []
self.h_emb = []
self.r_emb = []
self.t_emb = []
self.h_proj_emb = []
self.r_proj_emb = []
self.t_proj_emb = []
if self.model != None:
self.validation_triples_ids = self.model.config.knowledge_graph.read_cache_data('triplets_valid')
self.idx2entity = self.model.config.knowledge_graph.read_cache_data('idx2entity')
self.idx2relation = self.model.config.knowledge_graph.read_cache_data('idx2relation')
self.get_idx_n_emb()
def get_idx_n_emb(self):
"""Function to get the integer ids and the embedding."""
idx = np.random.choice(len(self.validation_triples_ids), self.model.config.disp_triple_num)
triples = []
for i in range(len(idx)):
triples.append(self.validation_triples_ids[idx[i]])
for t in triples:
self.h_name.append(self.idx2entity[t.h])
self.r_name.append(self.idx2relation[t.r])
self.t_name.append(self.idx2entity[t.t])
emb_h, emb_r, emb_t = self.model.get_embed(t.h, t.r, t.t, self.sess)
self.h_emb.append(emb_h)
self.r_emb.append(emb_r)
self.t_emb.append(emb_t)
if self.ent_and_rel_plot:
try:
emb_h, emb_r, emb_t = self.model.get_proj_embed(t.h, t.r, t.t, self.sess)
self.h_proj_emb.append(emb_h)
self.r_proj_emb.append(emb_r)
self.t_proj_emb.append(emb_t)
except Exception as e:
print(e.args)
def plot_embedding(self,
resultpath=None,
algos=None,
show_label=False,
disp_num_r_n_e = 20):
"""Function to plot the embedding.
Args:
resultpath (str): Path where the result will be saved.
show_label (bool): If True, will display the labels.
algos (str): Name of the algorithms that generated the embedding.
disp_num_r_n_e (int): Total number of entities to display for head, tail and relation.
"""
if not self.model:
raise NotImplementedError('Please provide a model!')
if self.ent_only_plot:
x = np.concatenate((self.h_emb, self.t_emb), axis=0)
ent_names = np.concatenate((self.h_name, self.t_name), axis=0)
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
x = np.asarray(x)
ent_names = np.asarray(ent_names)
draw_embedding(x, ent_names, resultpath, algos + '_entity_plot', show_label)
if self.rel_only_plot:
x = self.r_emb
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
draw_embedding(x, self.r_name, resultpath, algos + '_rel_plot', show_label)
if self.ent_and_rel_plot:
length = len(self.h_proj_emb)
x = np.concatenate((self.h_proj_emb, self.r_proj_emb, self.t_proj_emb), axis=0)
print("\t Reducing dimension using TSNE to 2!")
x = TSNE(n_components=2).fit_transform(x)
h_embs = x[:length, :]
r_embs = x[length:2 * length, :]
t_embs = x[2 * length:3 * length, :]
draw_embedding_rel_space(h_embs[:disp_num_r_n_e],
r_embs[:disp_num_r_n_e],
t_embs[:disp_num_r_n_e],
self.h_name[:disp_num_r_n_e],
self.r_name[:disp_num_r_n_e],
self.t_name[:disp_num_r_n_e],
resultpath, algos + '_ent_n_rel_plot', show_label)
def plot_train_result(self):
"""Function to plot the training result."""
algo = self.algo_list
path = self.model.config.path_result
result = self.model.config.path_figures
data = [self.model.config.data]
files = os.listdir(str(path))
files_lwcase = [f.lower() for f in files]
for d in data:
df = pd.DataFrame()
for a in algo:
file_no = len([c for c in files_lwcase if a.lower() in c if 'training' in c])
if file_no < 1:
continue
with open(str(path / (a + '_Training_results_' + str(file_no - 1) + '.csv')), 'r') as fh:
df_2 = pd.read_csv(fh)
if df.empty:
df['Epochs'] = df_2['Epochs']
df['Loss'] = df_2['Loss']
df['Algorithm'] = [a] * len(df_2)
else:
df_3 = pd.DataFrame()
df_3['Epochs'] = df_2['Epochs']
df_3['Loss'] = df_2['Loss']
df_3['Algorithm'] = [a] * len(df_2)
frames = [df, df_3]
df = pd.concat(frames)
plt.figure()
ax = seaborn.lineplot(x="Epochs", y="Loss", hue="Algorithm",
markers=True, dashes=False, data=df)
files = os.listdir(str(result))
files_lwcase = [f.lower() for f in files]
file_no = len([c for c in files_lwcase if d.lower() in c if 'training' in c])
plt.savefig(str(result / (d + '_training_loss_plot_' + str(file_no) + '.pdf')), bbox_inches='tight', dpi=300)
# plt.show()
def plot_test_result(self):
"""Function to plot the testing result."""
algo = self.algo_list
path = self.model.config.path_result
result = self.model.config.path_figures
data = [self.model.config.data]
hits = self.model.config.hits
if path is None or algo is None or data is None:
raise NotImplementedError('Please provide valid path, algorithm and dataset!')
files = os.listdir(str(path))
# files_lwcase = [f.lower() for f in files if 'Testing' in f]
# print(files_lwcase)
for d in data:
df = pd.DataFrame()
for a in algo:
file_algo = [c for c in files if a.lower() in c.lower() if 'testing' in c.lower()]
if not file_algo:
continue
with open(str(path / file_algo[-1]), 'r') as fh:
df_2 = pd.read_csv(fh)
if df.empty:
df['Algorithm'] = [a] * len(df_2)
df['Epochs'] = df_2['Epoch']
df['Mean Rank'] = df_2['Mean Rank']
df['Filt Mean Rank'] = df_2['Filtered Mean Rank']
for hit in hits:
df['Hits' + str(hit)] = df_2['Hit-%d Ratio'%hit]
df['Filt Hits' + str(hit)] = df_2['Filtered Hit-%d Ratio'%hit]
else:
df_3 = pd.DataFrame()
df_3['Algorithm'] = [a] * len(df_2)
df_3['Epochs'] = df_2['Epoch']
df_3['Mean Rank'] = df_2['Mean Rank']
df_3['Filt Mean Rank'] = df_2['Filtered Mean Rank']
for hit in hits:
df_3['Hits' + str(hit)] = df_2['Hit-%d Ratio'%hit]
df_3['Filt Hits' + str(hit)] = df_2['Filtered Hit-%d Ratio'%hit]
frames = [df, df_3]
df = | pd.concat(frames) | pandas.concat |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_")
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=True)
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_", expand=True)
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# should preserve name
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
def test_partition_sep_kwarg(any_string_dtype):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = s.str.partition(sep="_")
result = s.str.partition("_")
tm.assert_frame_equal(result, expected)
expected = s.str.rpartition(sep="_")
result = s.str.rpartition("_")
tm.assert_frame_equal(result, expected)
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
expected = Series(["b", "d", np.nan, "g"])
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
expected = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_get_bounds():
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
# positive index
result = ser.str.split("_").str.get(2)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = ser.str.split("_").str.get(-3)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
def test_get_complex():
# GH 20671, getting value not in dict raising `KeyError`
    ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])
# -*- coding: utf-8 -*-
"""gender_detection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bu4brssep0L-q5nEmT9OBRykyBbvdu6S
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch
import librosa
import librosa.display
import os,glob
from tqdm import tqdm_notebook
import re
import random
from random import randint
from sklearn.model_selection import train_test_split
#!pip install wget
#cd "/content/drive/My Drive/gender_detection/"
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import requests
count=0
#79,80
for i in tqdm_notebook(range(83,84), total=1, unit="epoch"):
print(i)
parser = 'html.parser' # or 'lxml' (preferred) or 'html5lib', if installed
resp = requests.get("https://www.openslr.org/"+str(i)+"/")
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, parser, from_encoding=encoding)
for link in soup.find_all('a', href=True):
l=link["href"].split(".")
if l[len(l)-1]=="zip" or l[len(l)-1]=="tgz":
if l[1]=="openslr":
count=count+1
name=l[len(l)-2].split("/")
#print(link["href"],l[1],name[len(name)-1])
file_url = link["href"]
#print(file_url)
#!wget -c $file_url
# r = requests.get(file_url, stream = True)
# with open("/content/drive/My Drive/gender_detection/"+name[len(name)-1]+"."+l[len(l)-1], "wb") as file:
# for block in r.iter_content(chunk_size = 1024):
# if block:
# file.write(block)
#cd /content/drive/My Drive/gender_detection/female/
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/female/", '*.zip')):
print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l[len(l)-1]
l_2=l_1[len(l_1)-1].split(".")
print(l_2[0])
#!unzip $t -d "/content/drive/My Drive/gender_detection/female_unzipped/"
min=100000
count=0
count1=0
g=[]
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/male_unzipped/", '*.wav')):
count1=count1+1
#print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
#print(t)
if t not in g:
g.append(t)
#print(count1)
print(g,count1)
#!pip install soundfile
import soundfile as sf
ob = sf.SoundFile("/content/drive/My Drive/gender_detection/male_unzipped/clm_00610_00556859411.wav")
print(ob.samplerate)
for i in range(len(g)):
g=['nom', 'wem', 'mim', 'som', 'irm', 'scm']
data_speech=pd.DataFrame(columns=["S1","sr","Gender"])
hop_length = 512
n_mels =128
n_fft = 2048
#count=0
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/male_unzipped/", '*.wav')):
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
if t == g[i]:
y, sr = librosa.load(filename,sr=None)
#print(librosa.load(filename,sr=None))
# trim silent edges
speech, _ = librosa.effects.trim(y)
#speech=speech[:100000]
if speech.shape[0]>100000:
#print(speech.shape[0])
speech=speech[:100000]
#print(speech.shape[0])
S1=librosa.feature.mfcc(y=speech,sr=sr)
#print(S1)
gender="male"
# if gender == "f":
# gender="female"
# if gender == "m":
# gender = "male"
temp=[]
temp1=[]
temp2=[]
temp.append(np.array(S1))
temp1.append(gender)
temp2.append(np.array(sr))
#print(temp)
df_temp=pd.DataFrame(list(zip(temp,temp2,temp1)),columns=["S1","sr","Gender"])
data_speech=data_speech.append(df_temp)
print(data_speech.shape)
data_speech.to_pickle("/content/drive/My Drive/gender_speech_male_"+str(16+i)+".pkl")
#cd /content/drive/My Drive/gender_detection/spanish
#mkdir spanish
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/aida_tang_1/aidatatang_200zh/corpus/train", '*.tar.gz')):
print(filename)
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l[len(l)-1]
l_2=l_1[len(l_1)-1].split(".")
print(t)
#!tar -xvzf $t -C "/content/drive/My Drive/gender_detection/aida_tang/"
#!tar -xvzf "/content/drive/My Drive/gender_detection/tedx_spanish_corpus.tgz" -C "/content/drive/My Drive/gender_detection/spanish/"
for i in range(len(g)):
data_speech=pd.DataFrame(columns=["S1","sr","Gender"])
hop_length = 512
n_mels =128
n_fft = 2048
#count=0
for filename in glob.glob(os.path.join("/content/drive/My Drive/gender_detection/female_1/", '*.wav')):
l=filename.split("/")
l_1=l[len(l)-1].split("_")
t=l_1[0]
if t == g[i]:
y, sr = librosa.load(filename,sr=None)
#print(librosa.load(filename,sr=None))
# trim silent edges
speech, _ = librosa.effects.trim(y)
#speech=speech[:100000]
if speech.shape[0]>100000:
#print(speech.shape[0])
speech=speech[:100000]
#print(speech.shape[0])
S1=librosa.feature.mfcc(y=speech,sr=sr)
#print(S1)
gender="female"
# if gender == "f":
# gender="female"
# if gender == "m":
# gender = "male"
temp=[]
temp1=[]
temp2=[]
temp.append(np.array(S1))
temp1.append(gender)
temp2.append(np.array(sr))
#print(temp)
df_temp=pd.DataFrame(list(zip(temp,temp2,temp1)),columns=["S1","sr","Gender"])
data_speech=data_speech.append(df_temp)
print(data_speech.shape)
data_speech.to_pickle("/content/drive/My Drive/gender_speech_female_"+str(i+2)+".pkl")
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_male_1.pkl")
df_1.head()
df_male=pd.DataFrame(columns=["S1","sr","Gender"])
for i in range(16,22):
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_male_"+str(i)+".pkl")
df_male=df_male.append(df_1)
# train_inputs, test_inputs, train_labels, test_labels = train_test_split(df_male["S1"], df_male["Gender"],random_state=2018, test_size=0.1)
# Scaler=StandardScaler()
# train_inputs=Scaler.fit_transform(train_inputs)
# test_inputs=Scaler.transform(test_inputs)
# data_male_train = {"S1": train_inputs,
# "Gender": train_labels}
# df_male_train = pd.concat(data_male_train,
# axis = 1)
# data_male_test = {"S1": test_inputs,
# "Gender": test_labels}
# df_male_test = pd.concat(data_male_test,
# axis = 1)
df_female=pd.DataFrame(columns=["S1","sr","Gender"])
for i in range(19,24):
df_1=pd.read_pickle("/content/drive/My Drive/gender_speech_female_"+str(i)+".pkl")
df_female=df_female.append(df_1)
# train_inputs, test_inputs, train_labels, test_labels = train_test_split(df_female["S1"], df_female["Gender"],random_state=2018, test_size=0.1)
# Scaler=StandardScaler()
# train_inputs=Scaler.fit_transform(train_inputs)
# test_inputs=Scaler.transform(test_inputs)
# data_female_train = {"S1": train_inputs,
# "Gender": train_labels}
# df_female_train = pd.concat(data_female_train,
# axis = 1)
# data_female_test = {"S1": test_inputs,
# "Gender": test_labels}
# df_female_test = pd.concat(data_female_test,
# axis = 1)
df=pd.DataFrame(columns=["S1","sr","Gender"])
df=df.append(df_male)
df=df.append(df_female)
df=df.sample(frac=1)
# df_test=pd.DataFrame(columns=["S1","Gender"])
# df_test=df_test.append(df_male_test)
# df_test=df_test.append(df_female_test)
# df_test=df_test.sample(frac=1)
df.to_pickle("/content/drive/My Drive/gender_detection/gender_speech_english.pkl")
df_0=pd.read_pickle("/content/drive/My Drive/gender_detection/gender_speech.pkl")
df_1 = pd.read_pickle("/content/drive/My Drive/gender_detection/gender_speech_english.pkl")
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 12:17:34 2018
@author: Chandar_S
"""
import pandas as pd
import os
from scipy.misc import imread
import numpy as np
import h5py
from urllib.request import urlopen
#from tensorflow.examples.tutorials.mnist import input_data
class nn_utilities:
data_path = None
def __init__(self, path):
self.data_path = path
def convert_to_onehot(self, series):
return pd.get_dummies(series).values
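    # Illustrative example (added): convert_to_onehot(pd.Series([0, 2, 1])) returns
    #   [[1, 0, 0],
    #    [0, 0, 1],
    #    [0, 1, 0]]
    # with one column per distinct label (column order = sorted unique labels).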
##### START: PREP DATA ######
def prepare_digits_image_inputs(self):
data_dir = os.path.abspath(self.data_path + 'Image')
# check for existence
os.path.exists(data_dir)
train = pd.read_csv(os.path.join(data_dir, 'Numbers_Train_Mapping-5000.csv'))
test = pd.read_csv(os.path.join(data_dir, 'Numbers_Test_Mapping.csv'))
# GET THE TEST AND VALIDATION DATA
temp = []
for img_name in train.filename:
image_path = os.path.join(data_dir, 'Numbers', 'Images', 'train', img_name)
img = imread(image_path, flatten=True)
img = img.astype('float32')
temp.append(img)
# convert list to ndarray and PREP AS PER INPUT FORMAT
x_train = np.stack(temp)
x_train = x_train.reshape(-1, x_train.shape[1] * x_train.shape[2])
## GET THE TEST DATA
temp = []
for img_name in test.filename:
image_path = os.path.join(data_dir, 'Numbers', 'Images', 'test', img_name)
img = imread(image_path, flatten=True)
img = img.astype('float32')
temp.append(img)
# convert list to ndarray and PREP AS PER INPUT FORMAT
x_test = np.stack(temp)
x_test = x_test.reshape(-1, x_test.shape[1] * x_test.shape[2])
return self.prep_returndata(x_train, train.label, None, None, "local_digits_data", 1,
x_test, test, data_dir)
##### END : PREP DATA #######
def load_mnist(self, path, kind='train'):
import gzip
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte.gz'
% kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte.gz'
% kind)
with gzip.open(labels_path, 'rb') as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8,
offset=8)
with gzip.open(images_path, 'rb') as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8,
offset=16).reshape(len(labels), 784)
return images, labels
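    def _example_load_mnist_shapes(self):
        # Illustrative helper (added, not part of the original class): shows the
        # expected output of load_mnist. Assumes the gzipped MNIST files exist
        # under <data_path>/Image/MNIST_Digit_data.
        images, labels = self.load_mnist(self.data_path + 'Image\MNIST_Digit_data', kind='train')
        print(images.shape)   # (60000, 784) -- flattened 28x28 images
        print(labels.shape)   # (60000,)
        return images, labels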
def load_fashion_data(self):
x_train, y_train = self.load_mnist(self.data_path + 'Image\Fashion', kind='train')
x_validation, y_validation = self.load_mnist(self.data_path + 'Image\Fashion', kind='t10k')
return self.prep_returndata(x_train, y_train, x_validation, y_validation, "mnist_fashion_data")
def load_mnist_digit_data(self):
x_train, y_train = self.load_mnist(self.data_path + 'Image\MNIST_Digit_data', kind='train')
x_validation, y_validation = self.load_mnist(self.data_path + 'Image\MNIST_Digit_data', kind='t10k')
return self.prep_returndata(x_train, y_train, x_validation, y_validation, "mnist_digit_data")
def load_emnist_alphadigit_data(self):
train = pd.read_csv(self.data_path + 'Image\emnist_alphadigit_data\emnist-balanced-train.csv', header=None)
test = pd.read_csv(self.data_path + 'Image\emnist_alphadigit_data\emnist-balanced-test.csv', header=None)
x_train_data, y_train = train.iloc[:, 1:].values, train.iloc[:, 0].values
x_validation_data, y_validation = pd.get_dummies(test.iloc[:, 1:]), pd.get_dummies(test.iloc[:, 0])
x_train = np.apply_along_axis(self.rotate, 1, x_train_data)
x_validation = np.apply_along_axis(self.rotate, 1, x_validation_data)
del x_train_data, x_validation_data
return self.prep_returndata(x_train, y_train, x_validation, y_validation, "emnist_alpha_digit_data")
def load_emnist_alphadigit_data_google_collab(self):
train = pd.read_csv(self.data_path + 'emnist-balanced-train.csv', header=None)
test = pd.read_csv(self.data_path + 'emnist-balanced-test.csv', header=None)
x_train_data, y_train = train.iloc[:, 1:].values, train.iloc[:, 0].values
x_validation_data, y_validation = pd.get_dummies(test.iloc[:, 1:]), pd.get_dummies(test.iloc[:, 0])
x_train = np.apply_along_axis(self.rotate, 1, x_train_data)
x_validation = np.apply_along_axis(self.rotate, 1, x_validation_data)
del x_train_data, x_validation_data
return self.prep_returndata(x_train, y_train, x_validation, y_validation, "emnist_alpha_digit_data")
def load_emnist_letters_data(self):
train = pd.read_csv(self.data_path + 'Image\EMINIST_EnglishLetters\emnist-letters-train.csv', header=None)
test = pd.read_csv(self.data_path + 'Image\EMINIST_EnglishLetters\emnist-letters-test.csv', header=None)
x_train_data, y_train = train.iloc[:, 1:].values, train.iloc[:, 0].values
        x_validation_data, y_validation = pd.get_dummies(test.iloc[:, 1:]), pd.get_dummies(test.iloc[:, 0])
import multiprocessing as mp
import os
import tempfile
import shutil
import dask.dataframe as dd
import dask.diagnostics
import genomepy
from gimmemotifs.scanner import scan_regionfile_to_table
from gimmemotifs.utils import pfmfile_location
from loguru import logger
import numpy as np
import pandas as pd
import pickle
import pysam
import qnorm
from scipy import stats
from sklearn.preprocessing import minmax_scale
from ananse.utils import (
bed_sort,
bed_merge,
bam_index,
bam_sort,
mosdepth,
)
from ananse.distributions import Distributions
class CombineBedFiles:
def __init__(self, genome, peakfiles, verbose=True):
self.genome = genome
self.list_of_peakfiles = (
peakfiles if isinstance(peakfiles, list) else [peakfiles]
)
self.verbose = verbose
@staticmethod
def is_narrowpeak(bed, check_values=True):
"""
Check BED type by column count.
Check if peak values are not all zeroes unless check_values is False.
Accepts a BED file (including narrowPeak, broadPeak, etc.)
Returns bool
"""
with open(bed) as b:
for line in b:
if line.startswith("#"):
continue
line = line.split("\t")
cols = len(line)
break
# narrowPeak has 10 columns
# and the peak column is >= 0
if cols != 10 or int(line[9]) < 0:
return False
if not check_values:
return True
# check if the peak values aren't all zeroes
summit_values = 0
sample_size = 20 # check an arbitrary number of lines
with open(bed) as b:
for n, line in enumerate(b):
if line.startswith("#"):
continue
line = line.split("\t")
peak_val = int(line[9])
# value must be >=0
if peak_val < 0:
return False
summit_values += peak_val
if n >= sample_size:
break
if summit_values > 0:
return True
return False
@staticmethod
def bed_resize(
genome,
bed_in,
bed_out,
width=200,
narrowpeak=False,
fix_outliers=False,
output_bed3=True,
verbose=True,
):
"""
Set bed region width.
If the input bed is a narrowPeak file (narrowpeak=True),
center region on the summit (start+peak).
Otherwise center on the middle of the region.
If fix_outliers is set to True, shift regions to fit their chromosomes.
Otherwise drop these regions.
If output_bed3 is set to False, output the whole bed file.
"""
half_seqlen = width // 2
chrom_sizes = genomepy.Genome(genome).sizes
missing_chrm = []
if narrowpeak:
def get_summit(_start, _, summit_offset):
return _start + int(summit_offset)
summit_col = 9
else:
def get_summit(_start, _end, _):
return (_start + _end) // 2
summit_col = 0 # unused
with open(bed_in) as old, open(bed_out, "w") as new:
for line in old:
if line.startswith("#"):
continue
line = line.split("\t")
chrm = str(line[0])
if chrm not in chrom_sizes.keys():
missing_chrm.append(chrm)
continue
start = int(line[1])
end = int(line[2])
rest = line[3:] if not output_bed3 else []
chrm_len = chrom_sizes[chrm]
if width == end - start:
nstart = str(start)
nend = str(end)
elif chrm_len <= width:
if not fix_outliers:
continue
nstart = str(0)
nend = str(chrm_len)
else:
summit = get_summit(start, end, line[summit_col])
if not fix_outliers:
nstart = str(summit - half_seqlen)
nend = str(summit + half_seqlen)
if int(nstart) < 0 or int(nend) > chrm_len:
continue
else:
# adjust the summit for the chromosome boundaries
summit = max(summit, 0 + half_seqlen)
summit = min(summit, chrm_len - half_seqlen)
nstart = str(summit - half_seqlen)
nend = str(summit + half_seqlen)
new.write("\t".join([chrm, nstart, nend] + rest) + "\n")
if missing_chrm and verbose:
logger.warning(
"The following contigs were present in "
+ f"'{os.path.basename(bed_in)}', "
+ "but were missing in the genome file: "
+ f"{', '.join(list(set(missing_chrm)))}\n"
)
return bed_out
def run(self, outfile, width=200, force=False):
if force or not os.path.exists(outfile):
if self.verbose:
logger.info("Combining bed files")
tmpdir = tempfile.mkdtemp(prefix="ANANSE_")
try:
list_of_beds = []
for peakfile in self.list_of_peakfiles:
# use narrowPeak Peak location for region centering if possible
is_np = self.is_narrowpeak(peakfile)
resized_peakfile = os.path.join(tmpdir, os.path.basename(peakfile))
# resize each BED region to 200 BP
self.bed_resize(
genome=self.genome,
bed_in=peakfile,
bed_out=resized_peakfile,
width=width,
narrowpeak=is_np,
verbose=self.verbose,
)
bed_sort(resized_peakfile)
list_of_beds.append(resized_peakfile)
# merge resized beds into one
merged_bed = os.path.join(tmpdir, "merged")
bed_merge(list_of_beds=list_of_beds, merged_bed=merged_bed)
shutil.copy2(merged_bed, outfile)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
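# Illustrative usage sketch (added): the genome name, peak files and output path are
# hypothetical; run() resizes every region to `width` bp and merges the results.
def _example_combine_bed_files():
    cbf = CombineBedFiles(genome="hg38", peakfiles=["sample1.narrowPeak", "sample2.bed"])
    cbf.run(outfile="combined_peaks.bed", width=200)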
class ScorePeaks:
def __init__(self, bams, bed, ncore=1, verbose=True):
self.list_of_bams = bams if isinstance(bams, list) else [bams]
self.bed = bed # one bed file with all putative enhancer binding regions
self.verbose = verbose
self.ncore = ncore
def compatibility_check(self):
"""
Check if any chromosome in each bams file are found in the bed file.
This filters out datasets mapped to different genomes.
"""
error = False
bed_chromosomes = set(
pd.read_csv(self.bed, sep="\t", header=None)[0].astype(str)
)
for bam in self.list_of_bams:
bam_header = pysam.view(bam, "-H").split("\n") # noqa: pysam bug
for line in bam_header:
if not line.startswith("@SQ"):
continue
# extract chrom (ex: '@SQ\tSN:chr11\tLN:100316')
chrom = line.split("\tSN:")[1].split("\tLN:")[0]
# if any chrom matches: next bam
if chrom in bed_chromosomes:
break
else:
logger.exception(
f"Chromosomes in the peak file(s) do not match any in bam file '{os.path.basename(bam)}'!\n"
f"Does {self.bed} contain any regions, and "
"are both bam- and peak file(s) mapped to the same genome assembly?\n"
)
error = True
if error:
exit(1)
def peaks_count(self, outdir):
"""
count bam reads in the bed regions
returns one bed file for each bam in outdir
"""
# linear script:
# coverage_files = []
# for bam in self.list_of_bams:
# bed_output = os.path.join(outdir, os.path.basename(bam).replace(".bam", ".regions.bed"))
# coverage_files.append(bed_output)
# mosdepth(self.bed, bam, bed_output, self.ncore)
# return coverage_files
# parallel script:
nbams = len(self.list_of_bams)
npool = min(self.ncore, nbams)
ncore = min(4, self.ncore // npool) # 1-4 cores/bam
# list with tuples. each tuple = one run
mosdepth_params = []
coverage_files = []
for bam in self.list_of_bams:
bed_output = os.path.join(
outdir, os.path.basename(bam).replace(".bam", ".regions.bed")
)
mosdepth_params.append((self.bed, bam, bed_output, ncore))
coverage_files.append(bed_output)
pool = mp.Pool(npool)
try:
pool.starmap_async(mosdepth, mosdepth_params)
finally: # To make sure processes are closed in the end, even if errors happen
pool.close()
pool.join()
return coverage_files
@staticmethod
def peaks_merge(coverage_files, bed_output, ncore=1):
"""
averages all peaks_count outputs
uses quantile normalization to normalize for read depth
returns one BED 3+1 file
"""
ncore = min(4, ncore)
bed = pd.read_csv(coverage_files[0], header=None, sep="\t")
if len(coverage_files) > 1:
for file in coverage_files[1:]:
scores = pd.read_csv(file, header=None, sep="\t")[3]
bed = pd.concat([bed, scores], axis=1)
scores = bed.iloc[:, 3:]
scores = qnorm.quantile_normalize(scores, axis=1, ncpus=ncore)
scores = scores.mean(axis=1)
bed = pd.concat([bed.iloc[:, :3], scores], axis=1)
bed.to_csv(bed_output, sep="\t", header=False, index=False)
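    @staticmethod
    def _example_quantile_normalize():
        # Illustrative sketch (added): what the qnorm step in peaks_merge does to two
        # read-depth columns before averaging. The values are made up.
        scores = pd.DataFrame({"bam1": [10.0, 20.0, 30.0], "bam2": [100.0, 400.0, 900.0]})
        normed = qnorm.quantile_normalize(scores, axis=1)
        # both columns now follow the same distribution, so their mean is depth-robust
        return normed.mean(axis=1)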
@staticmethod
def peaks_fit(bam_coverage, bed_output, dist_func="lognorm_dist", **kwargs):
"""
fit the peak scores to a distribution
"""
bed = pd.read_csv(bam_coverage, header=None, sep="\t")
region = (
bed[0].astype(str) + ":" + bed[1].astype(str) + "-" + bed[2].astype(str)
)
score = bed[3]
# obtain a distribution
dist_func = Distributions().set(dist_func)
# with np.errstate(divide="ignore", invalid="ignore"):
# dist = dist_func(score, **kwargs)
dist = dist_func(score, **kwargs)
# replace scores with distribution values
ascending_dist = np.sort(dist)
ascending_scores_index = np.searchsorted(np.sort(score), score)
norm_score = np.array([ascending_dist[i] for i in ascending_scores_index])
logn_score = np.log(norm_score + 1)
scaled_score = minmax_scale(logn_score)
log10_score = np.log10(norm_score + 1)
data = {
"region": region, # ex: "chr1:0-200"
"score": score,
"norm_score": norm_score,
"logn_score": logn_score,
"scaled_score": scaled_score,
"log10_score": log10_score, # used by the original function
}
        bed = pd.DataFrame(data=data)
# etl.py - module to clean up incoming covid 19 datasets for ingestion
__version__ = '0.1'
__all__ = ['FetchData', 'GetCTPData', 'ProcessCTPData', 'FormatDates']
""" @TODO - should have own getargs.py """
import csv
import os
import datetime
import pandas as pd
from common import utils
from Datasets.__meta__.state_abbrs import state_abbrs
US_DAILY = 'https://covidtracking.com/api/v1/us/daily'
US_STATES_DAILY = 'https://covidtracking.com/api/v1/states/daily'
home = os.environ['EST_HOME']
cols = ['date', 'state', 'positive', 'hospitalized', 'death', 'positiveIncrease']
col_name_map = {
'positive': 'confirmed',
'death': 'death',
'state': 'state'
}
class FetchData():
""" Retrieve (csv) Dataset from a known source """
def __init__(self):
pass
def get_csv_data(self, csv_url: str):
if csv_url[-4:] == '.csv':
return pd.read_csv(csv_url)
else:
raise ValueError('Target URL does not appear to be a csv.')
class ProcessData():
""" Retrieve (csv) Dataset from a known source """
def __init__(self, dir):
self.data_dir = os.environ['EST_HOME'] + '/' + dir
def parse_data_by_region(self, df):
""" Split CTP by state and cast to int. No idea why they have it as floats. """
df = df.fillna(0) # (Not needed with na=False)
# We should always check that dir has a trailing /
countries = list(df.country_region_code.unique())
for country in countries:
if type(country) == str:
df_ = df[df['country_region_code'].str.contains(country, na=False)]
                os.chdir(self.data_dir)
if os.path.isdir(country):
write_f(country, country + '.csv', df_.to_csv(index=False))
else:
os.mkdir(country)
write_f(country + '/', country, df_.to_csv(index=False))
def parse_us_data_by_state(self, df):
df_us = df[df['country_region_code'].str.contains('US', na=False)]
state_names = list(df_us.sub_region_1.unique())
for state in state_names:
#if type(state) is str and state != '' and state is not None:
if type(state) is str:
state_abbr = state_abbrs.get(state)
df_state = df_us[df_us['sub_region_1'].str.contains(state, na=False)]
df_state = self.avg_social_distance_scores(df_state)
if os.path.exists(state_abbr + '.csv'):
os.system('cp ' + state_abbr + '.' + state_abbr)
write_f(self.data_dir + 'US-mobile/', state_abbr, df_state.to_csv(index=False))
# pass
#os.system('cp -r country '.'+country)
#if os.path.isdir(country):
# write_f(dir, state, df_.to_csv(index=False))
# if country_region_code != '' and os.path.isdir(country_region_code):
#write_f(dir, state, df_.to_csv(index=False))
def avg_social_distance_scores(self, df):
df['social_distance_avg'] = df[[
'retail_and_recreation_percent_change_from_baseline',
'grocery_and_pharmacy_percent_change_from_baseline',
'parks_percent_change_from_baseline',
'transit_stations_percent_change_from_baseline',
'workplaces_percent_change_from_baseline',
]].mean(axis=1)
return df
class GetCTPData():
# """ Retrieve Covid Tracking Poject Data """
def __init__(self):
self.us_daily = US_DAILY
self.us_states_daily = US_STATES_DAILY
self.us_daily = 'https://covidtracking.com/api/v1/us/daily'
self.us_states_daily = 'https://covidtracking.com/api/v1/states/daily'
def _get_ctp_data(self, ctp_url: str) -> pd.DataFrame:
return pd.read_csv(ctp_url + '.csv')
def get_state_historic(self) -> pd.DataFrame:
return self._get_ctp_data(self.us_states_daily)
def get_us_historic(self) ->pd.DataFrame:
return self._get_ctp_data(self.us_daily)
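# Illustrative usage sketch (added): pulls the Covid Tracking Project daily series.
# ctp = GetCTPData()
# us_df = ctp.get_us_historic()          # national daily totals
# states_df = ctp.get_state_historic()   # per-state daily totals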
class ProcessCTPData():
# """ Process Covid Tracking Poject Data """
def __init__(self):
pass # No constructor
def keep_cols(self, df):
cols_drop = [col for col in df.columns if col not in cols]
for col in cols_drop:
df.drop(col, axis=1, inplace=True)
return df
def parse_state_daily_data(self, df):
""" Split CTP by state and cast to int. No idea why they have it as floats. """
dir = os.environ['EST_HOME'] + '/Datasets/USA/'
states = list(df.state.unique())
df = df.fillna(0)
        for state in states:
            df_ = df[df['state'].str.contains(state)].copy()
            cols = list(df_.columns)
            #cols.remove('date')
            cols.remove('state')
            df_[cols] = df_[cols].astype(int)
            write_f(dir, state, df_.to_csv(index=False))
class FormatDates():
""" Useful date formatting functions """
    def __init__(self):
""" Constructor takes path to datafile as arg """
#df = get_df(data)
def get_df(dataset):
""" Takes a *formatted* dataframe with dates in column 0. """
df = csv_to_df(dataset) # Loads dataframe for a given location.
df.rename(columns = {df.columns[0]:'date'}, inplace = True)
        df.columns = map(str.lower, df.columns) # This is general formatting, not date formatting.
return df
    def cast_to_file(self, datafile, cast_from, cast_to):
        """ Recasts a csv with a date column. """
        df = csv_to_df(datafile)
        df = self.df_cast(cast_from, cast_to)(df)
        df.to_csv(datafile, index=False)
def df_cast(self, cast_from, cast_to):
""" date column format dispatch """
#cast = 'cast_from' + cast_from + 'to' + cast_to
        cast = 'df_' + cast_from + '_to_' + cast_to
df_cast = getattr(self, cast, lambda: "Not Found")
return df_cast
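    # Illustrative usage (added): the dispatch returns a bound caster, e.g.
    #   caster = FormatDates().df_cast('int', 'date'); df = caster(df)
    # (method names below follow the 'df_<from>_to_<to>' pattern).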
""" Given a dataframe with a date column of type int, casts column to date object type """
def df_int_to_date(self, df):
for column in df.columns:
if column == 'date':
pass
df['date'] = df['date'].apply(lambda x: pd.to_datetime(str(x), format='%d/%m/%Y'))
return df
def df_date_to_int(df):
for col in df.columns:
if col.lower() == 'date':
#df.date.str.strip('-') # Doesn't work.
df.date = df.date.str.replace('-', '', regex=True)
df[col] = df[col].astype(int)
return df
    def df_str_to_str(self, df) -> str:
        """ Fix date formatting in a csv """
        df['date'] = pd.to_datetime(df['date'])
        df['date'] = df['date'].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)
return df.to_csv(index=False)
def df_format_dates(df, col='Date'):
""" Fix date formatting in a dataframe """
df[col] = pd.to_datetime(df[col])
        df[col] = df[col].apply(lambda x: x.strftime('%Y-%m-%d')).astype(str)
return df
""" additional cast functions in utils """
def df_floats_to_int(df):
for col in df.columns:
if col != 'date':
                df[col] = df[col].astype(int)
def df_cast_col(df, col, cast):
""" This *should* check the from type before casting. """
for column in df.columns:
if column == col: # This is why I <3 Python.
                df[column] = df[column].astype(cast)
def df_dt_to_ord(df):
""" Given a dataframe with a Date column as the first column, converts to ordinal """
import datetime as dt
#df['Date_ord'] = pd.to_datetime(df['Date']) # fix this to catch 'date' and 'Dates'
        df['Date_ord'] = pd.to_datetime(df.iloc[:,0])
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas_datareader as web
import datetime as dt
import xgboost as xgb
import pickle
app = dash.Dash()
server = app.server
# scaler = MinMaxScaler(feature_range=(0, 1))
#
# df_nse = pd.read_csv("./NSE-Tata-Global-Beverages-Limited.csv")
#
# df_nse["Date"]=pd.to_datetime(df_nse.Date,format="%Y-%m-%d")
# df_nse.index = df_nse['Date']
#
# data = df_nse.sort_index(ascending=True, axis=0)
#
# close_data = pd.DataFrame(index=range(0, len(df_nse)), columns=['Date', 'Close'])
# for i in range(0, len(data)):
# close_data["Date"][i] = data['Date'][i]
# close_data["Close"][i] = data["Close"][i]
#
# roc_data = pd.DataFrame(index=range(0, len(df_nse)), columns=['Date', 'Rate_Of_Change'])
# for i in range(0, len(data)):
# roc_data["Date"][i] = data['Date'][i]
# roc_data["Rate_Of_Change"][i] = (data["Close"][len(data) - 1] - data["Close"][i]) / data["Close"][i]
#
#
# closed_grap_data = close_data[987:]
#train data
start = dt.datetime(2012,1,1)
end = dt.datetime(2020,1,1)
#Load Test Data
test_start = dt.datetime(2020,1,1)
test_end = dt.datetime.now()
stocks = ['NOK', 'AAPL','FB','TSLA','NFLX']
# dataE = web.DataReader(stocks, 'yahoo', test_start, test_end)
sample = web.DataReader("NOK", 'yahoo', test_start, test_end)
dataXGBoost=web.DataReader("NFLX", 'yahoo', test_start, test_end)
drop_cols = [ 'Volume', 'Open', 'Low', 'High','Close']
sample = sample.drop(drop_cols, 1)
dataXGBoost = dataXGBoost.drop(drop_cols, 1)
#Relative Strength Index
def relative_strength_idx(df, n=14):
close = df['Adj Close']
delta = close.diff()
delta = delta[1:]
pricesUp = delta.copy()
pricesDown = delta.copy()
pricesUp[pricesUp < 0] = 0
pricesDown[pricesDown > 0] = 0
rollUp = pricesUp.rolling(n).mean()
rollDown = pricesDown.abs().rolling(n).mean()
rs = rollUp / rollDown
rsi = 100.0 - (100.0 / (1.0 + rs))
return rsi
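# RSI sketch: average gains and losses over an n-day window (default 14), take
# RS = mean(gains) / mean(|losses|), then RSI = 100 - 100 / (1 + RS).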
# MA
def MA(df):
df['EMA_9'] = df['Adj Close'].ewm(9).mean().shift()
df['SMA_5'] = df['Adj Close'].rolling(5).mean().shift()
df['SMA_10'] = df['Adj Close'].rolling(10).mean().shift()
df['SMA_15'] = df['Adj Close'].rolling(15).mean().shift()
df['SMA_30'] = df['Adj Close'].rolling(30).mean().shift()
def MACD(df):
EMA_12 = pd.Series(df['Adj Close'].ewm(span=12, min_periods=12).mean())
EMA_26 = pd.Series(df['Adj Close'].ewm(span=26, min_periods=26).mean())
df['MACD'] = pd.Series(EMA_12 - EMA_26)
df['MACD_signal'] = pd.Series(df.MACD.ewm(span=9, min_periods=9).mean())
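# MACD here is the 12-period EMA minus the 26-period EMA of Adj Close; the signal
# line is a 9-period EMA of the MACD itself.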
def XGBOOST_RSI_MA_predict_next_price(sticker):
test_data = web.DataReader(sticker, 'yahoo', test_start, test_end)
test_data['RSI'] = relative_strength_idx(test_data).fillna(0)
MA(test_data)
MACD(test_data)
# test_data['Adj Close'] = test_data['Adj Close'].shift(-1)
print("data adj: ", test_data)
test_data = test_data.iloc[33:]
# test_data = test_data[:-1]
drop_cols = ['Volume', 'Open', 'Low', 'High', 'Close']
test_data = test_data.drop(drop_cols, 1)
print("DF: ", test_data)
datasetX = test_data.drop(['Adj Close'], 1)
X = datasetX.values
model = pickle.load(open(f'XGB_RSI_MA_{sticker}_Model.pkl', "rb"))
y_pred = model.predict(X)
predicted_prices = test_data.copy()
predicted_prices[f'XGBOOST_RSI_MA_predict_next_price_{sticker}'] = y_pred
# return y_pred[-1]
return predicted_prices
def XGBOOST_predict_next_price(sticker):
test_data = web.DataReader(sticker, 'yahoo', test_start, test_end)
datasetX = test_data['Adj Close'].copy()
X = datasetX.values.reshape(-1, 1)  # single feature -> 2-D (n_samples, 1), as sklearn-style predict expects
model = pickle.load(open(f'XGB_{sticker}_Model.pkl', "rb"))
y_pred = model.predict(X)
print("Xgboost", y_pred)
# return y_pred[-1]
return y_pred
def XGBOOST_predict_n_day(sticker,n):
test_data = web.DataReader(sticker, 'yahoo', test_start, test_end)
datasetX = test_data['Adj Close'].copy()
X = datasetX.values.reshape(-1, 1)  # single feature -> 2-D (n_samples, 1), as sklearn-style predict expects
model = pickle.load(open(f'XGB_{sticker}_Model.pkl', "rb"))
for i in range(0, n):
y_pred = model.predict(X)
X = y_pred.reshape(-1, 1)  # feed the predictions back in as the next (n_samples, 1) input
print("len", len(y_pred))
return y_pred[-1]
# return y_pred
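# Note: this multi-day loop feeds each round of predictions straight back in as the
# next input matrix, so errors compound; it is a rough sketch rather than a proper
# recursive forecast built from lagged features.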
# Predict closing price using LSTM
def LSTM_predict_next_price(data):
clean_data = pd.DataFrame(index=range(0, len(data)), columns=['Date', 'Close'])
for i in range(0, len(data)):
clean_data["Date"][i] = data['Date'][i]
clean_data["Close"][i] = data["Close"][i]
# for i in range(0,days):
# clean_data["Date"][len(data)+i]=clean_data["Date"][len(data) - 1] + pd.DateOffset(days=i+1)
print("clean_data",clean_data)
clean_data.index = clean_data.Date
clean_data.drop("Date", axis=1, inplace=True)
dataset = clean_data.values
train = dataset[0:987, :]
valid = dataset[987:, :]
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
x_train, y_train = [], []
for i in range(60, len(train)):
x_train.append(scaled_data[i - 60:i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
model = load_model("saved_lstm_closed_model.h5")
inputs = clean_data[len(clean_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60, inputs.shape[0]):
X_test.append(inputs[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
closing_price = model.predict(X_test)
closing_price = scaler.inverse_transform(closing_price)
print("closing_price",closing_price,len(closing_price))
return closing_price[len(closing_price)-1][0]
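# The LSTM operates on sliding windows of the 60 previous closing prices, scaled to [0, 1];
# predictions are inverse-transformed back to price units, and the last one is returned.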
#update
def get_predict_by_sticker(modelName,sticker):
data = web.DataReader(sticker, 'yahoo', start, end)
#Prepare Data
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data['Adj Close'].values.reshape(-1,1))
prediction_days=60
test_data = web.DataReader(sticker, 'yahoo', test_start, test_end)
print("test data Adj Close: ",test_data['Adj Close'])
total_dataset = pd.concat((data['Adj Close'],test_data['Adj Close']),axis=0)
model_inputs = total_dataset[len(total_dataset)-len(test_data)-prediction_days:].values
model_inputs = model_inputs.reshape(-1,1)
model_inputs = scaler.transform(model_inputs)
#Make predictions on Test Data
x_test = []
for x in range(prediction_days,len(model_inputs)):
x_test.append(model_inputs[x-prediction_days:x,0])
x_test = np.array(x_test)
x_test = np.reshape(x_test,(x_test.shape[0],x_test.shape[1],1))
#predict LSTM close price
model = load_model(f'saved_{modelName}_closed_model_{sticker}.h5')
predicted_prices = model.predict(x_test)
predicted_prices = scaler.inverse_transform(predicted_prices)
print("Predict: ",predicted_prices)
test_data['PredictionLSTM'] = predicted_prices
return predicted_prices
def predict_next_n_day(modelName,sticker,n):
data = web.DataReader(sticker, 'yahoo', start, end)
# Prepare Data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data['Adj Close'].values.reshape(-1, 1))
prediction_days = 60
test_data = web.DataReader(sticker, 'yahoo', test_start, test_end)
total_dataset = pd.concat((data['Adj Close'], test_data['Adj Close']), axis=0)
"""
.. module:: merge3
:synopsis: merge assemblies from different cell types
jGEM version 3 merger
.. moduleauthor:: <NAME> <<EMAIL>>
"""
# system imports
import subprocess
import multiprocessing
import gzip
import os
import time
import shutil
from functools import reduce
from operator import iadd, iand
from collections import Counter
from itertools import repeat
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# 3rd party imports
import pandas as PD
import numpy as N
import matplotlib.pyplot as P
# LocalAssembler imports
from collections import Counter
from matplotlib.collections import BrokenBarHCollection
from functools import partial, reduce
from operator import iadd
import bisect
from scipy.optimize import nnls
# library imports
from jgem import utils as UT
from jgem import bigwig as BW
from jgem import bedtools as BT
from jgem import gtfgffbed as GGB
from jgem import taskqueue as TQ
from jgem import assembler3 as A3
import jgem.cy.bw as cybw
############# Merge Prep ######################################################
class PrepBWSJ(object):
def __init__(self, j2pres, genome, dstpre, libsizes=None, np=10):
self.j2pres = j2pres
self.libsizes = libsizes # scale = 1e6/libsize
self.genome = genome
self.dstpre = dstpre
self.np = np
def __call__(self):
# exdf => ex.p, ex.n, ex.u
# sjdf => sj.p, sj.n, sj.u
# paths => sjpath.bed
# divide into tasks (exdf,sjdf,paths) x chroms
self.server = server = TQ.Server(name='PrepBWSJ', np=self.np)
self.chroms = chroms = UT.chroms(self.genome)
csizes = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
self.exstatus = exstatus = {}
self.sjstatus = sjstatus = {}
self.pastatus = pastatus = {}
self.sdstatus = sdstatus = {}
exdone=False
sjdone=False
padone=False
sddone=False
with server:
for chrom in chroms:
# exdf tasks
tname = 'prep_exwig_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom])
task = TQ.Task(tname, prep_exwig_chr, args)
server.add_task(task)
# exdf tasks
tname = 'prep_sjwig_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom])
task = TQ.Task(tname, prep_sjwig_chr, args)
server.add_task(task)
# exdf tasks
tname = 'prep_sjpath_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom)
task = TQ.Task(tname, prep_sjpath_chr, args)
server.add_task(task)
tname = 'prep_sjdf_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom)
task = TQ.Task(tname, prep_sjdf_chr, args)
server.add_task(task)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5) # block until result come in
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('prep_exwig_chr.'):
chrom = name.split('.')[1]
exstatus[chrom] = rslt
if len(exstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_exbw $$$$$$$$$$$')
tname='prep_exbw'
args = (self.dstpre, chroms, self.genome)
task = TQ.Task(tname, prep_exbw, args)
server.add_task(task)
if name.startswith('prep_sjwig_chr.'):
chrom = name.split('.')[1]
sjstatus[chrom] = rslt
if len(sjstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjbw $$$$$$$$$$$')
tname='prep_sjbw'
args = (self.dstpre, chroms, self.genome)
task = TQ.Task(tname, prep_sjbw, args)
server.add_task(task)
if name.startswith('prep_sjpath_chr.'):
chrom = name.split('.')[1]
pastatus[chrom] = rslt
if len(pastatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjpath $$$$$$$$$$$')
tname='prep_sjpath'
args = (self.dstpre, chroms)
task = TQ.Task(tname, prep_sjpath, args)
server.add_task(task)
if name.startswith('prep_sjdf_chr.'):
chrom = name.split('.')[1]
sdstatus[chrom] = rslt
if len(sdstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjdf $$$$$$$$$$$')
tname='prep_sjdf'
args = (self.dstpre, chroms)
task = TQ.Task(tname, prep_sjdf, args)
server.add_task(task)
if name=='prep_exbw':
print('$$$$$$$$ prep_exbw done $$$$$$$$$$$')
exdone=True
if name=='prep_sjbw':
print('$$$$$$$$ prep_sjbw done $$$$$$$$$$$')
sjdone=True
if name=='prep_sjpath':
print('$$$$$$$$ prep_sjpath done $$$$$$$$$$$')
padone=True
if name=='prep_sjdf':
print('$$$$$$$$ prep_sjdf done $$$$$$$$$$$')
sddone=True
if exdone&sjdone&padone&sddone:
break
print('Exit Loop')
print('Done')
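# Minimal usage sketch (prefixes, genome and library sizes here are hypothetical):
#   prep = PrepBWSJ(j2pres=['path/sample1.pre', 'path/sample2.pre'], genome='mm10',
#                   dstpre='merged/all', libsizes=[2.1e7, 1.8e7], np=4)
#   prep()  # runs per-chromosome wig/sjpath/sjdf tasks, then the final bigwig and concat steps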
def prep_exwig_chr(j2pres, libsizes, dstpre, chrom, csize):
ss = ['p','n','u']
s2s = {'p':['+'],'n':['-'],'u':['.+','.-','.']}
a = {s:N.zeros(csize) for s in ss}
wigpaths = {s:dstpre+'.ex.{0}.{1}.wig'.format(s,chrom) for s in ss}
if all([os.path.exists(dstpre+'.ex.{0}.bw'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(dstpre+'.ex.{0}.wig'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(wigpaths[s]) for s in ss]):
return wigpaths
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
exdf = UT.read_pandas(pre+'.exdf.txt.gz',names=A3.EXDFCOLS)
exdf = exdf[exdf['chr']==chrom]
for s in ss:
exsub = exdf[exdf['strand'].isin(s2s[s])]
for st,ed,ecov in exsub[['st','ed','ecov']].values:
a[s][st:ed] += ecov*scale
sedf = UT.read_pandas(pre+'.sedf.txt.gz',names=A3.EXDFCOLS)
sedf = sedf[sedf['chr']==chrom]
for s in ss:
sesub = sedf[sedf['strand'].isin(s2s[s])]
for st,ed,ecov in sesub[['st','ed','ecov']].values:
a[s][st:ed] += ecov*scale
for s in ['p','n','u']:
if libsizes is not None:
a[s] /= float(n) # average
cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w')
return wigpaths
def prep_sjwig_chr(j2pres, libsizes, dstpre, chrom, csize):
ss = ['p','n','u']
s2s = {'p':['+'],'n':['-'],'u':['.+','.-']}
a = {s:N.zeros(csize) for s in ss}
wigpaths = {s:dstpre+'.sj.{0}.{1}.wig'.format(s,chrom) for s in ss}
if all([os.path.exists(dstpre+'.sj.{0}.bw'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(dstpre+'.sj.{0}.wig'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(wigpaths[s]) for s in ss]):
return wigpaths
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
sjdf = UT.read_pandas(pre+'.sjdf.txt.gz',names=A3.SJDFCOLS)
sjdf = sjdf[sjdf['chr']==chrom]
for s in ss:
sjsub = sjdf[sjdf['strand'].isin(s2s[s])]
for st,ed,tcnt in sjsub[['st','ed','tcnt']].values:
a[s][st:ed] += tcnt*scale
for s in ['p','n','u']:
if libsizes is not None:
a[s] /= float(n) # average
cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w')
return wigpaths
def prep_sjpath_chr(j2pres, libsizes, dstpre, chrom):
pc2st = {}
pc2ed = {}
pc2tst = {}
pc2ted = {}
pc2strand = {}
pc2tcov = {}
# pc2tcov0 = {}
# chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes
# cols = ['st','ed','name','strand','tst','ted','tcov0','tcov']
path = dstpre+'.sjpath.{0}.bed.gz'.format(chrom)
path0 = dstpre+'.sjpath.bed.gz'
if os.path.exists(path0):
return path
if os.path.exists(path):
return path
cols = ['st','ed','name','strand','tst','ted','tcov']
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
paths = UT.read_pandas(pre+'.paths.txt.gz', names=A3.PATHCOLS)
paths = paths[paths['chr']==chrom]
for st,ed,name,s,tst,ted,tcov in paths[cols].values:
pc = ','.join(name.split(',')[1:-1]) # trim 53exons => intron chain
if pc=='':
continue # ignore no junction path
pc2st[pc] = min(st, pc2st.get(pc,st))
pc2ed[pc] = max(ed, pc2ed.get(pc,ed))
pc2tst[pc] = tst
pc2ted[pc] = ted
pc2strand[pc] = s
pc2tcov[pc] = pc2tcov.get(pc,0)+scale*tcov
#pc2tcov0[pc] = pc2tcov0.get(pc,0)+scale*tcov0
df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'tst':pc2tst,'ted':pc2ted,
'strand':pc2strand,'tcov':pc2tcov})
df['chr'] = chrom
df.index.name = 'name'
df.reset_index(inplace=True)
# create bed12: parse name => #exons, esizes, estarts
df['pc'] = df['name'].copy()
idxp = df['strand'].isin(['+','.+'])
if libsizes is not None:
df['tcov'] = df['tcov']/float(n)
df.loc[idxp,'name'] = ['{0},{1},{2}'.format(s,p,e) for s,p,e in df[idxp][['st','pc','ed']].values]
df.loc[~idxp,'name'] = ['{2},{1},{0}'.format(s,p,e) for s,p,e in df[~idxp][['st','pc','ed']].values]
df = df.groupby('pc').first() # get rid of unstranded duplicates
cmax = 9+N.log2(N.mean(scales))
bed = A3.path2bed12(df, cmax)
# reset sc1 to tcov (from log2(tcov+2)*100)
bed['sc1'] = bed['tcov']
GGB.write_bed(bed, path, ncols=12)
return path
def prep_sjdf_chr(j2pres, libsizes, dstpre, chrom):
pc2st = {}
pc2ed = {}
pc2strand = {}
pc2tcnt = {}
pc2ucnt = {}
# chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes
# cols = ['st','ed','name','strand','tst','ted','tcov0','tcov']
path = dstpre+'.sjdf.{0}.txt.gz'.format(chrom)
path0 = dstpre+'.sjdf.txt.gz'
if os.path.exists(path0):
return path
if os.path.exists(path):
return path
cols = ['st','ed','name','strand','st','ed','tcnt','ucnt']
# cols = A3.SJDFCOLS
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
paths = UT.read_pandas(pre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
paths = paths[paths['chr']==chrom]
for st,ed,pc,s,st,ed,tcnt,ucnt in paths[cols].values:
pc2st[pc] = st
pc2ed[pc] = ed
pc2strand[pc] = s
pc2tcnt[pc] = pc2tcnt.get(pc,0)+scale*tcnt
pc2ucnt[pc] = pc2ucnt.get(pc,0)+scale*ucnt
df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'st':pc2st,'ed':pc2ed,
'strand':pc2strand,'tcnt':pc2tcnt,'ucnt':pc2ucnt})
df['chr'] = chrom
df['kind'] = 'j'
if libsizes is not None:
df['tcnt'] = df['tcnt']/float(n)
df['ucnt'] = df['ucnt']/float(n)
df.index.name = 'name'
df.reset_index(inplace=True)
UT.write_pandas(df[A3.SJDFCOLS], path, '')
return path
def prep_exbw(dstpre, chroms, genome):
return _prep_bw(dstpre, chroms, genome, 'ex')
def prep_sjbw(dstpre, chroms, genome):
return _prep_bw(dstpre, chroms, genome, 'sj')
def _prep_bw(dstpre, chroms, genome, w):
# concatenate
ss = ['p','n','u']
files = []
bwpaths = {s: dstpre+'.{1}.{0}.bw'.format(s,w) for s in ss}
if all([os.path.exists(bwpaths[s]) for s in ss]):
return bwpaths
for s in ss:
dstwig = dstpre+'.{1}.{0}.wig'.format(s,w)
with open(dstwig, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.{2}.{0}.{1}.wig'.format(s,c,w)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
files.append(dstwig)
print('converting wig to bigwig {0}'.format(dstwig))
BT.wig2bw(dstwig, UT.chromsizes(genome), bwpaths[s])
# clean up
for f in files:
os.unlink(f)
return bwpaths
def prep_sjpath(dstpre, chroms):
dstpath = dstpre+'.sjpath.bed.gz'
if os.path.exists(dstpath):
return dstpath
files = []
with open(dstpath, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.sjpath.{0}.bed.gz'.format(c)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
# for f in files: # keep separate chr files
# os.unlink(f)
return dstpath
def prep_sjdf(dstpre, chroms):
dstpath = dstpre+'.sjdf.txt.gz'
if os.path.exists(dstpath):
return dstpath
files = []
with open(dstpath, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.sjdf.{0}.txt.gz'.format(c)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
# for f in files: # keep separate chr files
# os.unlink(f)
return dstpath
############# SJ Filter #######################################################
SJFILTERPARAMS = dict(
th_detected=1,
th_maxcnt=1,
th_maxoverhang=15,
th_minedgeexon=15,
th_sjratio=1e-3,
filter_unstranded=False,# there are substantial number of high cov unstranded
)
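# Note on the thresholds above: junctions are kept only if their detected/maxcnt/
# maxoverhang stats exceed the corresponding th_* values and their coverage ratio
# against the local maximum (sjratio) exceeds th_sjratio; sjpath edge exons must be
# longer than th_minedgeexon (see filter_sjpath/filter_sjdf below).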
class SJFilter(object):
def __init__(self, bwsjpre, statspath, genome, np=10, **kw):
self.bwsjpre = bwsjpre
self.statspath = statspath
self.genome = genome
self.np = np
self.params = SJFILTERPARAMS.copy()
self.params.update(kw)
def __call__(self):
chroms = UT.chroms(self.genome)
csizedic = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
args = []
for c in chroms:
csize = csizedic[c]
args.append((self.bwsjpre, self.statspath, c, csize, self.params))
rslts = UT.process_mp(filter_sjpath, args, np=self.np, doreduce=False)
dstpath = self.bwsjpre+'.filtered.sjpath.bed.gz'
with open(dstpath,'wb') as dst:
for c in chroms:
srcpath = self.bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(c)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
rslts = UT.process_mp(filter_sjdf, args, np=self.np, doreduce=False)
dstpath = self.bwsjpre+'.filtered.sjdf.txt.gz'
with open(dstpath,'wb') as dst:
for c in chroms:
srcpath = self.bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(c)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
# make sj.bw
sjfiltered2bw(self.bwsjpre, self.genome, self.np)
for s in ['p','n','u']:
src = self.bwsjpre + '.ex.{0}.bw'.format(s)
dst = self.bwsjpre + '.filtered.ex.{0}.bw'.format(s)
cmd = ['ln','-s', src, dst]
subprocess.call(cmd)
def locus2pc(l):
chrom,sted,strand = l.split(':')
st,ed = sted.split('-')
st = str(int(st)-1)
if strand in ['+','.']:
return '|'.join([st,ed])
return '|'.join([ed,st])
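# e.g. locus2pc('chr1:1000-2000:+') -> '999|2000' (start shifted to 0-based);
# for '-' strand loci the two ends are swapped.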
def filter_sjpath(bwsjpre, statspath, chrom, csize, params):
# read in junction stats
stats = UT.read_pandas(statspath)
if 'chr' not in stats:
stats['chr'] = [x.split(':')[0] for x in stats['locus']]
if '#detected' in stats:
stats.rename(columns={'#detected':'detected'}, inplace=True)
stats = stats[stats['chr']==chrom].copy()
if 'pc' not in stats:
stats['pc'] = [locus2pc(x) for x in stats['locus']]
flds = ['detected','maxcnt','maxoverhang']
dics = {f: UT.df2dict(stats, 'pc', f) for f in flds}
# read sjpath
fpath_chr = bwsjpre+'.sjpath.{0}.bed.gz'.format(chrom)
dstpath = bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(chrom)
if os.path.exists(fpath_chr):
sj = GGB.read_bed(fpath_chr)
else:
fpath = bwsjpre+'.sjpath.bed.gz'
sj = GGB.read_bed(fpath)
sj = sj[sj['chr']==chrom].copy()
name0 = sj.iloc[0]['name']
if len(name0.split('|'))<len(name0.split(',')): # exons attached?
sj['name'] = [','.join(x.split(',')[1:-1]) for x in sj['name']]
# filter unstranded
if params['filter_unstranded']:
sj = sj[sj['strand'].isin(['+','-'])].copy()
# filter with stats
for f in flds:
sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']]
sj = sj[sj[f]>params['th_'+f]].copy() # filter
# edge exon size
sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']]
sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']]
eth = params['th_minedgeexon']
sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy()
# calculate sjratio, sjratio
if params['filter_unstranded']:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False)
else:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True)
with sjexbw:
sa = sjexbw.bws['sj']['a'].get(chrom,0,csize)
ea = sjexbw.bws['ex']['a'].get(chrom,0,csize)
a = sa+ea
# sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values]
sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values]
sj = sj[sj['sjratio']>params['th_sjratio']]
GGB.write_bed(sj, dstpath, ncols=12)
def filter_sjdf(bwsjpre, statspath, chrom, csize, params):
# read in junction stats
stats = UT.read_pandas(statspath)
if 'chr' not in stats:
stats['chr'] = [x.split(':')[0] for x in stats['locus']]
if '#detected' in stats:
stats.rename(columns={'#detected':'detected'}, inplace=True)
stats = stats[stats['chr']==chrom].copy()
if 'pc' not in stats:
stats['pc'] = [locus2pc(x) for x in stats['locus']]
flds = ['detected','maxcnt','maxoverhang']
dics = {f: UT.df2dict(stats, 'pc', f) for f in flds}
# read sjdf
fpath_chr = bwsjpre+'.sjdf.{0}.txt.gz'.format(chrom)
dstpath = bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom)
if os.path.exists(fpath_chr):
sj = UT.read_pandas(fpath_chr, names=A3.SJDFCOLS)
else:
fpath = bwsjpre+'.sjdf.txt.gz'
sj = UT.read_pandas(fpath, names=A3.SJDFCOLS)
sj = sj[sj['chr']==chrom].copy()
# filter unstranded
if params['filter_unstranded']:
sj = sj[sj['strand'].isin(['+','-'])].copy()
# filter with stats
for f in flds:
# sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']]
sj[f] = [dics[f].get(y,0) for y in sj['name']]
sj = sj[sj[f]>params['th_'+f]].copy() # filter
# edge exon size
# sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']]
# sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']]
# eth = params['th_minedgeexon']
# sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy()
# calculate sjratio, sjratio
if params['filter_unstranded']:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False)
else:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True)
with sjexbw:
sa = sjexbw.bws['sj']['a'].get(chrom,0,csize)
ea = sjexbw.bws['ex']['a'].get(chrom,0,csize)
a = sa+ea
# sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values]
sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values]
sj = sj[sj['sjratio']>params['th_sjratio']]
UT.write_pandas(sj[A3.SJDFCOLS], dstpath, '')
def sjfiltered2wig(bwpre, chrom, chromsize):
a = {'+':N.zeros(chromsize, dtype=N.float64),
'-':N.zeros(chromsize, dtype=N.float64),
'.':N.zeros(chromsize, dtype=N.float64)}
path = bwpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom)
sjchr = UT.read_pandas(path, names=A3.SJDFCOLS)
for st,ed,v,strand in sjchr[['st','ed','tcnt','strand']].values:
a[strand[0]][st:ed] += v
for strand in a:
wig = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand)
cybw.array2wiggle_chr64(a[strand], chrom, wig)
return path
def sjfiltered2bw(bwpre, genome, np=12):
chroms = UT.chroms(genome)
chromdf = UT.chromdf(genome).sort_values('size',ascending=False)
chroms = [x for x in chromdf['chr'] if x in chroms]
chromdic = UT.df2dict(chromdf, 'chr', 'size')
args = [(bwpre, c, chromdic[c]) for c in chroms]
rslts = UT.process_mp(sjfiltered2wig, args, np=np, doreduce=False)
S2N = {'+':'p','-':'n','.':'u'}
rmfiles = []
for strand in ['+','-','.']:
s = S2N[strand]
wigpath = bwpre+'.filtered.sj.{0}.wig'.format(s)
with open(wigpath, 'w') as dst:
for chrom in chroms:
f = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand)
with open(f,'r') as src:
shutil.copyfileobj(src, dst)
rmfiles.append(f)
bwpath = bwpre+'.filtered.sj.{0}.bw'.format(s)
BT.wig2bw(wigpath, UT.chromsizes(genome), bwpath)
rmfiles.append(wigpath)
for f in rmfiles:
os.unlink(f)
############# Cov Estimator ######################################################
class LocalEstimator(A3.LocalAssembler):
def __init__(self, modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom=False):
self.modelpre = modelpre
self.tcovth = tcovth
self.usegeom = usegeom
A3.LocalAssembler.__init__(self, bwpre, chrom, st, ed, dstpre)
bed12 = GGB.read_bed(modelpre+'.paths.withse.bed.gz')
assert(all(bed12['tst']<bed12['ted']))
idx = (bed12['chr']==chrom)&(bed12['tst']>=st)&(bed12['ted']<=ed)
self.paths = bed12[idx].copy()
eids = set()
sids = set()
for n in self.paths['name']:
eids.update(n.split('|'))
sids.update(n.split(',')[1:-1])
tgt1 = bwpre+'.filtered.{0}.bed.gz'.format(chrom)
tgt2 = bwpre+'.{0}.bed.gz'.format(chrom)
tgt3 = bwpre+'.sjpath.bed.gz'
if os.path.exists(tgt1):
sj = GGB.read_bed(tgt1)
elif os.path.exists(tgt2):
sj = GGB.read_bed(tgt2)
else:
sj = GGB.read_bed(tgt3)
idx0 = (sj['chr']==chrom)&(sj['tst']>=st)&(sj['ted']<=ed)
self.sjpaths0 = sj[idx0].copy()
# load exdf, sjdf
sjdf = UT.read_pandas(modelpre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
sjdf['tst'] = sjdf['st'] # for sjpath compatibility
sjdf['ted'] = sjdf['ed']
sjdf['sc1'] = sjdf['ucnt']
sjdf['sc2'] = sjdf['tcnt']
sjdf = sjdf[(sjdf['chr']==chrom)&(sjdf['st']>=st)&(sjdf['ed']<=ed)]
sjdf = sjdf[sjdf['name'].isin(sids)]
self.sjdf = sjdf.groupby(['chr','st','ed','strand']).first().reset_index()
exdf = UT.read_pandas(modelpre+'.exdf.txt.gz', names=A3.EXDFCOLS)
exdf = exdf[(exdf['chr']==chrom)&(exdf['st']>=st)&(exdf['ed']<=ed)]
exdf = exdf[exdf['name'].isin(eids)]
if os.path.exists(modelpre+'.sedf.txt.gz'):
sedf = UT.read_pandas(modelpre+'.sedf.txt.gz', names=A3.EXDFCOLS)
sedf = sedf[(sedf['chr']==chrom)&(sedf['st']>=st)&(sedf['ed']<=ed)]
sedf = sedf[sedf['name'].isin(eids)]
exdf = PD.concat([exdf,sedf],ignore_index=True)
self.exdf = exdf.groupby(['chr','st','ed','strand','kind']).first().reset_index()
A3.set_ad_pos(self.sjdf, 'sj')
A3.set_ad_pos(self.exdf, 'ex')
# filled
self.filled = {}
sjs = self.sjdf
exs = self.exdf[self.exdf['kind']=='i'].copy()
exs['ost'] = exs['st']-self.st
exs['oed'] = exs['ed']-self.st
for s in ['+','-']:
sja = self.arrs['sj'][s]
sj = sjs[sjs['strand'].isin(A3.STRS[s])]
ex = exs[exs['strand'].isin(A3.STRS[s])]
self.filled[s] = A3.fill_gap(sja, sj, ex, s, self.st)
# fix_i53completematch(self.exdf, self.paths) # extend 5'3' exons completely matched internal exons
def process(self):
self.calculate_ecovs()
self.calculate_scovs()
self.estimate_abundance()
self.write()
return
def calculate_scovs(self):
sj = self.sjdf
sj0 = self.sjpaths0
sj0mat = sj0[['sc1','sc2','name']].values
tmp = [[(sc1,sc2) for sc1,sc2,p in sj0mat if y in p] for y in sj['name']]
sj['ucnt'] = [N.sum([x[0] for x in y]) for y in tmp]
sj['tcnt'] = [N.sum([x[1] for x in y]) for y in tmp]
self.sjdfi = sj.set_index('name')
def calculate_branchp(self, jids, eids):
sj0 = self.sjdfi
sj = sj0.ix[jids].reset_index()
ex0 = self.exdfi
ex = ex0.ix[eids].reset_index()
dsump = sj.groupby('dpos')['tcnt'].sum().astype(float)
tmp = dsump.ix[sj['dpos'].values]
jdp = sj['tcnt'].values/tmp.values
idx = N.array(tmp==0, dtype=bool)
jdp[idx] = 0.
j2p = dict(zip(sj['name'].values, jdp))
# exon groupby acceptor
asump = ex.groupby('apos')['ecov'].sum().astype(float)
tmp = asump.ix[ex['apos'].values]
eap = ex['ecov'].values/(tmp.values)
idx = N.array(tmp==0, dtype=bool)
eap[idx] = 0.
e2ap = dict(zip(ex['name'].values, eap))
dsump = ex.groupby('dpos')['ecov'].sum().astype(float)
tmp = dsump.ix[ex['dpos'].values]
edp = ex['ecov'].values/(tmp.values)
idx = N.array(tmp==0, dtype=bool)
edp[idx] = 0.
e2dp = dict(zip(ex['name'].values, edp))
return j2p, e2ap, e2dp
def tcov_by_nnls(self, s, e, strand):
o = int(self.st)
p = self.paths
idx = (p['tst']>=s)&(p['ted']<=e)&(p['strand'].isin(A3.STRS[strand]))
ps = p[idx]
if len(ps)==0:
return None
pg = ps.groupby(['tst','ted']).first().reset_index()[['chr','tst','ted','strand','name']].sort_values(['tst','ted'])
pg['strand'] = strand
ne = len(pg)
exa = self.arrs['ex'][strand]
# sja = self.arrs['sj'][strand]
sja = self.filled[strand]
def cov0(s,e):
# return N.sum(sja[s-o:e-o]+exa[s-o:e-o])/(e-s)
return N.mean(sja[s-o:e-o])
# def cov1s(s):
# s0 = max(0, int(s)-o-10)
# s1 = max(s0+1,int(s)-o)
# return N.mean(exa[s0:s1])
# def cov1e(e):
# return N.mean(exa[int(e)-o:int(e)-o+10])
e_ed2cov = self.eed2cov[strand]
e_st2cov = self.est2cov[strand]
def cov1s(s):
return e_ed2cov.get(s,0)
def cov1e(e):
return e_st2cov.get(e,0)
def cov2s(s): # donor
# s0 = max(0, s-o-1)
return max(0, sja[int(s)-o]-sja[int(s)-o-1])
def cov2e(e): # acceptor
# e0 = max(0, e-o-1)
return max(0, sja[int(e)-o-1]-sja[int(e)-o])
# cov0
if ne>1:
pg.rename(columns={'tst':'st','ted':'ed'}, inplace=True)
pg['eid'] = N.arange(len(pg))
ci = UT.chopintervals(pg, idcol='eid')
ci['cov'] = [cov0(s,e) for s,e in ci[['st','ed']].values]
ci['name1'] = ci['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
nc = len(ci)
mat = N.zeros((nc,ne))
for i,n1 in enumerate(ci['name1'].values):# fill in rows
N.put(mat[i], N.array(n1), 1)
try:
ecov,err = nnls(mat, ci['cov'].values)
pg['tcov0a'] = ecov
except Exception as e:
# too much iteration?
LOG.warning('!!!!!! Exception in NNLS (tcov_by_nnls) @{0}:{1}-{2}, setting to zero !!!!!!!!!'.format(self.chrom, s, e))
pg['tcov0a'] = 0
# raise e
pg.rename(columns={'st':'tst','ed':'ted'}, inplace=True)
else: # this includes single exons
s,e = pg.iloc[0][['tst','ted']]
pg['tcov0a'] = cov0(s,e)
# cov1, cov2
if ne>1:
sts = sorted(set(pg['tst'].values))
eds = sorted(set(pg['ted'].values))
nst,ned = len(sts),len(eds)
mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float)
c = N.array([cov1s(x) for x in sts]+[cov1e(x) for x in eds])
# enforce flux conservation: scale up 5'
stsum = N.sum(c[:nst])
edsum = N.sum(c[nst:])
if stsum<1e-9 or edsum<1e-9:
pg['tcov0b'] = 0
else:
c0 = c.copy()
if strand in ['+','.+']:
c[:nst] = (edsum/stsum)*c[:nst]
else:
c[nst:] = (stsum/edsum)*c[nst:]
try:
ecov,err = nnls(mat, c)
except Exception as e:
print('s:{0},e:{1},strand:{2}'.format(s,e,strand))
print('stsum:', stsum)
print('edsum:', edsum)
print('nnls error tcov0b', mat, c, c0)
print('sts:',sts)
print('eds:',eds)
print('pg:',pg)
pg['tcov0c'] = 0
raise e
pg['tcov0b'] = ecov
mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float)
c = N.array([cov2s(x) for x in sts]+[cov2e(x) for x in eds])
# enforce flux conservation: scale up 5'
stsum = N.sum(c[:nst])
edsum = N.sum(c[nst:])
if stsum<1e-9 or edsum<1e-9:
pg['tcov0c'] = 0
else:
if strand in ['+','.+']:
c[:nst] = (edsum/stsum)*c[:nst]
else:
c[nst:] = (stsum/edsum)*c[nst:]
try:
ecov,err = nnls(mat, c)
except Exception as e:
print('s:{0},e:{1},strand:{2}'.format(s,e,strand))
print('nnls error tcov0c', mat, c)
pg['tcov0c'] = 0
raise e
pg['tcov0c'] = ecov
else:
s,e = pg.iloc[0][['tst','ted']]
pg['tcov0b'] = (cov1s(s)+cov1e(e))/2.
pg['tcov0c'] = (cov2s(s)+cov2e(e))/2.
if not self.usegeom:
# pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].mean(axis=1)
# pg['tcov0'] = (2*pg['tcov0a']+pg['tcov0b']+pg['tcov0c'])/4. # weighted
pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].median(axis=1)
else:
pg['tcov0'] = N.power(pg['tcov0a']*pg['tcov0b']*pg['tcov0c'], 1/3.) # geometric mean
pg.loc[pg['tcov0']<0,'tcov0'] = 0 # shouldn't really happen
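# tcov0 combines three independent estimates of the 5'-3' group abundance: tcov0a
# (NNLS fit over chopped intervals), tcov0b (edge-exon coverages) and tcov0c
# (coverage steps at donor/acceptor positions), via the median or their geometric mean.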
keys = [tuple(x) for x in p[idx][['tst','ted']].values]
for f in ['tcov0','tcov0a','tcov0b','tcov0c']:
p.loc[idx, f] = pg.set_index(['tst','ted']).ix[keys][f].values
return pg[['chr','tst','ted','strand','tcov0']]
def tcov_by_branchp(self, tst, ted, strand, tcov0):
p = self.paths
idx = (p['strand'].isin(A3.STRS[strand]))&(p['tst']==tst)&(p['ted']==ted)
if N.sum(idx)==0:
return
# if N.sum(idx)>1:
# calculate branchp within this group
jids = set()
eids = set()
for n in p[idx]['name']:
jids.update(n.split(',')[1:-1])
eids.update(n.split('|'))
j2p, e2ap, e2dp = self.calculate_branchp(jids, eids)
def _prob(y):
epath0 = y.split('|')
e5 = epath0[0] # use donor p
epath = epath0[1:] # use acceptor p
jpath = y.split(',')[1:-1]
return e2dp[e5]*N.prod([e2ap[x] for x in epath])*N.prod([j2p[x] for x in jpath])
p.loc[idx,'tcov'] = [tcov0*_prob(y) for y in p[idx]['name']]
# else:
# p.loc[idx,'tcov'] = tcov0
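# Each path's tcov is its 5'-3' group total (tcov0, from the NNLS step) multiplied by
# the product of branch probabilities along its junction/exon chain, i.e. the donor and
# acceptor usage fractions computed in calculate_branchp.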
def estimate_abundance(self):
# 1) 5-3 group by NNLS
# 2) within 5-3 group by tree branch prob
paths = self.paths
idxme = paths['name'].str.contains('\|')
mepaths = paths[idxme].copy()
sepaths = paths[~idxme].copy()
self.paths = mepaths
for s in ['+','-']:
ps = mepaths[mepaths['strand'].isin(A3.STRS[s])]
if len(ps)==0:
continue
# for chrom,st,ed in UT.union_contiguous(ps[['chr','st','ed']],returndf=False):
poscols = ['chr','tst','ted']
for chrom,st,ed in UT.union_contiguous(ps[poscols],pos_cols=poscols,returndf=False):
pg = self.tcov_by_nnls(st,ed,s)
if pg is not None:
for chrom,tst,ted,strand,tcov0 in pg.values:
self.tcov_by_branchp(tst,ted,strand,tcov0)
e2c = UT.df2dict(self.exdf, 'name', 'ecov')
sepaths['tcov'] = [e2c[x] for x in sepaths['name']]
for f in ['tcov0','tcov0b']:
sepaths[f] = sepaths['tcov']
sepaths['tcov0a'] = 0.
sepaths['tcov0c'] = 0.
paths = PD.concat([mepaths, sepaths], ignore_index=True)
paths.sort_values(['chr','st','ed'],inplace=True)
self.paths = paths
def write(self):
pre = self.dstpre+'.{0}_{1}_{2}'.format(self.chrom,self.st,self.ed)
# 1) exon, junctions, allpaths => csv (no header <= to concatenate bundles)
ecols = A3.EXDFCOLS #['chr','st','ed','strand','name','kind','ecov']
UT.write_pandas(self.exdf[ecols], pre+'.covs.exdf.txt.gz', '')
scols = A3.SJDFCOLS #['chr','st','ed','strand','name','kind','tcnt' ]#,'donor','acceptor','dp','ap']
UT.write_pandas(self.sjdf[scols], pre+'.covs.sjdf.txt.gz', '')
pcols = A3.PATHCOLS #['chr','st','ed','name','strand','tst','ted','tcov0','tcov1','tcov']
UT.write_pandas(self.paths[pcols], pre+'.covs.paths.txt.gz', '')
# write colored bed12 for tcov > th
tgt = self.paths[self.paths['tcov']>=self.tcovth].copy()
self.bed12 = A3.path2bed12(tgt, cmax=9, covfld='tcov')
GGB.write_bed(self.bed12, pre+'.covs.paths.bed.gz',ncols=12)
def bundle_estimator(modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom):
bname = A3.bundle2bname((chrom,st,ed))
bsuf = '.{0}_{1}_{2}'.format(chrom,st,ed)
csuf = '.{0}'.format(chrom)
sufs = ['.covs.exdf.txt.gz',
'.covs.sjdf.txt.gz',
'.covs.paths.txt.gz',
'.covs.paths.bed.gz',
]
done = []
for x in sufs:
done.append(os.path.exists(dstpre+bsuf+x) | \
os.path.exists(dstpre+csuf+x) | \
os.path.exists(dstpre+x) )
if all(done):
LOG.info('bundle {0} already done, skipping'.format(bname))
return bname
LOG.info('processing bundle {0}'.format(bname))
la = LocalEstimator(modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom)
return la.process()
def concatenate_bundles(bundles, dstpre):
# concat results
sufs = ['covs.exdf.txt.gz',
'covs.sjdf.txt.gz',
'covs.paths.txt.gz',
'covs.paths.bed.gz',
]
files = []
for suf in sufs:
dstpath = '{0}.{1}'.format(dstpre, suf)
if not os.path.exists(dstpath):
with open(dstpath, 'wb') as dst:
for chrom, st, ed in bundles:
bname = A3.bundle2bname((chrom,st,ed))
srcpath = '{0}.{1}_{2}_{3}.{4}'.format(dstpre, chrom, st, ed, suf)
files.append(srcpath)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
else:
files+=['{0}.{1}_{2}_{3}.{4}'.format(dstpre, chrom, st, ed, suf) for chrom,st,ed in bundles]
# cleanup
for f in files:
if os.path.exists(f):
os.unlink(f)
def estimatecovs(modelpre, bwpre, dstpre, genome, tcovth=1, usegeom=True, np=6):
bed = GGB.read_bed(modelpre+'.paths.withse.bed.gz')
chroms = bed['chr'].unique()
csizedic = UT.df2dict(UT.chromdf(genome), 'chr', 'size')
bundles = []
args = []
for chrom in chroms:
sub = bed[(bed['chr']==chrom)]
uc = UT.union_contiguous(sub[['chr','st','ed']], returndf=True)
# total about 30K=> make batch of ~1000
n = len(uc)
nb = int(N.ceil(n/1000.))
for i in range(nb):
sti = 1000*i
edi = min(1000*(i+1), len(uc)-1)
st = max(uc.iloc[sti]['st'] - 100, 0)
ed = min(uc.iloc[edi]['ed'] + 100, csizedic[chrom])
args.append([modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom])
bundles.append((chrom,st,ed))
rslts = UT.process_mp(bundle_estimator, args, np=np, doreduce=False)
concatenate_bundles(bundles, dstpre)
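# Hypothetical call: estimatecovs('model/prefix', 'sample/bwpre', 'out/prefix', 'mm10',
# tcovth=1, usegeom=True, np=6) splits the model paths into ~1000-interval bundles,
# runs bundle_estimator on each in parallel and concatenates the per-bundle outputs.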
class CovEstimator(object):
def __init__(self, modelpre, bwpre, dstpre, genome, tcovth=1, usegeom=False, np=6):
self.modelpre = modelpre
self.bwpre = bwpre
self.dstpre = dstpre
self.genome = genome
self.tcovth = tcovth
self.usegeom = usegeom
self.np = np
def run(self):
self.server = server = TQ.Server(np=self.np)
print('reading paths.withse.bed.gz')
bed = GGB.read_bed(self.modelpre+'.paths.withse.bed.gz')
chroms = bed['chr'].unique()
csizedic = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
self.bundlestatus = bundlestatus = {}
self.bundles = bundles = []
with server:
print('starting task server')
subid = 0
for chrom in chroms:
print('chrom {0}'.format(chrom))
sub = bed[(bed['chr']==chrom)]
uc = UT.union_contiguous(sub[['chr','st','ed']], returndf=True)
# total about 30K=> make batch of ~1000
n = len(uc)
nb = int(N.ceil(n/1000.))
print(chrom,nb)
for i in range(nb):
print('putting in bundle_estimator {0}.{1}'.format(chrom,subid))
sti = 1000*i
edi = min(1000*(i+1), len(uc)-1)
st = max(uc.iloc[sti]['st'] - 100, 0)
ed = min(uc.iloc[edi]['ed'] + 100, csizedic[chrom])
args = [self.modelpre, self.bwpre, chrom, st, ed, self.dstpre, self.tcovth, self.usegeom]
tname = 'bundle_estimator.{0}'.format(subid)
subid += 1
task = TQ.Task(tname, bundle_estimator, args)
server.add_task(task)
bundles.append((chrom,st,ed))
nb = len(bundles)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5)
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('bundle_estimator.'):
subid = name.split('.')[-1]
bundlestatus[subid] = rslt
if len(bundlestatus)==nb:
print('$$$$$$$$ putting in concatenate_bundles $$$$$$$$$$$')
tname='concatenate_bundles'
args = (bundles, self.dstpre)
task = TQ.Task(tname, concatenate_bundles, args)
server.add_task(task)
if name=='concatenate_bundles':
print('$$$$$$$$ concatenate_bundles done $$$$$$$$$$$')
break
print('Exit Loop')
print('Done')
############# Cov Collector ######################################################
class CovCollector(object):
def __init__(self, covpres, dstpre, np=7):
self.covpres = covpres
self.modelpre = covpres[0]
self.dstpre = dstpre
self.np = np
def run(self):
self.server = server = TQ.Server(np=self.np)
self.exdf = ex = UT.read_pandas(self.modelpre+'.covs.exdf.txt.gz', names=A3.EXDFCOLS)
self.chroms = chroms = ex['chr'].unique()
self.exstatus = exstatus = {}
self.sjstatus = sjstatus = {}
self.pastatus = pastatus = {}
exdone=False
sjdone=False
padone=False
n = len(self.covpres)
nb = int(N.ceil(n/50.))
with server:
for subid in range(nb):
covpressub = self.covpres[50*subid:50*(subid+1)]
# ex
tname = 'collect_ecov_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_ecov_subset, args)
server.add_task(task)
# sj
tname = 'collect_tcnt_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_tcnt_subset, args)
server.add_task(task)
# path
tname = 'collect_tcovs_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_tcovs_subset, args)
server.add_task(task)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5)
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('collect_ecov_subset.'):
subid = name.split('.')[-1]
exstatus[subid] = rslt
if len(exstatus)==nb:
print('$$$$$$$$ putting in concatenate_ecov_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_ecov_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_ecov_subsets, args)
server.add_task(task)
if name.startswith('collect_tcnt_subset.'):
subid = name.split('.')[-1]
sjstatus[subid] = rslt
if len(sjstatus)==nb:
print('$$$$$$$$ putting in concatenate_tcnt_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_tcnt_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_tcnt_subsets, args)
server.add_task(task)
if name.startswith('collect_tcovs_subset.'):
subid = name.split('.')[-1]
pastatus[subid] = rslt
if len(pastatus)==nb:
print('$$$$$$$$ putting in concatenate_tcovs_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_tcovs_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_tcovs_subsets, args)
server.add_task(task)
if name=='concatenate_ecov_subsets':
print('$$$$$$$$ concatenate_ecov_subsets done $$$$$$$$$$$')
exdone=True
if name=='concatenate_tcnt_subsets':
print('$$$$$$$$ concatenate_tcnt_subsets done $$$$$$$$$$$')
sjdone=True
if name=='concatenate_tcovs_subsets':
print('$$$$$$$$ concatenate_tcovs_subsets done $$$$$$$$$$$')
padone=True
if exdone&sjdone&padone:
break
print('Exit Loop')
print('Done')
def collect_ecov_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'ex')
def concatenate_ecov_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'ex', chrom)
def collect_tcnt_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'sj')
def concatenate_tcnt_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'sj', chrom)
def collect_tcovs_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'pa')
def concatenate_tcovs_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'pa', chrom)
def _collect_subset(modelpre, covpressub, dstpre, subid, which):
if which == 'ex':
suf = 'exdf'
flds = ['ecov']
fsuf = 'ecovs'
cols = A3.EXDFCOLS
elif which == 'sj':
suf = 'sjdf'
flds = ['tcnt']
fsuf = 'tcnts'
cols = A3.SJDFCOLS
else:
suf = 'paths'
flds = ['tcov0','tcov']
fsuf = 'tcovs'
cols = A3.PATHCOLS
ex0 = UT.read_pandas(modelpre+'.covs.{0}.txt.gz'.format(suf), names=cols)
chroms = ex0['chr'].unique()
# read in exdf sort, transpose and write(append) to dst
if all([os.path.exists(dstpre+'.{1}.{0}.txt.gz'.format(c,fsuf)) for c in chroms]):
return []
if all([os.path.exists(dstpre+'.{2}.{0}.{1}.txt.gz'.format(c,subid,fsuf)) for c in chroms]):
return []
ex0.sort_values(['chr','st','ed','strand'], inplace=True)
names = []
for pre in covpressub:
name = pre.split('/')[-1]
ex1 = UT.read_pandas(pre+'.covs.{0}.txt.gz'.format(suf), names=cols)
ex1.sort_values(['chr','st','ed','strand'], inplace=True)
for f in flds:
cname = '{0}.{1}'.format(name, f)
ex0[cname] = ex1[f].values
names.append(cname)
ex0.reset_index(inplace=True)
files = []
for chrom in ex0['chr'].unique():
ex0chr = ex0[ex0['chr']==chrom].sort_values(['st','ed','strand'])
dst = dstpre+'.{2}.{0}.{1}.txt.gz'.format(chrom,subid,fsuf)
UT.write_pandas(ex0chr[names].T, dst, 'i')
files.append(dst)
return files
def _concatenate_subsets(modelpre, dstpre, subids, which, chrom):
if which == 'ex':
suf = 'exdf'
fsuf = 'ecovs'
cols = A3.EXDFCOLS
elif which == 'sj':
suf = 'sjdf'
fsuf = 'tcnts'
cols = A3.SJDFCOLS
else:
suf = 'paths'
fsuf = 'tcovs'
cols = A3.PATHCOLS
ex0 = UT.read_pandas(modelpre+'.covs.{0}.txt.gz'.format(suf), names=cols)
chroms = ex0['chr'].unique()
files = []
dstpath0 = dstpre+'.{1}.{0}.tmp.txt.gz'.format(chrom,fsuf)
dstpath1 = dstpre+'.{1}.{0}.txt.gz'.format(chrom,fsuf)
if not os.path.exists(dstpath1):
with open(dstpath0, 'wb') as dst:
for subid in subids:
srcpath = dstpre+'.{2}.{0}.{1}.txt.gz'.format(chrom,subid,fsuf)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
ex0chr = ex0[ex0['chr']==chrom].sort_values(['st','ed','strand'])
ex1chr = UT.read_pandas(dstpath0,names=ex0chr.index,index_col=[0]).T
df = PD.concat([ex0chr, ex1chr],axis=1)
UT.write_pandas(df, dstpath1, 'h')
files.append(dstpath0)
#os.unlink(dstpath0)
for f in files:
if os.path.exists(f):
os.unlink(f)
return dstpath1
############# SJ count Collector ####################################################
import os
from jgem import gtfgffbed as GGB
def collect_one(bwpre, which):
# because of unstranded data name (jid) cannot be trusted
# just use locus (chr:st-ed) (st<ed)
sjpaths = GGB.read_bed(bwpre+'.sjpath.bed.gz')
sjpaths['ucnt'] = sjpaths['sc1']
sjpaths['tcnt'] = sjpaths['sc2']
sjpaths['jids'] = sjpaths['name'].str.split(',')
sj = UT.flattendf(sjpaths, 'jids')
sj['sted'] = [[int(y) for y in x.split('|')] for x in sj['jids']]
#idxp = sj['strand'].isin(['+','.'])
sj['st'] = [min(x) for x in sj['sted']]
sj['ed'] = [max(x) for x in sj['sted']]
sj['locus'] = UT.calc_locus(sj)
l2u = UT.df2dict(sj, 'locus', which)
return l2u
def collect_sjcnts_worker(idf, subsi, acode, which, dstpath):
# which tcnt, ucnt
# idf ['locus']
cols = []
for sname, bwpre in subsi[['name','bwpre']].values:
l2u = collect_one(bwpre, which)
idf[sname] = [l2u.get(x,0) for x in idf['locus']]
cols.append(sname)
UT.write_pandas(idf[cols], dstpath, 'ih') # don't want non-sample columns
return dstpath
def collect_sjcnts(dataset_code, si, assembly_code, modelpre, which, outdir, np=7):
"""
Args:
dataset_code: identifier to indicate dataset
si: dataset sampleinfo dataframe
(required cololums: name, sjbed_path=path to (converted) raw juncton count file)
assembly_code: identifier for assembly
sjexpre: assembly sjex path prefix
which: ucnt, mcnt, jcnt=ucnt or mcnt (when ucnt=0)
outdir: output directory
"""
sj = UT.read_pandas(modelpre+'.sj.txt.gz')
#sj['st'] = sj['st-1'] # old format
sj['locus'] = UT.calc_locus(sj,'chr','st-1','ed')
#sj['uid'] = sj['chr']+':'+sj['name']
idf = sj[['_id', 'locus']].set_index('_id')
#idf = sj[['_id', 'uid']].copy()
dstpre = os.path.join(outdir, '{0}.{1}'.format(dataset_code, assembly_code))
batchsize = int(N.ceil(len(si)/float(np)))
args = []
files = []
si1 = si[['name','bwpre']]
for i in range(np):
subsi = si1.iloc[i*batchsize:(i+1)*batchsize].copy()
dstpath = dstpre+'.{0}.part{1}.txt.gz'.format(which, i)
files.append(dstpath)
args.append((idf, subsi, assembly_code, which, dstpath))
rslts = UT.process_mp(collect_sjcnts_worker, args, np=np, doreduce=False)
# concat part files
dfs = [UT.read_pandas(fpath, index_col=[0]) for fpath in files]
df = PD.concat(dfs, axis=1)
dstpath = dstpre+'.{0}s.txt.gz'.format(which)
UT.write_pandas(df, dstpath, 'ih')
for fpath in files:
os.unlink(fpath)
return df
###################
def fix_i53completematch(exdf, paths):
# extend edge of 5'3' exons if they completely match to internal exons
idxp = exdf['strand'].isin(A3.STRS['+'])
idx5 = exdf['kind']=='5'
idx3 = exdf['kind']=='3'
idxi = exdf['kind']=='i'
ileft = (idxp&idx5)|(~idxp&idx3)
iright = (idxp&idx3)|(~idxp&idx5)
steds = set([(c,x,y) for c,x,y in exdf[idxi][['chr','st','ed']].values])
idxm = N.array([(c,x,y) in steds for c,x,y in exdf[['chr','st','ed']].values], dtype=bool)
imleft = ileft&idxm
imright = iright&idxm
while (N.sum(imleft)+N.sum(imright))>0:
# fix exdf st,ed
exdf.loc[imleft,'st'] = exdf[imleft]['st']-10
exdf.loc[imright, 'ed'] = exdf[imright]['ed']+10
# make old name => new name map
im5 = (imleft|imright)&idx5
im3 = (imleft|imright)&idx3
LOG.info('{0} 5exon fixed, {1} 3exon fixed'.format(N.sum(im5),N.sum(im3)))
tmp = exdf[im5][['chr','name','st','ed','strand']].values
n2n5 = dict([('{0}:{1}'.format(c,n),A3._pc(s,e,strand,',')) for c,n,s,e,strand in tmp])
tmp = exdf[im3][['chr','name','st','ed','strand']].values
n2n3 = dict([('{0}:{1}'.format(c,n),A3._pc(s,e,strand,',')) for c,n,s,e,strand in tmp])
# fix path name, st, ed
p5ids = ['{0}:{1}'.format(c,n.split('|')[0]) for c,n in paths[['chr','name']].values]
p3ids = ['{0}:{1}'.format(c,n.split('|')[-1]) for c,n in paths[['chr','name']].values]
p5idx = N.array([x in n2n5 for x in p5ids], dtype=bool)
p3idx = N.array([x in n2n3 for x in p3ids], dtype=bool)
def _fix5(c,n,n2n5):
tmp = n.split('|')
n5 = n2n5['{0}:{1}'.format(c,tmp[0])]
return '|'.join([n5]+tmp[1:])
def _fix3(c,n,n2n3):
tmp = n.split('|')
n3 = n2n3['{0}:{1}'.format(c,tmp[-1])]
return '|'.join(tmp[:-1]+[n3])
paths.loc[p5idx,'name'] = [_fix5(c,n,n2n5) for c,n in paths[p5idx][['chr','name']].values]
paths.loc[p3idx,'name'] = [_fix3(c,n,n2n3) for c,n in paths[p3idx][['chr','name']].values]
pidx = p5idx|p3idx
def _st(n):
tmp = n.split(',')
st0 = int(tmp[0])
ed0 = int(tmp[-1])
return min(st0,ed0)
def _ed(n):
tmp = n.split(',')
st0 = int(tmp[0])
ed0 = int(tmp[-1])
return max(st0,ed0)
paths.loc[pidx,'st'] = [_st(n) for n in paths[pidx]['name']]
paths.loc[pidx,'ed'] = [_ed(n) for n in paths[pidx]['name']]
# fix exdf name
exdf.loc[im5, 'name'] = [_fix5(c,n,n2n5) for c,n in exdf[im5][['chr','name']].values]
exdf.loc[im3, 'name'] = [_fix3(c,n,n2n3) for c,n in exdf[im3][['chr','name']].values]
idxm = N.array([(c,x,y) in steds for c,x,y in exdf[['chr','st','ed']].values], dtype=bool)
imleft = ileft&idxm
imright = iright&idxm
################### 5gr,53gr cov
def heads(paths, chrom, strand):
#if 'id' not in paths:
# paths['id']= paths['chr']+':'+paths['name'] #N.arange(len(paths))
p = paths[(paths['strand'].isin(A3.STRS[strand]))&(paths['chr']==chrom)]
if 'pc' in paths:
heads = [([int(y) for y in x.split('|')[0].split(',')], i) for i,x in p[['id','pc']].values]
else:
heads = [([int(y) for y in x.split('|')[0].split(',')], i) for i,x in p[['id','name']].values]
if strand in ['-','.-']:
heads = [(x[0][::-1],x[1]) for x in heads]
return heads
def headgroups(heads):
heads = sorted(heads)
def _gen():
cids = [heads[0][1]]
cst,ced = heads[0][0]
for (st,ed), nid in heads[1:]:
if st<ced: # overlap
# add to current group
cids.append(nid)
# expand
ced = max(ced, ed)
else: # yield current group and make new
yield ([cst,ced], cids)
cst,ced = st,ed
cids = [nid]
yield ([cst,ced],cids)
return [x for x in _gen()]
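# headgroups() merges overlapping head-exon intervals into groups, e.g.
# [([100, 200], 'a'), ([150, 250], 'b'), ([300, 400], 'c')]
#   -> [([100, 250], ['a', 'b']), ([300, 400], ['c'])]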
def find_all_5groups(paths):
hgs = {} # head groups
i2g = {} # id => group map
if 'pc' in paths:
idxme = paths['pc'].str.contains('\|')
paths['id'] = paths['chr']+':'+paths['pc']
else:
idxme = paths['name'].str.contains('\|')
paths['id'] = paths['chr']+':'+paths['name']
mepaths = paths[idxme]
sepaths = paths[~idxme]
for chrom in paths['chr'].unique():
hgs[chrom] = {}
for strand in ['+','-']:
h = heads(mepaths, chrom, strand)
hg = headgroups(h)
print('{0}:{1}:#hg={2}'.format(chrom,strand,len(hg)))
hgs[chrom][strand] = hg
for (st,ed),ids in hg:
g = '{0}:{1}-{2}:{3}'.format(chrom,st,ed,strand) # group id
for i in ids:
i2g[i] = g
for chrom,st,ed,i in sepaths[['chr','st','ed','id']].values:
i2g[i] = '{0}:{1}-{2}:s'.format(chrom,st,ed)
return i2g, hgs
# paths = GGB.read_bed(rdstpre+'.paths.withse.bed.gz')
def make_idmap(mdstpre):
ex = UT.read_pandas(mdstpre+'.ex.txt.gz')
paths = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
ex['id'] = ex['chr']+':'+ex['name']
i2gn = UT.df2dict(ex, 'id', 'gname')
paths['id'] = paths['chr']+':'+paths['name']
paths['id0'] = paths['chr']+':'+paths['name'].str.split('|').str[0]
#paths['gname'] = [i2gn[c+':'+x.split('|')[0]] for c,x in paths[['chr','name']].values]
paths['gname'] = [i2gn[x] for x in paths['id0']]
g2cnt = {}
tnames = []
for x in paths['gname']:
i = g2cnt.get(x,1)
tnames.append('{0}.{1}'.format(x,i))
g2cnt[x] = i+1
paths['tname'] = tnames
i2gn = UT.df2dict(paths, 'id', 'gname')
i2tn = UT.df2dict(paths, 'id', 'tname')
idf = PD.DataFrame({'gname':i2gn, 'tname':i2tn})
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import date_range
try:
import pandas.tseries.holiday
except ImportError:
pass
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
class ApplyIndex(object):
goal_time = 0.2
params = [pd.offsets.YearEnd(), pd.offsets.YearBegin(),
pd.offsets.BYearEnd(), pd.offsets.BYearBegin(),
pd.offsets.QuarterEnd()]
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
import collections
import datetime
import csv
from dateutil.relativedelta import relativedelta
import json
class Connection():
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
:param data: the name of data (resq or atalaia)
:type data: str
"""
def __init__(self, nprocess=1, data='resq'):
start = time.time()
# Create log file in the working folder
debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log'
log_file = os.path.join(os.getcwd(), debug)
logging.basicConfig(filename=log_file,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info('Connecting to datamix database!')
# Get absolute path
path = os.path.dirname(__file__)
self.database_ini = os.path.join(path, 'database.ini')
# Read temporary csv file with CZ report names and Angels Awards report names
path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mapping.json')
with open(path, 'r', encoding='utf-8') as json_file:
cz_names_dict = json.load(json_file)
# Set section
datamix = 'datamix-backup'
# datamix = 'datamix'
# Check which data should be exported
if data == 'resq':
# Create empty dictionary
# self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix']
self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand']
# List of dataframe names
self.names = ['resq', 'ivttby', 'thailand']
elif data == 'atalaia':
self.sqls = ['SELECT * from atalaia_mix']
self.names = []
elif data == 'qasc':
self.sqls = ['SELECT * FROM qasc_mix']
self.names = []
elif data == 'africa':
self.sqls = ['SELECT * FROM africa_mix']
self.names = []
# Dictionary initialization - db dataframes
self.dictdb_df = {}
# Dictionary initialization - prepared dataframes
self.dict_df = {}
if nprocess == 1:
if data == 'resq':
for i in range(0, len(self.names)):
df_name = self.names[i]
self.connect(self.sqls[i], datamix, nprocess, df_name=df_name)
# self.connect(self.sqls[2], datamix, nprocess, df_name='resq_ivttby_mix')
# self.resq_ivttby_mix = self.dictdb_df['resq_ivttby_mix']
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
# if 'resq_ivttby_mix' in self.dictdb_df.keys():
# del self.dictdb_df['resq_ivttby_mix']
for k, v in self.dictdb_df.items():
self.prepare_df(df=v, name=k)
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
# Get all country code in dataframe
self.countries = self._get_countries(df=self.df)
# Get preprocessed data
self.preprocessed_data = self.check_data(df=self.df, nprocess=1)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not pd.isnull(x['HOSPITAL_DATE']) else None, axis=1)
#self.preprocessed_data['HOSPITAL_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_DATE'] + ' ' + self.preprocessed_data['HOSPITAL_TIME'])
except ValueError as error:
logging.error("Error occured when converting hospital date and time into timestamp object - {}.".format(error))
self.preprocessed_data['VISIT_DATE'] = self.preprocessed_data.apply(lambda x: self.fix_date(x['VISIT_DATE'], x['HOSPITAL_DATE']), axis=1)
self.preprocessed_data['VISIT_TIME'] = pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['VISIT_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['VISIT_DATE'], x['VISIT_TIME']) if not pd.isnull(x['VISIT_TIME']) and not pd.isnull(x['VISIT_DATE']) else None, axis=1)
#self.preprocessed_data['VISIT_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['VISIT_DATE'] + ' ' + self.preprocessed_data['VISIT_TIME'])
except ValueError as error:
logging.error("Error occured when converting visit date and time into timestamp object - {}.".format(error))
# Get difference in minutes between hospitalization and last visit
self.preprocessed_data['LAST_SEEN_NORMAL'] = self.preprocessed_data.apply(lambda x: self.time_diff(x['VISIT_TIMESTAMP'], x['HOSPITAL_TIMESTAMP']), axis=1)
self.preprocessed_data['LAST_SEEN_NORMAL'].fillna(0, inplace=True)
                # Create a new column flagging patients with in-hospital stroke whose recanalization procedures were entered with timestamps
self.preprocessed_data['HOSPITAL_STROKE_IVT_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_ONLY'] == 2) |
(self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['IVT_TBY_REFER'] == 2)),
'HOSPITAL_STROKE_IVT_TIMESTAMPS'] = 1
self.preprocessed_data['HOSPITAL_STROKE_TBY_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['TBY_ONLY'] == 2) |
(self.preprocessed_data['TBY_REFER_LIM'] == 2) |
(self.preprocessed_data['TBY_REFER_ALL'] == 2)),
'HOSPITAL_STROKE_TBY_TIMESTAMPS'] = 1
elif data == 'atalaia':
self.connect(self.sqls[0], datamix, nprocess, df_name='atalaia_mix')
self.atalaiadb_df = self.dictdb_df['atalaia_mix']
#self.atalaia_preprocessed_data = self.prepare_atalaia_df(self.atalaiadb_df)
self.atalaia_preprocessed_data = self.atalaiadb_df.copy()
del self.dictdb_df['atalaia_mix']
elif data == 'qasc':
self.__get_qasc_df(datamix, nprocess)
elif data == 'africa':
self.__get_africa_df(datamix, nprocess)
else:
if data == 'resq':
threads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.connect(self.sqls[i], datamix, i, df_name=df_name))
process.start()
threads.append(process)
# logging.info('The process with id {0} is running.'.format(process))
process = Thread(target=self.connect(self.sqls[2], datamix, 1, df_name='resq_ivttby_mix'))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were exported in {0} minutes.'.format(tdelta))
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
if 'resq_ivttby_mix' in self.dictdb_df.keys():
del self.dictdb_df['resq_ivttby_mix']
                threads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.prepare_df(df=self.dictdb_df[df_name], name=df_name))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were prepared in {0} minutes.'.format(tdelta))
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
                    logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
subject_ids = self.df['Subject ID'].tolist()
duplicates = [item for item, count in collections.Counter(subject_ids).items() if count > 1]
for i in duplicates:
duplicates_rows = self.df[(self.df['Subject ID'] == i) & (~pd.isnull(self.df['crf_parent_name']))]
set_tmp = set(duplicates_rows['Protocol ID'])
if len(set_tmp) == 1:
crfs = duplicates_rows['crf_parent_name'].tolist()
#print(duplicates_rows[['Subject ID', 'Protocol ID']])
                        for crf in crfs:
                            if 'RESQV12' in crf:
                                keep_crf = crf
                            if 'RESQV20' in crf:
                                keep_crf = crf
                            if 'IVT_TBY' in crf and 'DEVCZ10' not in crf:
                                keep_crf = crf
index = duplicates_rows.index[duplicates_rows['crf_parent_name'] != keep_crf].tolist()
self.df.drop(index, inplace=True)
#print(duplicates_rows['crf_parent_name'])
#print("Keep form: {0}, deleted row: {1}".format(keep_crf, index))
                # Get all country codes in the dataframe
self.countries = self._get_countries(df=self.df)
                # Call the check_data function
self.preprocessed_data = self.check_data(self.df, nprocess=nprocess)
#self.preprocessed_data = self.check_data(self.df, nprocess=None)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not pd.isnull(x['HOSPITAL_DATE']) else None, axis=1)
#self.preprocessed_data['HOSPITAL_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_DATE'] + ' ' + self.preprocessed_data['HOSPITAL_TIME'])
except ValueError as error:
                    logging.error("Error occurred when converting hospital date and time into timestamp object - {}.".format(error))
self.preprocessed_data['VISIT_DATE'] = self.preprocessed_data.apply(lambda x: self.fix_date(x['VISIT_DATE'], x['HOSPITAL_DATE']), axis=1)
self.preprocessed_data['VISIT_TIME'] = pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['VISIT_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['VISIT_DATE'], x['VISIT_TIME']) if not | pd.isnull(x['VISIT_TIME']) | pandas.isnull |
"""
***************************************************************************************
Description: This module is designed to perform calculations that affect production
                 due to frac hit mitigation operational shut-ins.
***********************************************************************************
Input Parameters: | N/A
Output Parameters: | N/A
Tables Accessed: | All of LE Schema
Tables Affected: | N/A
----------------------------------------------------------------------------------
Version Control
----------------------------------------------------------------------------------
Version Developer Date Change
------- --------- ---------- ------------------------------------------------
1.0 <NAME> 08/01/2019 Initial Creation
***************************************************************************************
"""
import sys
sys.path.append('../')
def FracHatMitigation(LEName, EastWestFracHitRadius, NorthSouthFracHitRadius, Update_User):
from Model import ModelLayer as m
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
Success = True
Messages = []
try:
#Find Frac Date of Upcoming Wells
#Get the beginning and end date of the LE being evaluated
LEDataObj = m.LEData('', [LEName], [], [])
LErows, Success, Message = LEDataObj.ReadTable()
if not Success:
Messages.append(Message)
elif len(LErows) < 1:
Messages.append('No LE Data exists for the given LE Name.')
else:
LErows_df = pd.DataFrame([vars(s) for s in LErows])
start_date = LErows_df['Date_Key'].min()
end_date = LErows_df['Date_Key'].max()
s_start_date = datetime.strftime(start_date, '%m/%d/%Y')
s_end_date = datetime.strftime(end_date, '%m/%d/%Y' )
#Query the Drill Schedule (and potentially other data sources) for upcoming drills
new_drill_query = qf.GetActenumDrillScheduleData(s_start_date, s_end_date)
DBObj = bpx.GetDBEnvironment('ProdEDH', 'OVERRIDE')
dso_results = DBObj.Query(new_drill_query)
if not Success:
Messages.append(Message)
else:
dso_df = dso_results[1]
for nd_idx, nd_row in dso_df.iterrows():
#Get Lateral and Longitude values
surface_lat = nd_row['SurfaceLatitude']
surface_long = nd_row['SurfaceLongitude']
if surface_long > 0:
surface_long = 0 - surface_long
bh_lat = nd_row['BottomHoleLatitude']
bh_long = nd_row['BottomHoleLongitude']
if bh_long > 0:
bh_long = 0 - bh_long
stages = nd_row['ExpectedStages']
name = nd_row['WellName']
frac_start = nd_row['StartFracDate']
frac_end = nd_row['EndFracDate']
#Get wells within certain distance
FracHitRadius = max(EastWestFracHitRadius, NorthSouthFracHitRadius)
from_surface_query = qf.GetWellsWithinBearing(surface_lat, surface_long, FracHitRadius)
from_bottom_query = qf.GetWellsWithinBearing(bh_lat, bh_long, FracHitRadius)
surface_res = DBObj.Query(from_surface_query)
bh_res = DBObj.Query(from_bottom_query)
if not surface_res[1].empty:
all_res = surface_res[1]
if not bh_res[1].empty:
all_res = pd.merge(surface_res[1], bh_res[1])
elif not bh_res[1].empty:
all_res = bh_res[1]
else:
all_res = | pd.DataFrame() | pandas.DataFrame |
import asyncio
import logging
import os
import time
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
import pandas as pd
from aiohttp import ClientSession
from pydantic import Field, PrivateAttr
from toucan_connectors.common import ConnectorStatus, get_loop
from toucan_connectors.oauth2_connector.oauth2connector import (
OAuth2Connector,
OAuth2ConnectorConfig,
)
from toucan_connectors.toucan_connector import (
ConnectorSecretsForm,
DataSlice,
ToucanConnector,
ToucanDataSource,
)
from .constants import MAX_RUNS, PER_PAGE
from .helpers import DICTIONARY_OF_FORMATTERS, build_df, build_empty_df
AUTHORIZATION_URL: str = 'https://dashboard-v2.aircall.io/oauth/authorize'
SCOPE: str = 'public_api'
TOKEN_URL: str = 'https://api.aircall.io/v1/oauth/token'
BASE_ROUTE: str = 'https://api.aircall.io/v1'
NO_CREDENTIALS_ERROR = 'No credentials'
class AircallRateLimitExhaustedException(Exception):
"""Raised when the extraction reached the max amount of request"""
class NoCredentialsError(Exception):
"""Raised when no secrets avaiable."""
class AircallDataset(str, Enum):
calls = 'calls'
tags = 'tags'
users = 'users'
async def fetch_page(
dataset: str,
data_list: List[dict],
session: ClientSession,
limit,
current_pass: int,
new_page=1,
delay_counter=0,
*,
query_params=None,
) -> List[dict]:
"""
    Fetches data from the Aircall API, paginating through further pages
    while respecting the configured run limit
"""
endpoint = f'{BASE_ROUTE}/{dataset}?per_page={PER_PAGE}&page={new_page}'
try:
if query_params:
data: dict = await fetch(endpoint, session, query_params)
else:
data: dict = await fetch(endpoint, session)
logging.getLogger(__name__).info(
f'Request sent to Aircall for page {new_page} for dataset {dataset}'
)
aircall_error = data.get('error')
if aircall_error:
logging.getLogger(__name__).error(f'Aircall error has occurred: {aircall_error}')
delay_timer = 1
max_num_of_retries = 3
await asyncio.sleep(delay_timer)
if delay_counter < max_num_of_retries:
delay_counter += 1
logging.getLogger(__name__).info('Retrying Aircall API')
data_list = await fetch_page(
dataset,
data_list,
session,
limit,
current_pass,
new_page,
delay_counter,
query_params=query_params,
)
else:
logging.getLogger(__name__).error('Aborting Aircall requests')
raise AircallException(f'Aborting Aircall requests due to {aircall_error}')
delay_counter = 0
data_list.append(data)
next_page_link = None
meta_data = data.get('meta')
if meta_data is not None:
next_page_link: Optional[str] = meta_data.get('next_page_link')
if limit > -1:
current_pass += 1
if next_page_link is not None and current_pass < limit:
next_page = meta_data['current_page'] + 1
data_list = await fetch_page(
dataset,
data_list,
session,
limit,
current_pass,
next_page,
query_params=query_params,
)
else:
if next_page_link is not None:
next_page = meta_data['current_page'] + 1
data_list = await fetch_page(
dataset,
data_list,
session,
limit,
current_pass,
next_page,
query_params=query_params,
)
except AircallRateLimitExhaustedException as a:
reset_timestamp = int(a.args[0])
delay = reset_timestamp - (int(datetime.timestamp(datetime.utcnow())) + 1)
logging.getLogger(__name__).info(f'Rate limit reached, pausing {delay} seconds')
time.sleep(delay)
logging.getLogger(__name__).info('Extraction restarted')
data_list = await fetch_page(
dataset,
data_list,
session,
limit,
current_pass,
new_page,
delay_counter,
query_params=query_params,
)
return data_list
async def fetch(new_endpoint, session: ClientSession, query_params=None) -> dict:
"""The basic fetch function"""
async with session.get(new_endpoint, params=query_params) as res:
try:
rate_limit_reset = res.headers['X-AircallApi-Reset']
raise AircallRateLimitExhaustedException(rate_limit_reset)
except KeyError:
pass
return await res.json()
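# Note (added comment): `fetch` treats the presence of the X-AircallApi-Reset
# header as an exhausted rate limit and raises AircallRateLimitExhaustedException;
# fetch_page catches it, sleeps until the reset timestamp, then retries the same page.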
class AircallDataSource(ToucanDataSource):
limit: int = Field(MAX_RUNS, description='Limit of entries (default is 1 run)', ge=-1)
dataset: AircallDataset = 'calls'
class AircallConnector(ToucanConnector):
"""
This is a connector for [Aircall](https://developer.aircall.io/api-references/#endpoints)
using oAuth2 for authentication
"""
_auth_flow = 'oauth2'
provided_token: Optional[str]
auth_flow_id: Optional[str]
data_source_model: AircallDataSource
_oauth_trigger = 'instance'
oauth2_version = Field('1', **{'ui.hidden': True})
_oauth2_connector: OAuth2Connector = PrivateAttr()
@staticmethod
def get_connector_secrets_form() -> ConnectorSecretsForm:
return ConnectorSecretsForm(
documentation_md=(Path(os.path.dirname(__file__)) / 'doc.md').read_text(),
secrets_schema=OAuth2ConnectorConfig.schema(),
)
def __init__(self, **kwargs):
super().__init__(
**{k: v for k, v in kwargs.items() if k not in OAuth2Connector.init_params}
)
self._oauth2_connector = OAuth2Connector(
auth_flow_id=self.auth_flow_id,
authorization_url=AUTHORIZATION_URL,
scope=SCOPE,
token_url=TOKEN_URL,
secrets_keeper=kwargs['secrets_keeper'],
redirect_uri=kwargs['redirect_uri'],
config=OAuth2ConnectorConfig(
client_id=kwargs['client_id'],
client_secret=kwargs['client_secret'],
),
)
self.provided_token = kwargs.get('provided_token', None)
def build_authorization_url(self, **kwargs):
return self._oauth2_connector.build_authorization_url(**kwargs)
def retrieve_tokens(self, authorization_response: str):
"""
        In the Aircall OAuth2 authentication process, client_id & client_secret
        must be sent in the body of the request, so we have to set them in
        the parent class. This way they'll be added to its get_access_token method
"""
return self._oauth2_connector.retrieve_tokens(authorization_response)
def get_access_token(self):
if self.provided_token:
return self.provided_token
return self._oauth2_connector.get_access_token()
async def _fetch(self, url, headers=None, query_params=None):
"""Build the final request along with headers."""
async with ClientSession(headers=headers) as session:
return await fetch(url, session, query_params=query_params)
def _run_fetch(self, url):
"""Run loop."""
access_token = self.get_access_token()
if not access_token:
raise NoCredentialsError(NO_CREDENTIALS_ERROR)
headers = {'Authorization': f'Bearer {access_token}'}
loop = get_loop()
future = asyncio.ensure_future(self._fetch(url, headers))
return loop.run_until_complete(future)
async def _get_data(
self, dataset: str, limit, query_params=None
) -> Tuple[List[dict], List[dict]]:
"""Triggers fetches for data and does preliminary filtering process"""
access_token = self.get_access_token()
if not access_token:
raise NoCredentialsError(NO_CREDENTIALS_ERROR)
headers = {'Authorization': f'Bearer {access_token}'}
async with ClientSession(headers=headers) as session:
team_data, variable_data = await asyncio.gather(
fetch_page(
'teams',
[],
session,
limit,
0,
query_params=None, # for now we don't provide param while querying the teams endpoint
),
fetch_page(dataset, [], session, limit, 0, query_params=query_params),
)
team_response_list = []
variable_response_list = []
if len(team_data) > 0:
for data in team_data:
for team_obj in data['teams']:
team_response_list += DICTIONARY_OF_FORMATTERS['teams'](team_obj)
if len(variable_data) > 0:
for data in variable_data:
variable_response_list += [
DICTIONARY_OF_FORMATTERS.get(dataset, 'users')(obj) for obj in data[dataset]
]
return team_response_list, variable_response_list
async def _get_tags(self, dataset: str, limit) -> List[dict]:
"""Triggers fetches for tags and does preliminary filtering process"""
access_token = self.get_access_token()
if not access_token:
raise NoCredentialsError(NO_CREDENTIALS_ERROR)
headers = {'Authorization': f'Bearer {access_token}'}
async with ClientSession(headers=headers) as session:
raw_data = await fetch_page(
dataset,
[],
session,
limit,
1,
)
tags_data_list = []
for data in raw_data:
tags_data_list += data['tags']
return tags_data_list
def run_fetches(self, dataset, limit, query_params=None) -> Tuple[List[dict], List[dict]]:
"""sets up event loop and fetches for 'calls' and 'users' datasets"""
loop = get_loop()
future = asyncio.ensure_future(self._get_data(dataset, limit, query_params))
return loop.run_until_complete(future)
def run_fetches_for_tags(self, dataset, limit):
"""sets up event loop and fetches for 'tags' dataset"""
loop = get_loop()
future = asyncio.ensure_future(self._get_tags(dataset, limit))
return loop.run_until_complete(future)
def _retrieve_data(self, data_source: AircallDataSource, query_params=None) -> pd.DataFrame:
"""retrieves data from AirCall API"""
dataset = data_source.dataset
empty_df = build_empty_df(dataset)
        # NOTE: no check needed on limit here because an invalid limit
# raises a Pydantic ValidationError
limit = data_source.limit
if dataset == 'tags':
non_empty_df = pd.DataFrame([])
if limit != 0:
res = self.run_fetches_for_tags(dataset, limit)
non_empty_df = pd.DataFrame(res)
return pd.concat([empty_df, non_empty_df])
else:
team_data = | pd.DataFrame([]) | pandas.DataFrame |
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import time
FILENAME = {
"train": "./data/train_format1.csv",
"user_log": "./data/user_log_format1.csv",
"user_info": "./data/user_info_format1.csv",
}
TESTNAME = './data/test_format1.csv'
def time_cost(func):
def wrapper(*args, **kw):
start = time.time()
res = func(*args, **kw)
end = time.time()
print(
"Function: {}, Cost: {:.3f}sec".format(
func.__name__,
(end - start)))
return res
return wrapper
def data_clean(data, fea, sigma=3):
data_mean = np.mean(data[fea])
data_std = np.std(data[fea], ddof=1)
delta = sigma * data_std
lower_thr = data_mean - delta
upper_thr = data_mean + delta
    data[fea + '_outlier'] = data[fea].apply(
        lambda x: 'T' if x > upper_thr or x < lower_thr else 'F')
return data
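# Illustrative usage sketch of data_clean (added example; the column name and
# values below are made up, not taken from the competition data):
def _data_clean_example():
    # 20 typical values plus one extreme value; with sigma=3 only the extreme
    # value falls outside mean +/- 3 * std and is flagged 'T'.
    demo = pd.DataFrame({"total_logs": [2] * 20 + [250]})
    demo = data_clean(demo, "total_logs", sigma=3)
    return demo["total_logs_outlier"].tolist()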
@time_cost
def load_data(filename=FILENAME):
if filename is None:
filename = FILENAME
print('Loading Samples ...')
train = pd.read_csv(filename["train"])
user_info = pd.read_csv(filename["user_info"])
user_log = pd.read_csv(filename["user_log"])
user_log = user_log.drop(columns=["brand_id"])
print('Done.')
print('*' * 20)
print('Filling NaN Items ...')
    user_info["age_range"] = user_info["age_range"].fillna(
        user_info["age_range"].mode()[0])
user_info["gender"] = user_info["gender"].fillna(2)
print('Done.')
print('*' * 20)
print('Merging Train Dataset ...')
# train = pd.merge(train, user_info, on="user_id", how="left")
# total_logs
total_logs_temp = user_log.groupby([user_log["user_id"], user_log["seller_id"]]).count(
).reset_index()[["user_id", "seller_id", "item_id"]]
total_logs_temp.rename(
columns={
"seller_id": "merchant_id",
"item_id": "total_logs"},
inplace=True)
train = pd.merge(
train, total_logs_temp, on=[
"user_id", "merchant_id"], how="left")
# item_count
item_count_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["item_id"]]).count().reset_index()[["user_id",
"seller_id",
"item_id"]]
item_count_temp = item_count_temp.groupby(
[item_count_temp["user_id"], item_count_temp["seller_id"]]).count().reset_index()
item_count_temp.rename(
columns={
"seller_id": "merchant_id",
"item_id": "item_count"},
inplace=True)
train = pd.merge(
train, item_count_temp, on=[
"user_id", "merchant_id"], how="left")
# cat_count
cat_count_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["cat_id"]]).count().reset_index()[["user_id",
"seller_id",
"cat_id"]]
cat_count_temp = cat_count_temp.groupby(
[cat_count_temp["user_id"], cat_count_temp["seller_id"]]).count().reset_index()
cat_count_temp.rename(
columns={
"seller_id": "merchant_id",
"cat_id": "cat_count"},
inplace=True)
train = pd.merge(
train, cat_count_temp, on=[
"user_id", "merchant_id"], how="left")
# click_on, add_cart, buy_up, mark_down
action_log_temp = pd.get_dummies(user_log, columns=["action_type"])
action_log_temp = action_log_temp.groupby([user_log["user_id"], user_log["seller_id"]]).agg(
{"action_type_0": sum, "action_type_1": sum, "action_type_2": sum, "action_type_3": sum})
action_log_temp = action_log_temp.reset_index()[["user_id",
"seller_id",
"action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"]]
action_log_temp.rename(
columns={
"seller_id": "merchant_id",
"action_type_0": "click_on",
"action_type_1": "add_cart",
"action_type_2": "buy_up",
"action_type_3": "mark_down"},
inplace=True)
train = pd.merge(
train, action_log_temp, on=[
"user_id", "merchant_id"], how="left")
# browse_days
browse_days_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["time_stamp"]]).count().reset_index()[["user_id",
"seller_id",
"time_stamp"]]
browse_days_temp = browse_days_temp.groupby([browse_days_temp["user_id"],
browse_days_temp["seller_id"]]).count().reset_index()
browse_days_temp.rename(
columns={
"seller_id": "merchant_id",
"time_stamp": "browse_days"},
inplace=True)
train = pd.merge(train, browse_days_temp, on=["user_id", "merchant_id"], how="left")
# bought_rate
bought_rate_temp = pd.get_dummies(user_log, columns=["action_type"])
bought_rate_temp = bought_rate_temp.groupby([user_log["user_id"]]).agg(
{"action_type_0": sum, "action_type_1": sum, "action_type_2": sum, "action_type_3": sum})
bought_rate_temp = bought_rate_temp.reset_index()[["user_id",
"action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"]]
bought_rate_temp["bought_rate"] = bought_rate_temp["action_type_2"] / (bought_rate_temp["action_type_0"]
+ bought_rate_temp["action_type_1"]
+ bought_rate_temp["action_type_2"]
+ bought_rate_temp["action_type_3"])
bought_rate_temp = bought_rate_temp.drop(columns=["action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"])
train = pd.merge(train, bought_rate_temp, on="user_id", how="left")
# sold_rate
sold_rate_temp = pd.get_dummies(user_log, columns=["action_type"])
sold_rate_temp = sold_rate_temp.groupby([user_log["seller_id"]]).agg(
{"action_type_0": sum, "action_type_1": sum, "action_type_2": sum, "action_type_3": sum})
sold_rate_temp = sold_rate_temp.reset_index()[["seller_id",
"action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"]]
sold_rate_temp["sold_rate"] = sold_rate_temp["action_type_2"] / (sold_rate_temp["action_type_0"]
+ sold_rate_temp["action_type_1"]
+ sold_rate_temp["action_type_2"]
+ sold_rate_temp["action_type_3"])
sold_rate_temp = sold_rate_temp.drop(columns=["action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"])
sold_rate_temp.rename(columns={"seller_id": "merchant_id"}, inplace=True)
train = pd.merge(train, sold_rate_temp, on="merchant_id", how="left")
print('Done.')
print('*' * 20)
label = train['label']
train = train.drop(columns=["user_id", "merchant_id", "label"])
# train["age_range"] = train["age_range"].astype(str)
# train["gender"] = train["gender"].astype(str)
feature = train
print('Shape of Dataset: {}'.format(feature.shape))
return feature, label
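# Usage sketch (added example, not part of the original pipeline): split the
# engineered features for local validation; the 80/20 split is an assumption.
def _train_validation_split_example():
    from sklearn.model_selection import train_test_split
    feature, label = load_data()
    return train_test_split(feature, label, test_size=0.2,
                            random_state=2019, stratify=label)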
def load_test(testname=TESTNAME):
filename = FILENAME
print('Loading Tests ...')
test = pd.read_csv(testname)
user_info = pd.read_csv(filename["user_info"])
user_log = pd.read_csv(filename["user_log"])
user_log = user_log.drop(columns=["brand_id"])
print('Done.')
print('*' * 20)
print('Filling NaN Items ...')
    user_info["age_range"] = user_info["age_range"].fillna(
        user_info["age_range"].mode()[0])
user_info["gender"] = user_info["gender"].fillna(2)
print('Done.')
print('*' * 20)
print('Merging Test Dataset ...')
# test = pd.merge(test, user_info, on="user_id", how="left")
# total_logs
total_logs_temp = user_log.groupby([user_log["user_id"], user_log["seller_id"]]).count(
).reset_index()[["user_id", "seller_id", "item_id"]]
total_logs_temp.rename(
columns={
"seller_id": "merchant_id",
"item_id": "total_logs"},
inplace=True)
test = pd.merge(
test, total_logs_temp, on=[
"user_id", "merchant_id"], how="left")
# item_count
item_count_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["item_id"]]).count().reset_index()[["user_id",
"seller_id",
"item_id"]]
item_count_temp = item_count_temp.groupby(
[item_count_temp["user_id"], item_count_temp["seller_id"]]).count().reset_index()
item_count_temp.rename(
columns={
"seller_id": "merchant_id",
"item_id": "item_count"},
inplace=True)
test = pd.merge(
test, item_count_temp, on=[
"user_id", "merchant_id"], how="left")
# cat_count
cat_count_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["cat_id"]]).count().reset_index()[["user_id",
"seller_id",
"cat_id"]]
cat_count_temp = cat_count_temp.groupby(
[cat_count_temp["user_id"], cat_count_temp["seller_id"]]).count().reset_index()
cat_count_temp.rename(
columns={
"seller_id": "merchant_id",
"cat_id": "cat_count"},
inplace=True)
test = pd.merge(
test, cat_count_temp, on=[
"user_id", "merchant_id"], how="left")
# click_on, add_cart, buy_up, mark_down
action_log_temp = pd.get_dummies(user_log, columns=["action_type"])
action_log_temp = action_log_temp.groupby([user_log["user_id"], user_log["seller_id"]]).agg(
{"action_type_0": sum, "action_type_1": sum, "action_type_2": sum, "action_type_3": sum})
action_log_temp = action_log_temp.reset_index()[["user_id",
"seller_id",
"action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"]]
action_log_temp.rename(
columns={
"seller_id": "merchant_id",
"action_type_0": "click_on",
"action_type_1": "add_cart",
"action_type_2": "buy_up",
"action_type_3": "mark_down"},
inplace=True)
test = pd.merge(
test, action_log_temp, on=[
"user_id", "merchant_id"], how="left")
# browse_days
browse_days_temp = user_log.groupby([user_log["user_id"],
user_log["seller_id"],
user_log["time_stamp"]]).count().reset_index()[["user_id",
"seller_id",
"time_stamp"]]
browse_days_temp = browse_days_temp.groupby([browse_days_temp["user_id"],
browse_days_temp["seller_id"]]).count().reset_index()
browse_days_temp.rename(
columns={
"seller_id": "merchant_id",
"time_stamp": "browse_days"},
inplace=True)
test = pd.merge(test, browse_days_temp, on=["user_id", "merchant_id"], how="left")
# bought_rate
bought_rate_temp = pd.get_dummies(user_log, columns=["action_type"])
bought_rate_temp = bought_rate_temp.groupby([user_log["user_id"]]).agg(
{"action_type_0": sum, "action_type_1": sum, "action_type_2": sum, "action_type_3": sum})
bought_rate_temp = bought_rate_temp.reset_index()[["user_id",
"action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"]]
bought_rate_temp["bought_rate"] = bought_rate_temp["action_type_2"] / (bought_rate_temp["action_type_0"]
+ bought_rate_temp["action_type_1"]
+ bought_rate_temp["action_type_2"]
+ bought_rate_temp["action_type_3"])
bought_rate_temp = bought_rate_temp.drop(columns=["action_type_0",
"action_type_1",
"action_type_2",
"action_type_3"])
test = | pd.merge(test, bought_rate_temp, on="user_id", how="left") | pandas.merge |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_monotonic_and_unique(self):
assert self.index.is_monotonic
assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_breaks([0, 1, 2], closed='both')
assert index.slice_locs(1, 1) == (0, 2)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)])
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)])
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self):
other = IntervalIndex.from_arrays([2], [3])
expected = IntervalIndex.from_arrays(range(3), range(1, 4))
actual = self.index.union(other)
assert expected.equals(actual)
actual = other.union(self.index)
assert expected.equals(actual)
tm.assert_index_equal(self.index.union(self.index), self.index)
tm.assert_index_equal(self.index.union(self.index[:1]),
self.index)
def test_intersection(self):
other = IntervalIndex.from_breaks([1, 2, 3])
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.intersection(other)
assert expected.equals(actual)
tm.assert_index_equal(self.index.intersection(self.index),
self.index)
def test_difference(self):
tm.assert_index_equal(self.index.difference(self.index[:1]),
self.index[1:])
def test_symmetric_difference(self):
result = self.index[:1].symmetric_difference(self.index[1:])
expected = self.index
tm.assert_index_equal(result, expected)
def test_set_operation_errors(self):
pytest.raises(ValueError, self.index.union, self.index.left)
other = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
pytest.raises(ValueError, self.index.union, other)
def test_isin(self):
actual = self.index.isin(self.index)
tm.assert_numpy_array_equal(np.array([True, True]), actual)
actual = self.index.isin(self.index[:1])
tm.assert_numpy_array_equal(np.array([True, False]), actual)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self):
idx = pd.Index([np.nan, pd.Interval(0, 1), | pd.Interval(1, 2) | pandas.Interval |
import pandas as pd
import numpy as np
import pytest
from .time_gap_sizes import main
def test_basic():
pd.testing.assert_series_equal(
main(
data=pd.Series(
[10.0, 22.0, 18.0, 2.0],
index=pd.to_datetime(
[
"2019-08-01 15:20:10",
"2019-08-01 15:20:11",
"2019-08-01 15:20:14",
"2019-08-01 15:20:16",
]
),
)
)["gap_sizes"],
pd.Series(
[1, 3, 2],
index=pd.to_datetime(
["2019-08-01 15:20:11", "2019-08-01 15:20:14", "2019-08-01 15:20:16"]
),
),
check_dtype=False,
)
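# Reference sketch (added; an assumption, not the implementation imported above):
# a `main` consistent with these tests could compute index gaps in seconds.
def _reference_main(data: pd.Series) -> dict:
    if len(data) < 2:
        raise ValueError(
            "length of data must be greater than 1, it is {}".format(len(data))
        )
    gaps = data.index.to_series().diff().dropna().dt.total_seconds()
    return {"gap_sizes": gaps}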
def test_single_entry():
with pytest.raises(
ValueError, match="length of data must be greater than 1, it is 1"
):
main(data=pd.Series({1: 0}))
def test_empty():
with pytest.raises(
ValueError, match="length of data must be greater than 1, it is 0"
):
main(data= | pd.Series(dtype=float) | pandas.Series |
from pyrebase import pyrebase
import collections
import firebase_admin
from firebase_admin import credentials
config = {
"apiKey": "AIzaSyCL8AqkgupmScHROiU8E0cta9YYigdGTaY",
"authDomain": "test1-a06b1.firebaseapp.com",
"databaseURL": "https://test1-a06b1.firebaseio.com",
"projectId": "test1-a06b1",
"storageBucket": "test1-a06b1.appspot.com",
"messagingSenderId": "383571108013",
"appId": "1:383571108013:web:ef563e8adb802358db2624",
"measurementId": "G-M38DQ6VRM7",
"serviceAccount": "key.json"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
db = firebase.database()
from flask import *
from flask_admin import *
''' Dynamic Plot '''
import pandas as pd
import numpy as np
import urllib.request
from bokeh.plotting import figure
from flask import Flask, render_template, request
from bokeh.embed import components
from bokeh.models import BoxAnnotation, Legend, Label
from despike1 import despike
from ma import ma
import ruptures as rpt
from scipy.signal import savgol_filter
from datainput import input_sample
from dataoutput import output
from baseline_predict import baseline_prediction
from baseline_manual import baseline_poly_manual
import copy
app = Flask(__name__)
app.secret_key = "EB2017"
@app.route('/', methods=['GET', 'POST'])
def homepage():
if request.method == 'POST':
        ufile = request.files['ufile']  # Store the uploaded file in a variable
        fname = ufile.filename
        ftype = fname.split('.')[1]  # Identify the file type from the extension
        storage.child(ftype+'/'+fname).put(ufile)  # Upload the file to Firebase storage
        flink = storage.child(ftype+'/'+fname).get_url(None)  # Get the file's download URL
        # Session is used to pass data from one page to another
session['flink'] = flink
session['ftype'] = ftype
newdata = {"Pengujian": "18", "Pasien": "patientId18", "Tanggal": "23/07/20", "Hasil": "Negatif", "DataCSV": flink, "Type": ftype}
session['data'] = newdata
db.child("Tests").push(newdata)
if (ftype == 'sp2') or (ftype == 'sp28'):
return redirect(url_for('input_user'))
else:
return redirect(url_for('input_user2'))
return render_template('index.html')
@app.route('/user', methods=['GET', 'POST'])
def input_user():
if request.method == 'POST':
        # Store the user's inputs in variables
baseline = request.values['baseline']
sampel1 = request.values['sampel1']
konsen1 = request.values['konsen1']
sampel2 = request.values['sampel2']
konsen2 = request.values['konsen2']
sampel3 = request.values['sampel3']
konsen3 = request.values['konsen3']
sampel4 = request.values['sampel4']
konsen4 = request.values['konsen4']
session['baseline'] = baseline
session['sampel1'] = sampel1
session['konsen1'] = konsen1
session['sampel2'] = sampel2
session['konsen2'] = konsen2
session['sampel3'] = sampel3
session['konsen3'] = konsen3
session['sampel4'] = sampel4
session['konsen4'] = konsen4
return redirect(url_for('grafik_baseline'))
return render_template('input_user.html')
@app.route('/user2', methods=['GET', 'POST'])
def input_user2():
if request.method == 'POST':
baseline = request.values['baseline']
session['baseline'] = baseline
return redirect(url_for('grafik_baseline'))
return render_template('input_user2.html')
@app.route('/recent_tests', methods=['GET', 'POST'])
def view():
if request.method == 'POST':
keyId = request.form.get('keyId')
session['keyId'] = keyId
return redirect(url_for('testpage', keyId=keyId))
dbase = db.child("Tests").get()
return render_template('viewer.html', dbase = dbase)
@app.route('/grafik_baseline', methods = ['GET','POST'])
def grafik_baseline():
if request.method == 'POST':
if 'flink' in session:
flink = session['flink']
ftype = session['ftype']
# baseline
baseline = session['baseline']
pilih_baseline = int(baseline)
if (ftype == 'sp2') or (ftype == 'sp28'):
a_file = urllib.request.urlopen(flink) #Read raw file
list_of_lists = []
                # Convert the raw file into a list of lists
for line in a_file:
stripped_line = line.strip()
line_list = stripped_line.split()
list_of_lists.append(line_list)
a_file.close()
                # Turn the list of lists into a dataframe
data = pd.DataFrame(list_of_lists)
data.columns = ["time", "ch1", "ch2", "subsidiary", "difference"]
                # Convert the dataframe columns into numpy arrays
data_numpy = data.to_numpy().transpose()
data_y = data_numpy[1][0:len(data_numpy[1])]
data_y = np.asfarray(data_y,float)
data_x = data_numpy[0][0:len(data_numpy[0])]
data_x = np.asfarray(data_x,float)
                # remove outliers
dspk_y = despike(data_y,50)
# moving average filter
ma_y = ma(dspk_y,9)
# savitzky-golay filter
svg_y = savgol_filter(ma_y, window_length=41, polyorder=2)
# change points detection
chgpts_y = rpt.Pelt(model='l2').fit(svg_y)
result_y = chgpts_y.predict(pen=5000)
if pilih_baseline == 1:
base_y = baseline_poly_manual(data_x, svg_y)
#return render_template('grafik_baseline.html', **baseline_poly_manual.kwargs)
elif pilih_baseline == 2:
base_y = baseline_prediction(data_x, svg_y, result_y)
#return render_template('grafik_baseline.html', **baseline_prediction.kwargs)
elif (ftype == 'csv') or (ftype == 'xls') or (ftype == 'xlsx'):
if pilih_baseline == 1:
base_y = baseline_poly_manual(data_x, svg_y)
elif pilih_baseline == 2:
base_y = baseline_prediction(data_x, svg_y, result_y)
return redirect(url_for('content'))
@app.route('/result', methods = ['GET','POST'])
def content():
if 'flink' in session:
flink = session['flink']
ftype = session['ftype']
# baseline
baseline = session['baseline']
pilih_baseline = int(baseline)
if (ftype == 'sp2') or (ftype == 'sp28'):
sampel1 = session['sampel1']
konsen1 = session['konsen1']
sampel2 = session['sampel2']
konsen2 = session['konsen2']
sampel3 = session['sampel3']
konsen3 = session['konsen3']
sampel4 = session['sampel4']
konsen4 = session['konsen4']
a_file = urllib.request.urlopen(flink)
list_of_lists = []
for line in a_file:
stripped_line = line.strip()
line_list = stripped_line.split()
list_of_lists.append(line_list)
a_file.close()
data = pd.DataFrame(list_of_lists)
data.columns = ["time", "ch1", "ch2", "subsidiary", "difference"]
data_numpy = data.to_numpy().transpose()
data_y = data_numpy[1][0:len(data_numpy[1])]
data_y = np.asfarray(data_y,float)
data_x = data_numpy[0][0:len(data_numpy[0])]
data_x = np.asfarray(data_x,float)
            # remove outliers
dspk_y = despike(data_y,50)
# moving average filter
ma_y = ma(dspk_y,9)
# savitzky-golay filter
svg_y = savgol_filter(ma_y, window_length=41, polyorder=2)
# change points detection
chgpts_y = rpt.Pelt(model='l2').fit(svg_y)
result_y = chgpts_y.predict(pen=5000)
if pilih_baseline == 1:
base_y = baseline_poly_manual(data_x, svg_y)
elif pilih_baseline == 2:
base_y = baseline_prediction(data_x, svg_y, result_y)
#mean
mean_sampel = []
dx = 10
j = 0 + dx
for i in range(0,len(result_y)):
temp = sum(svg_y[j:result_y[i]-dx])/len(svg_y[j:result_y[i]-dx])
mean_sampel.append(temp)
j = result_y[i] + dx
            # mean for each sample segment
sampel = []
for l in range(1,len(result_y)):
if (mean_sampel[l-1] < mean_sampel[l]):
temp = mean_sampel[l]
sampel.append(temp)
            # sample locations (entry and exit times)
a = 1
lokasi_sample = []
for p in range(1,len(mean_sampel)):
if mean_sampel[p-1]<mean_sampel[p] :
t_in = data_x[result_y[p-1]]
t_out = data_x[result_y[p]]
lokasi_sample.append(str(t_in) + ";" + str(t_out))
a += 1
            # Input test type and analyte concentration
jenis, konsentrasi = input_sample(sampel1,konsen1,sampel2,konsen2,sampel3,konsen3,sampel4,konsen4)
df = output(data_x, svg_y, lokasi_sample, jenis, konsentrasi)
x = df['A']
y = df['B']
elif ftype == 'csv':
df = pd.read_csv(flink, usecols = [0,1,2,3,4], names = ['A','B','C','D','E'])
x = df['A']
y = df['B']
data_numpy = df.to_numpy().transpose()
data_y = data_numpy[1][0:len(data_numpy[1])]
data_y = np.asfarray(data_y,float)
data_x = data_numpy[0][0:len(data_numpy[0])]
data_x = np.asfarray(data_x,float)
# change points detection
chgpts_y = rpt.Pelt(model='l2').fit(data_y)
result_y = chgpts_y.predict(pen=5000)
if pilih_baseline == 1:
base_y = baseline_poly_manual(data_x, data_y)
elif pilih_baseline == 2:
base_y = baseline_prediction(data_x, data_y, result_y)
elif (ftype == 'xlsx') or (ftype == 'xls'):
df = | pd.read_excel(flink, usecols = [0,1,2,3,4], names = ['A','B','C','D','E']) | pandas.read_excel |
import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
import pandas as pd
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn_pandas import DataFrameMapper
from sklearn.model_selection import train_test_split
from nyoka import lgb_to_pmml
from nyoka import PMML44 as pml
import unittest
import requests
import json
from requests.auth import HTTPBasicAuth
import ast
import numpy
from adapaUtilities import AdapaUtility
class TestCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("******* Unit Test for lightgbm *******")
iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df["Species"] = iris.target
df["Binary"] = numpy.array([i%2 for i in range(df.shape[0])])
cls.X = df[iris.feature_names]
cls.Y = df["Species"]
cls.Y_bin = df["Binary"]
cls.features = iris.feature_names
cls.test_file = 'nyoka/tests/test.csv'
cls.X.to_csv(cls.test_file,index=False)
cls.adapa_utility = AdapaUtility()
def test_01_lgbm_classifier(self):
print("\ntest 01 (lgbm classifier with preprocessing) [binary-class]\n")
model = LGBMClassifier()
pipeline_obj = Pipeline([
('scaler',MinMaxScaler()),
("model", model)
])
pipeline_obj.fit(self.X,self.Y_bin)
file_name = "test01lgbm.pmml"
lgb_to_pmml(pipeline_obj, self.features, 'Species', file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, self.test_file)
model_pred = pipeline_obj.predict(self.X)
model_prob = pipeline_obj.predict_proba(self.X)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_02_lgbm_classifier(self):
print("\ntest 02 (lgbm classifier with preprocessing) [multi-class]\n")
model = LGBMClassifier()
pipeline_obj = Pipeline([
('scaler',MaxAbsScaler()),
("model", model)
])
pipeline_obj.fit(self.X,self.Y)
file_name = "test02lgbm.pmml"
lgb_to_pmml(pipeline_obj, self.features, 'Species', file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, self.test_file)
model_pred = pipeline_obj.predict(self.X)
model_prob = pipeline_obj.predict_proba(self.X)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True)
def test_03_lgbm_regressor(self):
print("\ntest 03 (lgbm regressor without preprocessing)\n")
model = LGBMRegressor()
pipeline_obj = Pipeline([
("model", model)
])
pipeline_obj.fit(self.X,self.Y)
file_name = "test03lgbm.pmml"
lgb_to_pmml(pipeline_obj, self.features, 'Species', file_name)
model_name = self.adapa_utility.upload_to_zserver(file_name)
predictions, _ = self.adapa_utility.score_in_zserver(model_name, self.test_file)
model_pred = pipeline_obj.predict(self.X)
predictions = numpy.array(predictions)
self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
def test_04_lgbm_regressor(self):
print("\ntest 04 (lgbm regressor with preprocessing)\n")
auto = | pd.read_csv('nyoka/tests/auto-mpg.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import torch
import pandas as pd
import json
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder as LE
import bisect
from datetime import datetime
from sklearn.model_selection import train_test_split
np.random.seed(22)
torch.manual_seed(22)
with open("../Data/batsmen.json", "r") as f:
batsmen = json.load(f)
with open("../Data/bowlers.json", "r") as f:
bowlers = json.load(f)
batsmen = {k: [x for x in v if x[1][1] >= 0] for k, v in batsmen.items()}
batsmen = {k: sorted(v, key=lambda x: x[0]) for k, v in batsmen.items() if v}
bowlers = {k: sorted(v, key=lambda x: x[0]) for k, v in bowlers.items() if v}
def getBatScores(scores):
# runs, balls, boundaries, contribs, out
array = []
for score in scores:
date = score[0]
_, runs, balls, fours, sixes, _, contrib = score[1]
boundaries = fours + sixes * 1.5
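# boundary feature: weight sixes 1.5x relative to fours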
array.append((date, np.array([runs, balls, boundaries, contrib])))
return array
def getBowlScores(scores):
# overs, maidens, runs, wickets, contribs
array = []
for score in scores:
date = score[0]
overs, maidens, runs, wickets, _, contrib = score[1]
overs = int(overs) + (overs - int(overs)) * 10 / 6
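# convert cricket over notation x.y (x overs, y balls, 0 <= y <= 5)
# into fractional overs: x + y/6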
array.append((date, np.array([overs, maidens, runs, wickets, contrib])))
return array
batsmen_scores = {k: getBatScores(v) for k, v in batsmen.items()}
bowlers_scores = {k: getBowlScores(v) for k, v in bowlers.items()}
_batsmen_scores = {k: {_v[0]: _v[1] for _v in v} for k, v in batsmen_scores.items()}
_bowlers_scores = {k: {_v[0]: _v[1] for _v in v} for k, v in bowlers_scores.items()}
att = pd.read_csv("../Data/attributes.csv")
att["BatHand"] = 0 + (att["Bats"].str.find("eft") > 0)
att["BowlHand"] = 0 + (att["Bowls"].str.find("eft") > 0)
att["BowlType"] = 0 + (
(att["Bowls"].str.find("ast") > 0) | (att["Bowls"].str.find("edium") > 0)
)
def getBatStats(scores):
dates, scorelist = [score[0] for score in scores], [score[1] for score in scores]
scorelist = np.array(scorelist)
cumscores = np.cumsum(scorelist, axis=0)
innings = np.arange(1, cumscores.shape[0] + 1)
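# cumulative career-to-date figures: average = runs/innings,
# strike rate = runs/(balls + 1) (the +1 avoids division by zero),
# contribution averaged per innings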
average = cumscores[:, 0] / innings
sr = cumscores[:, 0] / (cumscores[:, 1] + 1)
contrib = cumscores[:, 3] / innings
stats = np.array([innings, average, sr, contrib]).T
return [datetime.strptime(date, "%Y-%m-%d") for date in dates], stats
def getBowlStats(scores):
dates, scorelist = [score[0] for score in scores], [score[1] for score in scores]
scorelist = np.array(scorelist)
cumscores = np.cumsum(scorelist, axis=0)
overs = cumscores[:, 0]
overs = overs.astype("int32") + 10 / 6 * (overs - overs.astype("int32"))
runs = cumscores[:, 2]
economy = runs / overs
wickets = cumscores[:, 3]
average = wickets / (runs + 1)
sr = wickets / overs
contrib = cumscores[:, 4] / np.arange(1, cumscores.shape[0] + 1)
stats = np.array([overs, average, economy, sr, contrib]).T
return [datetime.strptime(date, "%Y-%m-%d") for date in dates], stats
batsmen_stats = {key: getBatStats(getBatScores(v)) for key, v in batsmen.items()}
bowlers_stats = {key: getBowlStats(getBowlScores(v)) for key, v in bowlers.items()}
with open("../Data/scorecard.json", "r") as f:
scorecards = json.load(f)
position = dict()
for code, match in scorecards.items():
for pos, batsmen in enumerate(match["BATTING1"]):
if batsmen[0] in position:
position[batsmen[0]].append(pos + 1)
else:
position[batsmen[0]] = [pos + 1]
for pos, batsmen in enumerate(match["BATTING2"]):
if batsmen[0] in position:
position[batsmen[0]].append(pos + 1)
else:
position[batsmen[0]] = [pos + 1]
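# reduce to each player's most frequent (modal) batting position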
position = {int(k): max(set(v), key=v.count) for k, v in position.items()}
for missing in set(att["Code"]) - set(position.keys()):
position[missing] = 0
with open("../Data/region.json", "r") as f:
region = json.load(f)
with open("../Data/tmap.json", "r") as f:
tmap = json.load(f)
matches = pd.read_csv("../Data/matches.csv")
att["BatPos"] = att["Code"].apply(lambda x: position[x])
matches["GroundCode"] = matches["GroundCode"].apply(lambda x: region[str(x)])
matches = matches[pd.to_datetime(matches["Date"], format="%Y-%m-%d") > "1990-01-01"]
df_cards = pd.DataFrame(scorecards).transpose()
df_cards = df_cards[df_cards.index.astype(int).isin(matches["MatchCode"])]
matches = matches[matches["MatchCode"].isin(df_cards.index.astype(int))]
att = pd.get_dummies(att, columns=["BatPos"])
le = {
"GC": LE(),
"Team": LE(),
"Venue": LE(),
}
le["Team"].fit((matches["Team_1"].tolist()) + (matches["Team_2"].tolist()))
matches["Team_1"] = le["Team"].transform(matches["Team_1"])
matches["Team_2"] = le["Team"].transform(matches["Team_2"])
matches["Venue"] = le["Venue"].fit_transform(matches["Venue"])
matches["GroundCode"] = le["GC"].fit_transform(matches["GroundCode"])
matches
patts = att[
[
"BatHand",
"BowlHand",
"BowlType",
"BatPos_0",
"BatPos_1",
"BatPos_2",
"BatPos_3",
"BatPos_4",
"BatPos_5",
"BatPos_6",
"BatPos_7",
"BatPos_8",
"BatPos_9",
"BatPos_10",
]
].values
pcodes = att["Code"].tolist()
attdict = dict()
for i, pc in enumerate(pcodes):
attdict[pc] = patts[i]
df_cards["MatchCode"] = df_cards.index.astype(int)
matches = matches.sort_values(by="MatchCode")
df_cards = df_cards.sort_values(by="MatchCode")
df_cards.reset_index(drop=True, inplace=True)
matches.reset_index(drop=True, inplace=True)
df_cards["BAT2"] = le["Team"].transform(df_cards["ORDER"].apply(lambda x: tmap[x[1]]))
df_cards["BAT1"] = le["Team"].transform(df_cards["ORDER"].apply(lambda x: tmap[x[0]]))
df_cards["RUN1"] = df_cards["SCORES"].apply(lambda x: x[0])
df_cards["RUN2"] = df_cards["SCORES"].apply(lambda x: x[1])
df_cards["TOSS"] = le["Team"].transform(df_cards["TOSS"].apply(lambda x: tmap[x]))
df = | pd.merge(matches, df_cards) | pandas.merge |
from __future__ import annotations
from collections import abc
from datetime import datetime
from functools import partial
from itertools import islice
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
List,
Tuple,
TypedDict,
Union,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import tslib
from pandas._libs.tslibs import (
OutOfBoundsDatetime,
Timedelta,
Timestamp,
iNaT,
nat_strings,
parsing,
timezones,
)
from pandas._libs.tslibs.parsing import ( # noqa:F401
DateParseError,
format_is_iso,
guess_datetime_format,
)
from pandas._libs.tslibs.strptime import array_strptime
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DateTimeErrorChoices,
Timezone,
npt,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import notna
from pandas.arrays import (
DatetimeArray,
IntegerArray,
)
from pandas.core import algorithms
from pandas.core.algorithms import unique
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.datetimes import (
maybe_convert_dtype,
objects_to_datetime64ns,
tz_to_dtype,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.core.indexes.datetimes import DatetimeIndex
if TYPE_CHECKING:
from pandas._libs.tslibs.nattype import NaTType
from pandas._libs.tslibs.timedeltas import UnitChoices
from pandas import (
DataFrame,
Series,
)
# ---------------------------------------------------------------------
# types used in annotations
ArrayConvertible = Union[List, Tuple, AnyArrayLike]
Scalar = Union[int, float, str]
DatetimeScalar = Union[Scalar, datetime]
DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
DatetimeDictArg = Union[List[Scalar], Tuple[Scalar, ...], AnyArrayLike]
class YearMonthDayDict(TypedDict, total=True):
year: DatetimeDictArg
month: DatetimeDictArg
day: DatetimeDictArg
class FulldatetimeDict(YearMonthDayDict, total=False):
hour: DatetimeDictArg
hours: DatetimeDictArg
minute: DatetimeDictArg
minutes: DatetimeDictArg
second: DatetimeDictArg
seconds: DatetimeDictArg
ms: DatetimeDictArg
us: DatetimeDictArg
ns: DatetimeDictArg
DictConvertible = Union[FulldatetimeDict, "DataFrame"]
start_caching_at = 50
# ---------------------------------------------------------------------
def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
return guess_datetime_format(arr[non_nan_elements[0]], dayfirst=dayfirst)
def should_cache(
arg: ArrayConvertible, unique_share: float = 0.7, check_count: int | None = None
) -> bool:
"""
Decides whether to do caching.
If the percent of unique elements among `check_count` elements less
than `unique_share * 100` then we can do caching.
Parameters
----------
arg: listlike, tuple, 1-d array, Series
unique_share: float, default=0.7, optional
0 < unique_share < 1
check_count: int, optional
0 <= check_count <= len(arg)
Returns
-------
do_caching: bool
Notes
-----
By default for a sequence of less than 50 items in size, we don't do
caching; for the number of elements less than 5000, we take ten percent of
all elements to check for a uniqueness share; if the sequence size is more
than 5000, then we check only the first 500 elements.
All constants were chosen empirically.
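Examples
--------
A rough illustration of the heuristic (the exact thresholds above are
implementation details and may change):
>>> should_cache([1] * 100)
True
>>> should_cache(list(range(100)))
False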
"""
do_caching = True
# default realization
if check_count is None:
# in this case, the gain from caching is negligible
if len(arg) <= start_caching_at:
return False
if len(arg) <= 5000:
check_count = len(arg) // 10
else:
check_count = 500
else:
assert (
0 <= check_count <= len(arg)
), "check_count must be in next bounds: [0; len(arg)]"
if check_count == 0:
return False
assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"
try:
# We can't cache if the items are not hashable.
unique_elements = set(islice(arg, check_count))
except TypeError:
return False
if len(unique_elements) > check_count * unique_share:
do_caching = False
return do_caching
def _maybe_cache(
arg: ArrayConvertible,
format: str | None,
cache: bool,
convert_listlike: Callable,
) -> Series:
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : bool
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series(dtype=object)
if cache:
# Perform a quicker unique check
if not should_cache(arg):
return cache_array
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
cache_array = Series(cache_dates, index=unique_dates)
# GH#39882 and GH#35888 in case of None and NaT we get duplicates
if not cache_array.index.is_unique:
cache_array = cache_array[~cache_array.index.duplicated()]
return cache_array
def _box_as_indexlike(
dt_array: ArrayLike, utc: bool | None = None, name: Hashable = None
) -> Index:
"""
Properly boxes the ndarray of datetimes to DatetimeIndex
if it is possible or to generic Index instead
Parameters
----------
dt_array: 1-d array
Array of datetimes to be wrapped in an Index.
utc : bool, default None
Whether to localize the resulting DatetimeIndex as UTC.
name : string, default None
Name for a resulting index
Returns
-------
result : datetime of converted dates
- DatetimeIndex if convertible to sole datetime64 type
- general Index otherwise
"""
if is_datetime64_dtype(dt_array):
tz = "utc" if utc else None
return DatetimeIndex(dt_array, tz=tz, name=name)
return Index(dt_array, name=name, dtype=dt_array.dtype)
def _convert_and_box_cache(
arg: DatetimeScalarOrArrayConvertible,
cache_array: Series,
name: str | None = None,
) -> Index:
"""
Convert array of dates with a cache and wrap the result in an Index.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : Index-like of converted dates
"""
from pandas import Series
result = Series(arg).map(cache_array)
return _box_as_indexlike(result._values, utc=None, name=name)
def _return_parsed_timezone_results(result: np.ndarray, timezones, tz, name) -> Index:
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray[int64]
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : Index-like of parsed dates with timezone
"""
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
)
if tz is not None:
# Convert to the same tz
tz_results = np.array([tz_result.tz_convert(tz) for tz_result in tz_results])
return Index(tz_results, name=name)
def _convert_listlike_datetimes(
arg,
format: str | None,
name: Hashable = None,
tz: Timezone | None = None,
unit: str | None = None,
errors: str = "raise",
infer_datetime_format: bool = False,
dayfirst: bool | None = None,
yearfirst: bool | None = None,
exact: bool = True,
):
"""
Helper function for to_datetime. Performs the conversions of 1D listlike
of dates
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be parsed
name : object
None or string for the Index name
tz : object
None or 'utc'
unit : str
None or string of the frequency of the passed data
errors : str
error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : bool, default False
inferring format behavior from to_datetime
dayfirst : bool
dayfirst parsing behavior from to_datetime
yearfirst : bool
yearfirst parsing behavior from to_datetime
exact : bool, default True
exact format matching behavior from to_datetime
Returns
-------
Index-like of parsed dates
"""
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
arg_dtype = getattr(arg, "dtype", None)
# these are shortcutable
if is_datetime64tz_dtype(arg_dtype):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if tz == "utc":
arg = arg.tz_convert(None).tz_localize(tz)
return arg
elif is_datetime64_ns_dtype(arg_dtype):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
elif tz:
# DatetimeArray, DatetimeIndex
return arg.tz_localize(tz)
return arg
elif unit is not None:
if format is not None:
raise ValueError("cannot specify both format and unit")
return _to_datetime_with_unit(arg, unit, name, tz, errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
# warn if passing timedelta64, raise for PeriodDtype
# NB: this must come after unit transformation
orig_arg = arg
try:
arg, _ = maybe_convert_dtype(arg, copy=False, tz=timezones.maybe_get_tz(tz))
except TypeError:
if errors == "coerce":
npvalues = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg))
return DatetimeIndex(npvalues, name=name)
elif errors == "ignore":
idx = Index(arg, name=name)
return idx
raise
arg = ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because using it would make the process slower in this
# special case
format_is_iso8601 = format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
if format is not None:
res = _to_datetime_with_format(
arg, orig_arg, name, tz, format, exact, errors, infer_datetime_format
)
if res is not None:
return res
assert format is None or infer_datetime_format
utc = tz == "utc"
result, tz_parsed = objects_to_datetime64ns(
arg,
dayfirst=dayfirst,
yearfirst=yearfirst,
utc=utc,
errors=errors,
require_iso8601=require_iso8601,
allow_object=True,
)
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed))
return DatetimeIndex._simple_new(dta, name=name)
utc = tz == "utc"
return _box_as_indexlike(result, utc=utc, name=name)
def _array_strptime_with_fallback(
arg,
name,
tz,
fmt: str,
exact: bool,
errors: str,
infer_datetime_format: bool,
) -> Index | None:
"""
Call array_strptime, with fallback behavior depending on 'errors'.
"""
utc = tz == "utc"
try:
result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors)
except OutOfBoundsDatetime:
if errors == "raise":
raise
elif errors == "coerce":
result = np.empty(arg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult.fill(iNaT)
else:
result = arg
except ValueError:
# if fmt was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
elif errors == "coerce":
result = np.empty(arg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult.fill(iNaT)
else:
result = arg
else:
# Indicates to the caller to fallback to objects_to_datetime64ns
return None
else:
if "%Z" in fmt or "%z" in fmt:
return _return_parsed_timezone_results(result, timezones, tz, name)
return _box_as_indexlike(result, utc=utc, name=name)
def _to_datetime_with_format(
arg,
orig_arg,
name,
tz,
fmt: str,
exact: bool,
errors: str,
infer_datetime_format: bool,
) -> Index | None:
"""
Try parsing with the given format, returning None on failure.
"""
result = None
# shortcut formatting here
if fmt == "%Y%m%d":
# pass orig_arg as float-dtype may have been converted to
# datetime64[ns]
orig_arg = ensure_object(orig_arg)
try:
# may return None without raising
result = _attempt_YYYYMMDD(orig_arg, errors=errors)
except (ValueError, TypeError, OutOfBoundsDatetime) as err:
raise ValueError(
"cannot convert the input to '%Y%m%d' date format"
) from err
if result is not None:
utc = tz == "utc"
return _box_as_indexlike(result, utc=utc, name=name)
# fallback
res = _array_strptime_with_fallback(
arg, name, tz, fmt, exact, errors, infer_datetime_format
)
return res
def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index:
"""
to_datetime specialized to the case where a 'unit' is passed.
"""
arg = extract_array(arg, extract_numpy=True)
# GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime
# because it expects an ndarray argument
if isinstance(arg, IntegerArray):
arr = arg.astype(f"datetime64[{unit}]")
tz_parsed = None
else:
arg = np.asarray(arg)
arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if errors == "ignore":
# Index constructor _may_ infer to DatetimeIndex
result = Index._with_infer(arr, name=name)
else:
result = DatetimeIndex(arr, name=name)
if not isinstance(result, DatetimeIndex):
return result
# GH#23758: We may still need to localize the result with tz
# GH#25546: Apply tz_parsed first (from arg), then tz (from caller)
# result will be naive but in UTC
result = result.tz_localize("UTC").tz_convert(tz_parsed)
if tz is not None:
if result.tz is None:
result = result.tz_localize(tz)
else:
result = result.tz_convert(tz)
return result
def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : str
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
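Examples
--------
Illustrative values, assuming the standard Unix-epoch and Julian-day offsets:
>>> _adjust_to_origin(2458849.5, "julian", "D")  # Julian day of 2020-01-01 UTC
18262.0
>>> _adjust_to_origin(5, Timestamp("1960-01-01"), "D")
-3648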
"""
if origin == "julian":
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != "D":
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError as err:
raise ValueError(
"incompatible 'arg' type for given 'origin'='julian'"
) from err
# preemptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise OutOfBoundsDatetime(
f"{original} is Out of Bounds for origin='julian'"
)
else:
# arg must be numeric
if not (
(is_scalar(arg) and (is_integer(arg) or is_float(arg)))
or is_numeric_dtype(np.asarray(arg))
):
raise ValueError(
f"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified"
)
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except OutOfBoundsDatetime as err:
raise OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") from err
except ValueError as err:
raise ValueError(
f"origin {origin} cannot be converted to a Timestamp"
) from err
if offset.tz is not None:
raise ValueError(f"origin offset {offset} must be tz-naive")
td_offset = offset - Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
ioffset = td_offset // Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):
arg = np.asarray(arg)
arg = arg + ioffset
return arg
@overload
def to_datetime(
arg: DatetimeScalar,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> Timestamp:
...
@overload
def to_datetime(
arg: Series | DictConvertible,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> Series:
...
@overload
def to_datetime(
arg: list | tuple | Index | ArrayLike,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> DatetimeIndex:
...
def to_datetime(
arg: DatetimeScalarOrArrayConvertible | DictConvertible,
errors: DateTimeErrorChoices = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: bool | None = None,
format: str | None = None,
exact: bool = True,
unit: str | None = None,
infer_datetime_format: bool = False,
origin="unix",
cache: bool = True,
) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
"""
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first. If a delimited date string cannot be parsed in
accordance with the given `dayfirst` option, e.g.
``to_datetime(['31-12-2021'])``, then a warning will be shown.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
given precedence (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default None
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. Note that
:const:`"%f"` will parse all the way up to nanoseconds. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
unit : str, default 'ns'
The unit of the arg (D, s, ms, us, ns); the arg is interpreted as an
integer or float number of these units, counted from the origin.
Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate
the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
.. versionchanged:: 0.25.0
changed default value from :const:`False` to :const:`True`.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parentheses correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Passing ``infer_datetime_format=True`` can often speed up parsing
when the strings are not exactly ISO8601 but follow a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s, infer_datetime_format=True) # doctest: +SKIP
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s, infer_datetime_format=False) # doctest: +SKIP
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Non-convertible date/times**
If a date does not meet the `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_, passing ``errors='ignore'``
will return the original input instead of raising any exception.
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, pytz.FixedOffset(-300)]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`. Instead a
simple :class:`Index` containing :class:`datetime.datetime` objects is
returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200', '2020-10-25 04:00 +0100'])
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is converted to
a timezone-aware :class:`DatetimeIndex` if the offsets of the timezone-aware
are constant:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00 -01:00", datetime(2020, 1, 1, 3, 0)])
DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'],
dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None)
|
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both naive and aware, string or datetime, the above
rules still apply
>>> from datetime import timezone, timedelta
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 12:00 -0530',
... datetime(2020, 1, 1, 18),
... datetime(2020, 1, 1, 18,
... tzinfo=timezone(-timedelta(hours=1)))],
... utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 17:30:00+00:00',
'2020-01-01 18:00:00+00:00', '2020-01-01 19:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
if arg is None:
return None
if origin != "unix":
arg = _adjust_to_origin(arg, origin, unit)
tz = "utc" if utc else None
convert_listlike = partial(
_convert_listlike_datetimes,
tz=tz,
unit=unit,
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
infer_datetime_format=infer_datetime_format,
)
result: Timestamp | NaTType | Series | Index
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = arg.tz_convert(tz)
else:
result = arg.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, tz)
elif isinstance(arg, Index):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, name=arg.name)
else:
result = convert_listlike(arg, format, name=arg.name)
elif | is_list_like(arg) | pandas.core.dtypes.common.is_list_like |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
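# e.g. assigning a float into an int64 Series is expected to upcast the Series
# to float64; the TODO_GH12747 notes below mark places where the then-current
# behaviour did not yet coerce as expected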
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
raise NotImplementedError(klass)
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
| pd.Timestamp('2012-01-02') | pandas.Timestamp |
import pandas as pd
from Bio.PDB import Selection, PDBParser
# from Bio.PDB.vectors import rotmat, Vector
import numpy as np
"""
PDB file --> beads center DataFrame --> local structure --> rotated local structure
Functions in this version can handle multiple chains PDB file.
"""
def get_bead_center(residue):
# return CA coordinates if residue is GLY
ca_coord = residue['CA'].get_coord()
if residue.get_resname() == "GLY":
return ca_coord
# for other residues, return mean of CA coord and side chain mass centroid
backbone_atoms = {'N', 'CA', 'C', 'O'}
atom_mass = {'C': 12.0, 'N': 14.0, 'O': 16.0, 'S': 32.0, 'H': 1.0}
weighted_coord = np.array([0.0, 0.0, 0.0])
total_weight = 0
for atom in residue.get_atoms():
if atom.get_name() not in backbone_atoms:
weight = atom_mass[atom.element]
coord = atom.get_coord()
weighted_coord += weight * coord
total_weight += weight
side_chain_center = weighted_coord / total_weight
bead_center = (ca_coord + side_chain_center) / 2
return bead_center
def extract_beads(pdb_file):
"""
convert PDB to pandas dataframe
:param pdb_file:
:return:
"""
amino_acids = pd.read_csv('data/amino_acids.csv')
vocab_aa = [x.upper() for x in amino_acids.AA3C]
p = PDBParser()
structure = p.get_structure('X', f'data/dock/pdb/{pdb_file}.pdb')
residue_list = Selection.unfold_entities(structure, 'R')
bead_center_list = []
res_name_list = []
res_num_list = []
chain_list = []
for res in residue_list:
if res.get_resname() not in vocab_aa:
# raise ValueError('protein has non natural amino acids')
continue
chain_list.append(res.parent.id)
res_name_list.append(res.get_resname())
res_num_list.append(res.id[1])
bead_center = get_bead_center(res)
bead_center_list.append(bead_center)
g_center = np.vstack(bead_center_list)
df = pd.DataFrame({'chain_id': chain_list,
'group_num': res_num_list,
'group_name': res_name_list,
'x': g_center[:, 0],
'y': g_center[:, 1],
'z': g_center[:, 2]})
df.to_csv(f'data/dock/beads/{pdb_file}_bead.csv', index=False)
def _rotation_matrix(c1, c2):
z = np.cross(c1, c2)
x = c1
y = np.cross(z, x)
x = x / np.sqrt(np.sum(x ** 2))
y = y / np.sqrt(np.sum(y ** 2))
z = z / np.sqrt(np.sum(z ** 2))
R = np.vstack([x, y, z])
# Rinv = np.linalg.inv(R.T)
return R
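# Illustrative property check for _rotation_matrix (hypothetical vectors, not run
# as part of the pipeline): the rows of R are orthonormal, R maps c1 onto the +x
# axis, and c2 ends up in the x-y plane, matching how rotate_one applies R below.
#   R = _rotation_matrix(np.array([1.0, 0.0, 1.0]), np.array([0.0, 1.0, 0.0]))
#   np.allclose(R @ R.T, np.eye(3))                                      # True
#   np.allclose(R @ np.array([1.0, 0.0, 1.0]), [np.sqrt(2), 0.0, 0.0])   # True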
def rotate_one(fname):
df_list = []
df = pd.read_csv(f'data/dock/local/{fname}.csv')
center_num = df['center_num'].unique()
for g in center_num:
df_g = df[df['center_num'] == g]
df_g = df_g.sort_values(by=['chain_id', 'group_num'])
g_chain = df_g['chain_id'].values
g_group_num = df_g['group_num'].values
n_res = df_g.shape[0]
idx = np.arange(n_res)
# locate the central residue
center_idx = (g_chain == g[0]) & (g_group_num == int(g[1:]))
i = idx[center_idx][0]
if (i == 0) | (i == n_res-1):
# i is the smallest or largest, so can't find previous or next group for the coordinates calculation
# continue
raise ValueError('can not find previous or next group')
# make sure the previous and next residues are from the same chain
assert(g_chain[i-1] == g[0])
assert(g_chain[i+1] == g[0])
coords = df_g[['x', 'y', 'z']].values
coords = coords - coords[i] # center
# coords of the previous and next group in local peptide
c1 = coords[i-1]
c2 = coords[i+1]
rotate_mat = _rotation_matrix(c1, c2)
coords = np.squeeze(np.matmul(rotate_mat[None, :, :], coords[:, :, None]))
distance = np.sqrt(np.sum(coords**2, axis=1))
segment_info = np.ones(n_res, dtype=int) * 3
segment_info[i] = 0
segment_info[i-1] = 1
segment_info[i+1] = 2
df_g = pd.DataFrame({'center_num': df_g['center_num'],
'chain_id': df_g['chain_id'],
'group_num': df_g['group_num'],
'group_name': df_g['group_name'],
'x': coords[:, 0],
'y': coords[:, 1],
'z': coords[:, 2],
'distance': distance,
'segment': segment_info})
df_g = df_g.sort_values(by='distance')
df_list.append(df_g)
df = pd.concat(df_list, ignore_index=True)
df['num'] = np.arange(center_num.shape[0]).repeat(10, axis=0)
df.to_csv(f'data/dock/local_rot/{fname}_rot.csv', index=False)
def extract_one_topk(fname, k=10):
df = | pd.read_csv(f'data/dock/beads/{fname}_bead.csv', dtype={'chain_id': str, 'group_num': int}) | pandas.read_csv |
import argparse
import os
import itertools
import logging
import pandas as pd
from tqdm import tqdm
from src.analysis.utils import \
load_squadv2_dev_as_df, \
squad2_evaluation, \
load_squadv1_dev_as_df
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
def create_filepath_dict(model_filepath: str) -> dict:
"""
:param model_filepath: Filepath containing model checkpoints
:return: Dict with checkpoint numbers as keys and corresponding path as values
"""
checkpoint_str = list(
os.walk(model_filepath)
)[0][1]
checkpoint_nbr = [int(x.split('-')[-1]) for x in checkpoint_str]
checkpoint_fp = [f'{model_filepath}/{x}/eval_predictions.json' for x in checkpoint_str]
prediction_filepath_dict = dict(zip(checkpoint_nbr, checkpoint_fp))
return prediction_filepath_dict
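# Illustrative return value of create_filepath_dict, assuming the model directory
# contains Hugging Face style subfolders named 'checkpoint-<step>':
#   {500: '<model_filepath>/checkpoint-500/eval_predictions.json',
#    1000: '<model_filepath>/checkpoint-1000/eval_predictions.json'}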
def generate_predictions_df(model_filepath: str, seed: int):
"""
Generate DataFrame of predictions by checkpoint and seed from raw JSON output files
"""
logging.info('Loading predictions data')
prediction_filepath_dict = create_filepath_dict(model_filepath)
predictions_df = pd.DataFrame()
for checkpoint, fp in prediction_filepath_dict.items():
eval_predictions_df = pd.read_json(fp, orient='index').reset_index()
eval_predictions_df.rename(columns={'index': 'id', 0: "prediction_text"}, inplace=True)
eval_predictions_df['checkpoint'] = checkpoint
eval_predictions_df['seed'] = seed
predictions_df = predictions_df.append(eval_predictions_df)
return predictions_df
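# The returned frame has one row per (question id, checkpoint) with columns
# 'id', 'prediction_text', 'checkpoint' and 'seed'.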
def generate_metrics_by_category_df(
full_df: pd.DataFrame,
overall_metrics_df: pd.DataFrame,
category_label: str,
save: bool = False,
savepath: str = None
) -> pd.DataFrame:
"""
Compute metrics by categorical group.
:param full_df: DataFrame containing 'num_examples', '{category_label}', 'seed', 'id', 'prediction_text', 'answers'
columns
:param overall_metrics_df: DataFrame containing overall metrics (over all examples) by checkpoint and seed
:param category_label: Name of column containing example categories
:param save: Save output DataFrame as CSV
:param savepath: Filepath to save to incl. extension
:return: DataFrame
"""
full_df = full_df.copy()
full_metrics = []
for checkpoint, label in tqdm(
list(
itertools.product(
full_df['checkpoint'].unique(),
full_df[category_label].unique()
)
)
):
full_df_subset = full_df.copy()[
(full_df[category_label] == label) &
(full_df['checkpoint'] == checkpoint)
]
id_list = list(full_df_subset['id'])
prediction_text_list = list(full_df_subset['prediction_text'])
answers_list = list(full_df_subset['answers'])
metrics = squad2_evaluation(
id_list=id_list,
prediction_text_list=prediction_text_list,
answers_list=answers_list
)
metrics[category_label] = label
metrics['checkpoint'] = checkpoint
full_metrics.append(metrics)
full_metrics_df = | pd.DataFrame(full_metrics) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import nltk
tokenizer = nltk.RegexpTokenizer(r"\w+")
from nltk.corpus import stopwords
nltk.download('stopwords')
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from collections import defaultdict
import pickle
import math
from tqdm import tqdm # monitoring progress
import time
from joblib import Parallel, delayed # parallel processing
import requests  # used by htmls_by_urls below to fetch the pages
## Create folders -----------------------------------------------------------------------------------------------------------------------/
def createFolders(nameMainFolder,numberSubFolders):
for k in range (1, numberSubFolders):
path = '{}/page_{}'.format(nameMainFolder, k)
os.makedirs(path)
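# Hypothetical call: createFolders('html', 384) creates html/page_1 ... html/page_383
# (the range starts at 1 and stops before numberSubFolders itself).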
## Get htmls by urls -----------------------------------------------------------------------------------------------------------------------/
# These headers make the requests look like a regular browser session, so the server is less likely to flag them as bot traffic.
headers = {
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36",
'accept': "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
'referer': "https://myanimelist.net/"
}
def htmls_by_urls(urls_txt, folder):
# urls_txt: string 'https.txt' from previous task
# folder: string; eg '/Users/anton/Desktop/ADM/Homework3/html'
with open(urls_txt, 'r', encoding='utf-8') as f:
lines = f.readlines()
# list of urls
list_txt = [line.strip() for line in lines]
    i = 0  # choose the starting index here to resume a partially completed download
while i < len(list_txt):
url = list_txt[i]
# folder where we save html
al_folder = '{}/page_{}/{}.html'.format(folder, i//50 +1, i+1)
# download html
        html = requests.get(url, headers=headers)  # pass headers as a keyword; the second positional argument of requests.get is params
print(i)
        if html.status_code != 200:
time.sleep(120)
print('error', html.status_code)
else:
i += 1
with open(al_folder, 'w', encoding='utf-8') as g:
g.write(html.text)
def retriveTSV(folder):
tsvfile = os.listdir(folder)
tsvfile = [folder+"/"+ i for i in tsvfile if i.endswith('.tsv')]
dataset = | pd.read_csv(tsvfile[0],sep='\t') | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from prereise.gather.hydrodata.eia.decompose_profile import get_profile_by_state
def test_get_profile_argument_type():
arg = ((1, "WA"), (pd.Series(dtype=np.float64), 1))
for a in arg:
with pytest.raises(TypeError):
get_profile_by_state(a[0], a[1])
def test_get_profile_argument_value():
a = ( | pd.Series(dtype=np.float64) | pandas.Series |
import pandas as pd
import numpy as np
from datetime import datetime
###############
# SELECT DATA #
###############
print("Selecting attributes...")
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/raw/GIT_COMMITS.csv")
attributes = ['projectID', 'commitHash', 'author', 'committer', 'committerDate']
gitCommits = gitCommits[attributes]
gitCommits.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/raw/GIT_COMMITS_CHANGES.csv")
attributes = ['projectID', 'commitHash', 'changeType', 'linesAdded', 'linesRemoved']
gitCommitsChanges = gitCommitsChanges[attributes]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/raw/JIRA_ISSUES.csv")
attributes = ['projectID', 'key', 'creationDate', 'resolutionDate', 'type', 'priority', 'assignee', 'reporter']
jiraIssues = jiraIssues[attributes]
jiraIssues.to_csv('../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/raw/REFACTORING_MINER.csv")
attributes = ['projectID', 'commitHash', 'refactoringType']
refactoringMiner = refactoringMiner[attributes]
refactoringMiner.to_csv('../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/raw/SONAR_ISSUES.csv")
attributes = ['projectID', 'creationDate', 'closeDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity',
'debt', 'author']
sonarIssues = sonarIssues[attributes]
sonarIssues.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/raw/SONAR_MEASURES.csv")
attributes = ['commitHash', 'projectID', 'functions', 'commentLinesDensity', 'complexity', 'functionComplexity', 'duplicatedLinesDensity',
'violations', 'blockerViolations', 'criticalViolations', 'infoViolations', 'majorViolations', 'minorViolations', 'codeSmells',
'bugs', 'vulnerabilities', 'cognitiveComplexity', 'ncloc', 'sqaleIndex', 'sqaleDebtRatio', 'reliabilityRemediationEffort', 'securityRemediationEffort']
sonarMeasures = sonarMeasures[attributes]
sonarMeasures.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/raw/SZZ_FAULT_INDUCING_COMMITS.csv")
attributes = ['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash', 'key']
szzFaultInducingCommits = szzFaultInducingCommits[attributes]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv', header=True)
print("Attributes selected.")
##############
# CLEAN DATA #
##############
print("Cleaning data...")
def intersection(l1, l2):
temp = set(l2)
l3 = [value for value in l1 if value in temp]
return l3
def difference(li1, li2):
return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
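# Note: difference returns the symmetric difference (elements in exactly one of
# the two lists) and, because it goes through sets, element order is not guaranteed.
# Illustrative values:
#   intersection([1, 2, 3], [2, 3, 4]) -> [2, 3]
#   difference([1, 2, 3], [2, 3, 4])   -> [1, 4] (in some order)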
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv")
authorNan = list(np.where(gitCommits.author.isna()))[0]
committerNan = list(np.where(gitCommits.committer.isna()))[0]
inters = intersection(authorNan, committerNan)
gitCommits = gitCommits.drop(inters)
gitCommits.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv").iloc[:,1:]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv").iloc[:,1:]
resolutionDate_nan = list(np.where(jiraIssues.resolutionDate.isna()))[0]
jiraIssues_notresolved = jiraIssues.iloc[resolutionDate_nan,:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv").iloc[:,[1,-1]]
lastTimestamp = gitCommits.groupby(['projectID']).max()
jiraIssues_notresolved = pd.merge(jiraIssues_notresolved, lastTimestamp, how='left', on='projectID')
jiraIssues_notresolved = jiraIssues_notresolved.iloc[:,[0,1,2,4,5,6,7,8]].rename(columns={'committerDate': 'resolutionDate'})
jiraIssues_resolved = jiraIssues.drop(resolutionDate_nan)
jiraIssues = pd.concat([jiraIssues_resolved, jiraIssues_notresolved], sort=False).sort_index().reset_index().iloc[:,1:]
priority_nan = list(np.where(jiraIssues.priority.isna()))[0]
jiraIssues = jiraIssues.drop(priority_nan)
assignee_nan = list(np.where(jiraIssues.assignee.isna()))[0]
jiraIssues.assignee = jiraIssues.assignee.fillna('not-assigned')
jiraIssues.to_csv('../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv")
commitHashNan = list(np.where(refactoringMiner.commitHash.isna()))[0]
refactoringTypeNan = list(np.where(refactoringMiner.refactoringType.isna()))[0]
inters = intersection(commitHashNan, refactoringTypeNan)
refactoringMiner = refactoringMiner.drop(inters)
refactoringMiner.to_csv('../../data/interim/DataPreparation/CleanData/REFACTORING_MINER_clean.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv").iloc[:,1:]
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
closeCommitHashNan = list(np.where(sonarIssues.closeCommitHash.isna()))[0]
debtNan = list(np.where(sonarIssues.debt.isna()))[0]
authorNan = list(np.where(sonarIssues.author.isna()))[0]
inter = intersection(closeDateNan, closeCommitHashNan)
diff = difference(closeCommitHashNan, closeDateNan)
debtNan = list(np.where(sonarIssues.debt.isna())[0])
sonarIssues = sonarIssues.drop(debtNan).reset_index()
sonarIssues = sonarIssues.fillna({'closeCommitHash': 'not-resolved'})
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:,1:]
lastTimestamp = gitCommits.loc[:,['projectID', 'committerDate']].groupby(['projectID']).max()
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
sonarIssues_notresolved = sonarIssues.iloc[closeDateNan,:]
sonarIssues_notresolved = pd.merge(sonarIssues_notresolved, lastTimestamp, how='left', on='projectID')
sonarIssues_notresolved = sonarIssues_notresolved.loc[:,['projectID', 'creationDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity', 'debt', 'author', 'committerDate']].rename(columns={'committerDate': 'closeDate'})
sonarIssues_resolved = sonarIssues.drop(closeDateNan)
sonarIssues = pd.concat([sonarIssues_resolved, sonarIssues_notresolved], sort=False).sort_index().reset_index().iloc[:,1:]
sonarIssues.groupby(['author']).size().reset_index().rename(columns={0:'count'})
df1 = gitCommits[['commitHash', 'committer']]
df2 = (sonarIssues[['creationCommitHash', 'author']]).rename(columns={'creationCommitHash': 'commitHash'})
merge = pd.merge(df1, df2, on='commitHash', how='inner').drop_duplicates()
pairs = merge.groupby(['committer', 'author']).size().reset_index().rename(columns={0:'count'})
index1 = list(np.where(pairs.committer.value_counts()==1))[0]
committer_1 = (pairs.committer.value_counts())[index1].index
index2 = list(np.where(pairs.author.value_counts()==1))[0]
author_1 = (pairs.author.value_counts())[index2].index
index_author_1 = pairs.loc[pairs['author'].isin(author_1)].index
index_committer_1 = pairs.loc[pairs['committer'].isin(committer_1)].index
inter_pairs = intersection(index_author_1, index_committer_1)
pairs_unique = pairs.loc[inter_pairs]
commiters = list(pairs_unique.committer)
authors = list(pairs_unique.author)
merge2 = pd.merge(merge, pairs_unique, on='committer', how='inner')
merge2 = merge2[['commitHash', 'committer', 'author_y']].rename(columns={'author_y': 'author', 'commitHash': 'creationCommitHash'})
merge2 = merge2.drop_duplicates()
prova2 = merge2[['creationCommitHash', 'author']]
dictionary = prova2.set_index('creationCommitHash').T.to_dict('records')[0]
sonarIssues.author = sonarIssues.author.fillna(sonarIssues.creationCommitHash.map(dictionary))
sonarIssues = sonarIssues.dropna(subset=['author'])
sonarIssues = sonarIssues.iloc[:,1:]
sonarIssues.to_csv('../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv")
sonarMeasures.to_csv('../../data/interim/DataPreparation/CleanData/SONAR_MEASURES_clean.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv").iloc[:,1:]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv', header=True)
print("Data cleaned.")
##################
# CONSTRUCT DATA #
##################
print("Constructing data...")
def produce_bug(x):
if pd.isna(x.faultFixingCommitHash):
return False
return True
# COMMITS_FREQUENCY
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
gitCommits = gitCommits[['committer', 'committerDate']]
newDFsorted = gitCommits.sort_values(by=['committer', 'committerDate']).reset_index()[['committer', 'committerDate']]
newDFsortedCopy = []
committer = newDFsorted.iloc[0,0]
for index, row in newDFsorted.iterrows():
if index != 0:
if committer == newDFsorted.iloc[index,0]:
r = (pd.to_datetime(newDFsorted.iloc[index,1])-pd.to_datetime(newDFsorted.iloc[index-1,1]))
newDFsortedCopy.append([committer, r])
else:
committer = newDFsorted.iloc[index,0]
time_between_commits = pd.DataFrame(newDFsortedCopy)
time_between_commits[1] = time_between_commits[1].dt.total_seconds()
time_between_commits_commiter = time_between_commits.groupby([0]).mean()
time_between_commits_commiter = pd.DataFrame(time_between_commits_commiter).rename(columns={0:'committer', 1:'time_between_commits'})
time_between_commits_commiter.to_csv('../../data/interim/DataPreparation/ConstructData/COMMITS_FREQUENCY.csv', header=True)
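# A vectorized sketch of the same computation, kept as a comment for reference
# (assumes committerDate parses cleanly with pd.to_datetime; not executed here):
#   g = gitCommits.sort_values(['committer', 'committerDate'])
#   g['committerDate'] = pd.to_datetime(g['committerDate'])
#   gaps = g.groupby('committer')['committerDate'].diff().dt.total_seconds()
#   mean_gap_per_committer = gaps.groupby(g['committer']).mean()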
# FIXED_ISSUES
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
SZZcommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv")
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv")
SZZcommits = SZZcommits['faultFixingCommitHash']
gitCommits = gitCommits[['commitHash', 'committer']]
sonarIssues = sonarIssues['closeCommitHash']
jiraIssues = jiraIssues['assignee']
SZZ_issues = (pd.merge(SZZcommits, gitCommits, how='inner', left_on='faultFixingCommitHash', right_on='commitHash').drop_duplicates())[['commitHash', 'committer']]
SSZ_issue_committer = SZZ_issues.committer.value_counts().rename_axis('committer').reset_index(name='SZZIssues')
Sonar_issues = pd.merge(sonarIssues, gitCommits, how='inner', left_on='closeCommitHash', right_on='commitHash').drop_duplicates()[['commitHash', 'committer']]
Sonar_issues_committer = Sonar_issues.committer.value_counts().rename_axis('committer').reset_index(name='SonarIssues')
Jira_issues_committer = jiraIssues[jiraIssues != 'not-assigned'].value_counts().rename_axis('committer').reset_index(name='JiraIssues')
issues = pd.merge(SSZ_issue_committer, Sonar_issues_committer, on='committer', how='outer')
issues = pd.merge(issues, Jira_issues_committer, on='committer', how='outer')
issues = issues.fillna(0)
issues.to_csv('../../data/interim/DataPreparation/ConstructData/FIXED_ISSUES.csv', header=True)
# INDUCED_ISSUES
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
SZZcommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv")
SZZcommits = SZZcommits['faultInducingCommitHash']
gitCommits = gitCommits[['commitHash', 'committer']]
sonarIssues = sonarIssues['creationCommitHash']
SZZ_issues = (pd.merge(SZZcommits, gitCommits, how='inner', left_on='faultInducingCommitHash', right_on='commitHash').drop_duplicates())[['commitHash', 'committer']]
SSZ_issue_committer = SZZ_issues.committer.value_counts().rename_axis('committer').reset_index(name='SZZIssues')
Sonar_issues = pd.merge(sonarIssues, gitCommits, how='inner', left_on='creationCommitHash', right_on='commitHash').drop_duplicates()[['commitHash', 'committer']]
Sonar_issues_committer = Sonar_issues.committer.value_counts().rename_axis('committer').reset_index(name='SonarIssues')
issues = pd.merge(SSZ_issue_committer, Sonar_issues_committer, on='committer', how='outer')
issues = issues.fillna(0)
issues.to_csv('../../data/interim/DataPreparation/ConstructData/INDUCED_ISSUES.csv', header=True)
# JIRA_ISSUES_time
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv").iloc[:,1:]
jiraIssues['creationDate'] = pd.to_datetime(jiraIssues['creationDate'], format="%Y-%m-%dT%H:%M:%S.%f")
jiraIssues['resolutionDate'] = pd.to_datetime(jiraIssues['resolutionDate'], format="%Y-%m-%dT%H:%M:%S.%f")
jiraIssues["resolutionTime"] = jiraIssues["resolutionDate"]
seconds = (jiraIssues.loc[:,"resolutionDate"] - jiraIssues.loc[:,"creationDate"]).dt.total_seconds()
jiraIssues.loc[:,"resolutionTime"] = seconds/3600
jiraIssues.to_csv('../../data/interim/DataPreparation/ConstructData/JIRA_ISSUES_time.csv', header=True)
# NUMBER_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:,2:]
number_commits = gitCommits.groupby(['committer']).count().iloc[1:,1]
number_commits = pd.DataFrame(number_commits).rename(columns={'commitHash': 'numberCommits'})
number_commits.to_csv('../../data/interim/DataPreparation/ConstructData/NUMBER_COMMITS.csv', header=True)
# REFACTORING_MINER_bug
refactoringMiner = pd.read_csv("../../data/interim/DataPreparation/CleanData/REFACTORING_MINER_clean.csv")[['projectID', 'commitHash', 'refactoringType']]
szzFaultInducingCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")[['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash']]
induced_bug = pd.merge(refactoringMiner, szzFaultInducingCommits, how='left', left_on='commitHash', right_on='faultInducingCommitHash').drop_duplicates().reset_index()
induced_bug['bug'] = induced_bug.apply(lambda x: produce_bug(x), axis=1)
induced_bug = induced_bug[['projectID_x', 'commitHash', 'refactoringType', 'bug']].rename(columns={'projectID_x': 'projectID'})
induced_bug.to_csv('../../data/interim/DataPreparation/ConstructData/REFACTORING_MINER_bug.csv', header=True)
# SONAR_ISSUES_time
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv").iloc[:,1:]
sonarIssues['creationDate'] = pd.to_datetime(sonarIssues['creationDate'], format='%Y-%m-%dT%H:%M:%SZ')
sonarIssues["closeDate"] = pd.to_datetime(sonarIssues["closeDate"], format="%Y-%m-%dT%H:%M:%SZ")
sonarIssues["closeTime"] = sonarIssues["closeDate"]
seconds = (sonarIssues.loc[:,"closeDate"] - sonarIssues.loc[:,"creationDate"]).dt.total_seconds()
sonarIssues.loc[:,"closeTime"] = seconds/3600
sonarIssues.to_csv('../../data/interim/DataPreparation/ConstructData/SONAR_ISSUES_time.csv', header=True)
# SONAR_MEASURES_difference
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_MEASURES_clean.csv").iloc[:, 2:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:, 2:]
gitCommits['committerDate'] = pd.to_datetime(gitCommits['committerDate'], format='%Y-%m-%dT%H:%M:%SZ')
newDF = pd.merge(sonarMeasures, gitCommits, how='left', left_on=['commitHash','projectID'], right_on = ['commitHash','projectID'])
newDFNaN = list(np.where(newDF.committerDate.isna()))[0]
newDF = newDF.drop(newDFNaN)
projectID = newDF.projectID.unique()
newDFsorted = newDF.sort_values(by=['projectID', 'committerDate'])
newDFsortedCopy = newDFsorted.copy()
project = newDFsorted.iloc[0,1]
for index, row in newDFsorted.iterrows():
if index < 55625:
if project == newDFsorted.iloc[index,1]:
r = newDFsortedCopy.iloc[index-1:index+1,2:22].diff().iloc[1,:]
newDFsorted.iloc[index:index+1,2:22] = np.array(r)
else:
project = newDFsorted.iloc[index,1]
sonarMeasuresDifference = newDFsorted.iloc[:,:22]
sonarMeasuresDifference.to_csv('../../data/interim/DataPreparation/ConstructData/SONAR_MEASURES_difference.csv', header=True)
# TIME_IN_EACH_PROJECT
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
time_in_project = gitCommits.groupby(['projectID', 'committer'])['committerDate'].agg(['min', 'max']).reset_index()
time = (pd.to_datetime(time_in_project['max'])-pd.to_datetime(time_in_project['min']))
time_in_project['time'] = time.dt.total_seconds()
time_in_project = time_in_project[['projectID', 'committer', 'time']]
time_in_project.to_csv('../../data/interim/DataPreparation/ConstructData/TIME_IN_PROJECT.csv', header=True)
print("Data constructed.")
##################
# INTEGRATE DATA #
##################
print("Integarting data...")
numberCommits = pd.read_csv("../../data/interim/DataPreparation/ConstructData/NUMBER_COMMITS.csv")
fixedIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/FIXED_ISSUES.csv").iloc[:,1:]
fixedIssues = fixedIssues.rename(columns={'SZZIssues':'fixedSZZIssues','SonarIssues':'fixedSonarIssues','JiraIssues':'fixedJiraIssues'})
inducedIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/INDUCED_ISSUES.csv").iloc[:,1:]
inducedIssues = inducedIssues.rename(columns={'SZZIssues':'inducedSZZIssues','SonarIssues':'inducedSonarIssues'})
dataFrame = pd.merge(numberCommits, fixedIssues, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
dataFrame = pd.merge(dataFrame, inducedIssues, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0)
timeInProject = pd.read_csv("../../data/interim/DataPreparation/ConstructData/TIME_IN_PROJECT.csv").iloc[:,1:]
timeInProject = timeInProject.rename(columns={'time':'timeInProject'})
timeInProject = timeInProject.groupby(['committer']).mean().iloc[1:,:]
dataFrame = pd.merge(dataFrame, timeInProject, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0)
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/JIRA_ISSUES_time.csv").iloc[:,1:]
dum = pd.get_dummies(jiraIssues[["type", 'priority']], prefix=['type', 'priority'])
TypePriority = jiraIssues[['assignee']].join(dum)
TypePriority = TypePriority[TypePriority.assignee!='not-assigned'].reset_index().iloc[:,1:]
TypePriority = TypePriority.groupby(["assignee"]).sum()
resolutionTime = jiraIssues.loc[:,['assignee','resolutionTime']]
resolutionTime = resolutionTime.groupby(["assignee"]).mean()
jiraIssues = resolutionTime.join(TypePriority)
jiraIssues = jiraIssues.reset_index().rename(columns={'assignee':'committer'})
dataFrame = pd.merge(dataFrame, jiraIssues, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv").iloc[:,2:]
dum = pd.get_dummies(gitCommitsChanges[["changeType"]])
dum = dum.rename(columns={'changeType_ModificationType.ADD':'ADD', 'changeType_ModificationType.DELETE':'DELETE', 'changeType_ModificationType.MODIFY':'MODIFY', 'changeType_ModificationType.RENAME':'RENAME', 'changeType_ModificationType.UNKNOWN':'UNKNOWN'})
Lines = gitCommitsChanges[["commitHash",'linesAdded','linesRemoved']]
gitCommitsChanges = pd.concat([Lines,dum], axis=1)
gitCommitsChanges = gitCommitsChanges.groupby(['commitHash']).agg({'ADD':'sum', 'DELETE':'sum', 'MODIFY':'sum', 'RENAME':'sum', 'UNKNOWN':'sum', 'linesAdded':'mean', 'linesRemoved':'mean'})
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
gitCommitsChanges = pd.merge(gitCommits, gitCommitsChanges, how='left', left_on=['commitHash'], right_on = ['commitHash'])
gitCommitsChanges = gitCommitsChanges[['committer','ADD','DELETE','MODIFY','RENAME','UNKNOWN','linesAdded','linesRemoved']]
gitCommitsChanges = gitCommitsChanges.groupby(['committer']).mean()
dataFrame = pd.merge(dataFrame, gitCommitsChanges, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
refactoringMinerBug = pd.read_csv("../../data/interim/DataPreparation/ConstructData/REFACTORING_MINER_bug.csv").iloc[:,1:]
dum = pd.get_dummies(refactoringMinerBug[['refactoringType', 'bug']])
commitHash = refactoringMinerBug[["commitHash"]]
refactoringMinerBug = pd.concat([commitHash,dum], axis=1)
refactoringMinerBug = refactoringMinerBug.groupby(['commitHash']).sum()
refactoringMinerBug = pd.merge(refactoringMinerBug, gitCommits, how='left', left_on=['commitHash'], right_on = ['commitHash'])
refactoringMinerBug = pd.concat([refactoringMinerBug[['committer']], refactoringMinerBug.iloc[:,:-4]], axis=1)
refactoringMinerBug = refactoringMinerBug.groupby(['committer']).sum()
dataFrame = pd.merge(dataFrame, refactoringMinerBug, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/ConstructData/SONAR_MEASURES_difference.csv").iloc[:,1:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")[['commitHash', 'committer']]
sonarMeasures = pd.merge(sonarMeasures, gitCommits, how='left', on='commitHash').iloc[:,2:]
sonarMeasures_committer = sonarMeasures.groupby(['committer']).agg({'functions':'sum', 'commentLinesDensity':'mean',
'complexity':'sum', 'functionComplexity':'sum', 'duplicatedLinesDensity':'mean', 'violations':'sum', 'blockerViolations':'sum',
'criticalViolations':'sum','infoViolations':'sum','majorViolations':'sum','minorViolations':'sum','codeSmells':'sum',
'bugs':'sum','vulnerabilities':'sum','cognitiveComplexity':'sum','ncloc':'sum','sqaleIndex':'sum',
'sqaleDebtRatio':'sum','reliabilityRemediationEffort':'sum','securityRemediationEffort':'sum'}).reset_index()
dataFrame = | pd.merge(dataFrame, sonarMeasures_committer, how='left', on='committer') | pandas.merge |
import pandas
from text_preprocessing import *
from tensorflow import keras
from tensorflow.keras import layers
import wandb
from wandb.keras import WandbCallback
import pathlib
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import os
import sys
if getattr(sys, 'frozen', False):
PATH = os.path.dirname(sys.executable)
elif __file__:
PATH = os.path.dirname(__file__)
def converter(x):
return json.loads(x)
class EncoderHandler:
def __init__(self, encoders_raw):
self.encoders = [list(map(Encoder.load, encoder)) for encoder in [encoder_layer for encoder_layer in encoders_raw]]
self.encoders_raw = []
for layer in self.encoders:
for e in layer:
self.encoders_raw.append(e)
self.encoder_dict = {}
for encoder_layer in self.encoders:
for encoder in encoder_layer:
self.encoder_dict[encoder.name] = encoder
def bulk_encode(self, text):
results = []
for encoder_layer in self.encoders:
encoder_results = []
for encoder in encoder_layer:
encoder_results += encoder.encode(text)
results.append(np.asarray(encoder_results).reshape((1,-1)))
return results if len(results) > 1 else results[0]
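# Hypothetical usage sketch (the encoder configs and headline are assumptions,
# not part of a verified API): given raw encoder configs from a model config,
# the handler returns one feature array per encoder layer for a headline.
#   handler = EncoderHandler(config['encoder_configs'])
#   features = handler.bulk_encode("10 things you won't believe")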
class Package:
def __init__(self, model_name, **kwargs):
self.model_name = model_name
self.model = keras.models.load_model('models/'+model_name) if kwargs.get('load_model', False) else None
self.config = kwargs.get('config')
self.encoder_configs = self.config['encoder_configs']
self.handler = EncoderHandler(self.encoder_configs)
def tokenize_dataset(self, fname):
# tokenize text dataset or load if already exists
# to format: encoder1;encoder2;clickbait
fpath = f'datasets/{fname}.csv'
df = pandas.read_csv(fpath, sep=';')
titles = df['title']
df_dict = dict(clickbait=df['clickbait'])
for layer in self.handler.encoders:
for e in layer:
print(e.name)
tokenized_titles = list(map(e.tokenize, titles))
df_dict[e.name] = tokenized_titles
output_df = | pandas.DataFrame(data=df_dict) | pandas.DataFrame |
"""
pymake
-------------------------------
- <NAME>
- <EMAIL>
-------------------------------
Created 29-05-2018
"""
import pandas
from pymake.main import printer
import re
import unidecode
import random
import os
def normalize_str(instr):
output = re.sub(r'[^\w]', ' ', instr).strip().lower()
output = re.sub(' +', ' ', output)
output = output.title()
return output
def normalize_str_nosp(instr):
output = re.sub(r'[^\w]', ' ', instr).strip().lower()
output = re.sub(' +', ' ', output)
output = unidecode.unidecode(output).replace(' ', '_')
return output
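# Illustrative behaviour (assumed inputs):
#   normalize_str('José-García  López')      -> 'José García López'
#   normalize_str_nosp('José-García  López') -> 'jose_garcia_lopez'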
def normalize_string(df, column):
df[column] = df[column].apply(normalize_str)
def print_category_info(df, column, n_rows=None, sort=True):
    printer.print_info('Number of distinct {1} values: {0}'.format(len(df[column].unique()), column))
df = df.copy()
df['####'] = 0
dfx = df[[column, '####']].groupby(by=[column]).count()
dfx.columns = ['N']
dfx['%'] = 100 * dfx.N / dfx.N.sum()
number_missings = df[column].isnull().sum()
dfx = dfx.append(pandas.Series([number_missings, 100 * number_missings / df.shape[0]], index=dfx.columns, name='NaNs'))
dfx = dfx.append(
| pandas.Series([df.shape[0], 100], index=dfx.columns, name='Total') | pandas.Series |