prompt (string · lengths 19–1.03M) | completion (string · lengths 4–2.12k) | api (string · lengths 8–90)
---|---|---|
import json
import math
import operator
import warnings
import numpy as np
import pandas as pd

warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=np.RankWarning)
np.seterr(divide='ignore', invalid='ignore')

def ema(series, period):
    """Exponential moving average with smoothing factor alpha = 2 / (period + 1)."""
    values = np.zeros(len(series))
    alpha = 2.0 / (period + 1)
    for i, val in enumerate(series):
        values[i] = val if i == 0 else alpha * val + (1 - alpha) * values[i - 1]
    return values

def sma(series, period):
    """Simple moving average over the trailing `period` values."""
    values = np.zeros(len(series))
    for i, val in enumerate(series):
        series_slice = series[:i + 1][-min(i + 1, period):]
        values[i] = sum(series_slice) / min(i + 1, period)
    return values

def change(series):
    """Bar-to-bar difference of the series (0 for the first element)."""
    values = np.zeros(len(series))
    for i, val in enumerate(series):
        values[i] = 0 if i == 0 else val - series[i - 1]
    return values

def linreg(series, period, offset):
    """Linear-regression value over each trailing window, evaluated at (period - 1 - offset)."""
    values = np.zeros(len(series))
    for i, val in enumerate(series):
        series_slice = series[:i + 1][-min(i + 1, period):]
        slope, intercept = np.polyfit(range(len(series_slice)), series_slice, 1)
        values[i] = intercept + slope * (period - 1 - offset)
    return values

def cci(series, period):
    """Commodity Channel Index over the trailing `period` values."""
    values = np.zeros(len(series))
    for i, val in enumerate(series):
        series_slice = series[:i + 1][-min(i + 1, period):]
        current_sma = sma(series_slice, period)[-1]
        values[i] = (val - current_sma) / (0.015 * sum(abs(x - current_sma) for x in series_slice) / period)
    return values

def ohlc4(close_prices, open_prices, high_prices, low_prices):
    """Average of open, high, low and close for each bar."""
    values = np.zeros(len(close_prices))
    for i, val in enumerate(close_prices):
        values[i] = (close_prices[i] + open_prices[i] + high_prices[i] + low_prices[i]) / 4
    return values
# Call the PineScript-style strategy every X minutes, and only when there is actually new data to process.
def apply_strategy():
    global ticks, bars, bars_len, open_orders_count, last_time, tape_len, order_size, current_active, active_order
    if len(lob.tape) > tape_len and pd.Timedelta(lob.time, unit='ms') - pd.Timedelta(last_time, unit='ms') >= pd.Timedelta(period):
        ticks = pd.DataFrame(list(lob.tape))
        ticks['time'] =
|
pd.to_datetime(ticks['time'], unit='ms')
|
pandas.to_datetime
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from datetime import datetime
# In[2]:
hh_df = pd.read_csv('home_ac/processed_hhdata_86_2.csv')
# print(hh_df.shape)
# hh_df.head(15)
hh_df.drop_duplicates(subset="localhour", keep=False, inplace=True)
print(hh_df.shape)
# In[3]:
hh_df['hour_index'] = 0
# hh_df.iloc[-50]
# In[4]:
used = ['localhour', 'use', 'temperature', 'cloud_cover', 'GH', 'is_weekday', 'month', 'hour', 'AC', 'DC', 'hour_index']
datarow = []
# In[5]:
hour_index = 0  # index of the current hour relative to the first timestamp
hour_value = 0
missing_count = 0
start_time = pd.to_datetime(hh_df['localhour'].iloc[0][:-3])
for index, row in hh_df.iterrows():
    row.localhour = row.localhour[:-3]
    # print(row.localhour)
    difference = (pd.to_datetime(row.localhour) - pd.to_datetime(hh_df['localhour'].iloc[0][:-3])).total_seconds() / 3600
    # print("index is difference", difference)
    if difference != hour_index:
        gap = difference - hour_index
        missing_count += gap
        # fill in the missing hours by repeating the last observed row with adjusted time fields
        for i in range(int(gap)):
            print("\n---------------------------------------")
            print("missing data for hour index:", hour_index + i)
            # row.hour = (hour_index + i) % 24
            temprow = lastrow.copy()  # copy so the filled-in row does not alias lastrow
            # print("this is temprow", temprow)
            temprow.hour_index = hour_index + i
            # print("this is hour of lastrow", lastrow.hour)
            # temprow.hour = (hour_index + i) % 24
            current_time = start_time + pd.Timedelta(hour_index + i, unit='h')
            temprow.localhour = current_time
            temprow.hour = current_time.hour
            temprow.month = current_time.month
            temprow.is_weekday = int(datetime.strptime(str(current_time), "%Y-%m-%d %H:%M:%S").weekday() < 5)
            print("The inserted row is \n", temprow)
            # datarow.append(row[used])
            datarow.append(temprow[used])
            temprow = None
            # hour = None
            # print(datarow)
        hour_index = difference
    hour_index += 1
    row.hour_index = difference
    # hour_value = row.hour
    # print(row[used])
    lastrow = row[used]
    datarow.append(row[used])
print("total missing hours", missing_count)
#------------------------------------------testing----------------------------
# hour_index=0 #hour index
# missing_count=0
# for index, row in hh_df.iterrows():
# #print(row.localhour)
# #row.month = float(pd.to_datetime(row.localhour[:-3]).month)
# #row.day = float(pd.to_datetime(row.localhour[:-3]).day)
# #data_hour = float(pd.to_datetime(row.localhour).hour-6)%24
# data_hour = float(pd.to_datetime(row.localhour[:-3]).hour)
# #print(data_hour)
# if data_hour != hour_index%24:
# print("we are missing hours for",row.localhour)
# missing_count += 1
# hour_index +=1
# hour_index += 1
# print("In total missing hours", missing_count)
# for index, row in hh_df.iterrows():
# #row.month = float(pd.to_datetime(row.localhour[:-3]).month)
# #row.day = float(pd.to_datetime(row.localhour[:-3]).day)
# print("------------")
# print(row.localhour)
# print(float(pd.to_datetime(row.localhour).hour-6)%24)
# print(float(pd.to_datetime(row.localhour[:-3]).hour))
# # print(pd.to_datetime(row.localhour))
# # print(pd.to_datetime(row.localhour).tz_localize('UTC'))
# # print(pd.to_datetime(row.localhour).tz_localize('UTC').tz_convert('US/Central'))
# # print(pd.to_datetime(row.localhour[:-3]).tz_localize('US/Central'))
# # print(pd.to_datetime(row.localhour)-pd.Timedelta('06:00:00'))
# In[6]:
df =
|
pd.DataFrame(data=datarow, columns=used)
|
pandas.DataFrame
|
import datetime
import unittest
import numpy as np
import pandas as pd
from dateutil.parser import parse
from helpsk import date, validation
from tests.helpers import subtests_expected_vs_actual
# noinspection PyMethodMayBeStatic
class TestDate(unittest.TestCase):
def test_fiscal_quarter_date(self):
date_values = ['2020-12-01', '2020-12-15', '2020-12-31',
'2021-01-01', '2021-01-15', '2021-01-31',
'2021-02-01', '2021-02-15', '2021-02-28',
'2021-03-01', '2021-03-15', '2021-03-31',
'2021-04-01', '2021-04-15', '2021-04-30',
'2021-05-01', '2021-05-15', '2021-05-31',
'2021-06-01', '2021-06-15', '2021-06-30',
'2021-07-01', '2021-07-15', '2021-07-31',
'2021-08-01', '2021-08-15', '2021-08-31',
'2021-09-01', '2021-09-15', '2021-09-30',
'2021-10-01', '2021-10-15', '2021-10-31',
'2021-11-01', '2021-11-15', '2021-11-30',
'2021-12-01', '2021-12-15', '2021-12-31',
'2022-01-01', '2022-01-15', '2022-01-31']
test_parameters = dict(include_year=True, fiscal_start=1)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [2020.4, 2020.4, 2020.4,
2021.1, 2021.1, 2021.1,
2021.1, 2021.1, 2021.1,
2021.1, 2021.1, 2021.1,
2021.2, 2021.2, 2021.2,
2021.2, 2021.2, 2021.2,
2021.2, 2021.2, 2021.2,
2021.3, 2021.3, 2021.3,
2021.3, 2021.3, 2021.3,
2021.3, 2021.3, 2021.3,
2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2022.1, 2022.1, 2022.1]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=1)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [4, 4, 4,
1, 1, 1,
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
2, 2, 2,
3, 3, 3,
3, 3, 3,
3, 3, 3,
4, 4, 4,
4, 4, 4,
4, 4, 4,
1, 1, 1]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=True, fiscal_start=2)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2022.1, 2022.1, 2022.1,
2022.1, 2022.1, 2022.1,
2022.1, 2022.1, 2022.1,
2022.2, 2022.2, 2022.2,
2022.2, 2022.2, 2022.2,
2022.2, 2022.2, 2022.2,
2022.3, 2022.3, 2022.3,
2022.3, 2022.3, 2022.3,
2022.3, 2022.3, 2022.3,
2022.4, 2022.4, 2022.4,
2022.4, 2022.4, 2022.4,
2022.4, 2022.4, 2022.4]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=2)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [4, 4, 4,
4, 4, 4,
1, 1, 1,
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
2, 2, 2,
3, 3, 3,
3, 3, 3,
3, 3, 3,
4, 4, 4,
4, 4, 4,
4, 4, 4]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=True, fiscal_start=12)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [2021.1, 2021.1, 2021.1, # 2020-Dec
2021.1, 2021.1, 2021.1, # 2021-Jan
2021.1, 2021.1, 2021.1, # 2021-Feb
2021.2, 2021.2, 2021.2, # 2021-Mar
2021.2, 2021.2, 2021.2, # 2021-Apr
2021.2, 2021.2, 2021.2, # 2021-May
2021.3, 2021.3, 2021.3, # 2021-Jun
2021.3, 2021.3, 2021.3, # 2021-Jul
2021.3, 2021.3, 2021.3, # 2021-Aug
2021.4, 2021.4, 2021.4, # 2021-Sep
2021.4, 2021.4, 2021.4, # 2021-Oct
2021.4, 2021.4, 2021.4, # 2021-Nov
2022.1, 2022.1, 2022.1, # 2021-Dec
2022.1, 2022.1, 2022.1] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=12)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [1, 1, 1, # 2020-Dec
1, 1, 1, # 2021-Jan
1, 1, 1, # 2021-Feb
2, 2, 2, # 2021-Mar
2, 2, 2, # 2021-Apr
2, 2, 2, # 2021-May
3, 3, 3, # 2021-Jun
3, 3, 3, # 2021-Jul
3, 3, 3, # 2021-Aug
4, 4, 4, # 2021-Sep
4, 4, 4, # 2021-Oct
4, 4, 4, # 2021-Nov
1, 1, 1, # 2021-Dec
1, 1, 1] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
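# Editor's note, not part of the original test file: a minimal sketch of the quarter logic these
# expectations imply; this is NOT helpsk's actual implementation of date.fiscal_quarter.
def _fiscal_quarter_sketch(value, include_year=True, fiscal_start=1):
    # months elapsed since the start of the fiscal year that contains `value`
    month_offset = (value.month - fiscal_start) % 12
    quarter = month_offset // 3 + 1
    if fiscal_start == 1:
        year = value.year
    else:
        # non-calendar fiscal years are labelled by the calendar year in which they end
        year = value.year + (1 if value.month >= fiscal_start else 0)
    return float(f'{year}.{quarter}') if include_year else quarter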
def test_fiscal_quarter_datetime(self):
date_values = ['2020-12-01', '2020-12-15', '2020-12-31',
'2021-01-01', '2021-01-15', '2021-01-31',
'2021-02-01', '2021-02-15', '2021-02-28',
'2021-03-01', '2021-03-15', '2021-03-31',
'2021-04-01', '2021-04-15', '2021-04-30',
'2021-05-01', '2021-05-15', '2021-05-31',
'2021-06-01', '2021-06-15', '2021-06-30',
'2021-07-01', '2021-07-15', '2021-07-31',
'2021-08-01', '2021-08-15', '2021-08-31',
'2021-09-01', '2021-09-15', '2021-09-30',
'2021-10-01', '2021-10-15', '2021-10-31',
'2021-11-01', '2021-11-15', '2021-11-30',
'2021-12-01', '2021-12-15', '2021-12-31',
'2022-01-01', '2022-01-15', '2022-01-31']
test_parameters = dict(include_year=True, fiscal_start=1)
results = [date.fiscal_quarter(value=parse(x + ' 23:59:59'), **test_parameters) for x in date_values]
expected = [2020.4, 2020.4, 2020.4,
2021.1, 2021.1, 2021.1,
2021.1, 2021.1, 2021.1,
2021.1, 2021.1, 2021.1,
2021.2, 2021.2, 2021.2,
2021.2, 2021.2, 2021.2,
2021.2, 2021.2, 2021.2,
2021.3, 2021.3, 2021.3,
2021.3, 2021.3, 2021.3,
2021.3, 2021.3, 2021.3,
2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2022.1, 2022.1, 2022.1]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=1)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [4, 4, 4,
1, 1, 1,
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
2, 2, 2,
3, 3, 3,
3, 3, 3,
3, 3, 3,
4, 4, 4,
4, 4, 4,
4, 4, 4,
1, 1, 1]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=True, fiscal_start=2)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [2021.4, 2021.4, 2021.4,
2021.4, 2021.4, 2021.4,
2022.1, 2022.1, 2022.1,
2022.1, 2022.1, 2022.1,
2022.1, 2022.1, 2022.1,
2022.2, 2022.2, 2022.2,
2022.2, 2022.2, 2022.2,
2022.2, 2022.2, 2022.2,
2022.3, 2022.3, 2022.3,
2022.3, 2022.3, 2022.3,
2022.3, 2022.3, 2022.3,
2022.4, 2022.4, 2022.4,
2022.4, 2022.4, 2022.4,
2022.4, 2022.4, 2022.4]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=2)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [4, 4, 4,
4, 4, 4,
1, 1, 1,
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
2, 2, 2,
3, 3, 3,
3, 3, 3,
3, 3, 3,
4, 4, 4,
4, 4, 4,
4, 4, 4]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=True, fiscal_start=12)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [2021.1, 2021.1, 2021.1, # 2020-Dec
2021.1, 2021.1, 2021.1, # 2021-Jan
2021.1, 2021.1, 2021.1, # 2021-Feb
2021.2, 2021.2, 2021.2, # 2021-Mar
2021.2, 2021.2, 2021.2, # 2021-Apr
2021.2, 2021.2, 2021.2, # 2021-May
2021.3, 2021.3, 2021.3, # 2021-Jun
2021.3, 2021.3, 2021.3, # 2021-Jul
2021.3, 2021.3, 2021.3, # 2021-Aug
2021.4, 2021.4, 2021.4, # 2021-Sep
2021.4, 2021.4, 2021.4, # 2021-Oct
2021.4, 2021.4, 2021.4, # 2021-Nov
2022.1, 2022.1, 2022.1, # 2021-Dec
2022.1, 2022.1, 2022.1] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(include_year=False, fiscal_start=12)
results = [date.fiscal_quarter(value=parse(x), **test_parameters) for x in date_values]
expected = [1, 1, 1, # 2020-Dec
1, 1, 1, # 2021-Jan
1, 1, 1, # 2021-Feb
2, 2, 2, # 2021-Mar
2, 2, 2, # 2021-Apr
2, 2, 2, # 2021-May
3, 3, 3, # 2021-Jun
3, 3, 3, # 2021-Jul
3, 3, 3, # 2021-Aug
4, 4, 4, # 2021-Sep
4, 4, 4, # 2021-Oct
4, 4, 4, # 2021-Nov
1, 1, 1, # 2021-Dec
1, 1, 1] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
def test_to_string_date(self):
date_values = ['2020-12-01', '2020-12-15', '2020-12-31',
'2021-01-01', '2021-01-15', '2021-01-31',
'2021-02-01', '2021-02-15', '2021-02-28',
'2021-03-01', '2021-03-15', '2021-03-31',
'2021-04-01', '2021-04-15', '2021-04-30',
'2021-05-01', '2021-05-15', '2021-05-31',
'2021-06-01', '2021-06-15', '2021-06-30',
'2021-07-01', '2021-07-15', '2021-07-31',
'2021-08-01', '2021-08-15', '2021-08-31',
'2021-09-01', '2021-09-15', '2021-09-30',
'2021-10-01', '2021-10-15', '2021-10-31',
'2021-11-01', '2021-11-15', '2021-11-30',
'2021-12-01', '2021-12-15', '2021-12-31',
'2022-01-01', '2022-01-15', '2022-01-31']
test_parameters = dict(granularity=date.Granularity.DAY)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=date_values,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.MONTH)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2020-Dec', '2020-Dec', '2020-Dec',
'2021-Jan', '2021-Jan', '2021-Jan',
'2021-Feb', '2021-Feb', '2021-Feb',
'2021-Mar', '2021-Mar', '2021-Mar',
'2021-Apr', '2021-Apr', '2021-Apr',
'2021-May', '2021-May', '2021-May',
'2021-Jun', '2021-Jun', '2021-Jun',
'2021-Jul', '2021-Jul', '2021-Jul',
'2021-Aug', '2021-Aug', '2021-Aug',
'2021-Sep', '2021-Sep', '2021-Sep',
'2021-Oct', '2021-Oct', '2021-Oct',
'2021-Nov', '2021-Nov', '2021-Nov',
'2021-Dec', '2021-Dec', '2021-Dec',
'2022-Jan', '2022-Jan', '2022-Jan']
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=1)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2020-Q4', '2020-Q4', '2020-Q4', # 2020-Dec
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Jan
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Feb
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Mar
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-Apr
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-May
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-Jun
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Jul
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Aug
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Sep
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Oct
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Nov
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Dec
'2022-Q1', '2022-Q1', '2022-Q1'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=2)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2021-FQ4', '2021-FQ4', '2021-FQ4', # 2020-Dec
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Jan
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Feb
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Mar
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Apr
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-May
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-Jun
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-Jul
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Aug
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Sep
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Oct
'2022-FQ4', '2022-FQ4', '2022-FQ4', # 2021-Nov
'2022-FQ4', '2022-FQ4', '2022-FQ4', # 2021-Dec
'2022-FQ4', '2022-FQ4', '2022-FQ4'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=12)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2021-FQ1', '2021-FQ1', '2021-FQ1', # 2020-Dec
'2021-FQ1', '2021-FQ1', '2021-FQ1', # 2021-Jan
'2021-FQ1', '2021-FQ1', '2021-FQ1', # 2021-Feb
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-Mar
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-Apr
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-May
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Jun
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Jul
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Aug
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Sep
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Oct
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Nov
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Dec
'2022-FQ1', '2022-FQ1', '2022-FQ1'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
def test_to_string_datetime(self):
date_values = ['2020-12-01', '2020-12-15', '2020-12-31',
'2021-01-01', '2021-01-15', '2021-01-31',
'2021-02-01', '2021-02-15', '2021-02-28',
'2021-03-01', '2021-03-15', '2021-03-31',
'2021-04-01', '2021-04-15', '2021-04-30',
'2021-05-01', '2021-05-15', '2021-05-31',
'2021-06-01', '2021-06-15', '2021-06-30',
'2021-07-01', '2021-07-15', '2021-07-31',
'2021-08-01', '2021-08-15', '2021-08-31',
'2021-09-01', '2021-09-15', '2021-09-30',
'2021-10-01', '2021-10-15', '2021-10-31',
'2021-11-01', '2021-11-15', '2021-11-30',
'2021-12-01', '2021-12-15', '2021-12-31',
'2022-01-01', '2022-01-15', '2022-01-31']
test_parameters = dict(granularity=date.Granularity.DAY)
results = [date.to_string(value=parse(x + ' 23:59:59'), **test_parameters) for x in date_values]
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=date_values,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.MONTH)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2020-Dec', '2020-Dec', '2020-Dec',
'2021-Jan', '2021-Jan', '2021-Jan',
'2021-Feb', '2021-Feb', '2021-Feb',
'2021-Mar', '2021-Mar', '2021-Mar',
'2021-Apr', '2021-Apr', '2021-Apr',
'2021-May', '2021-May', '2021-May',
'2021-Jun', '2021-Jun', '2021-Jun',
'2021-Jul', '2021-Jul', '2021-Jul',
'2021-Aug', '2021-Aug', '2021-Aug',
'2021-Sep', '2021-Sep', '2021-Sep',
'2021-Oct', '2021-Oct', '2021-Oct',
'2021-Nov', '2021-Nov', '2021-Nov',
'2021-Dec', '2021-Dec', '2021-Dec',
'2022-Jan', '2022-Jan', '2022-Jan']
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=1)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2020-Q4', '2020-Q4', '2020-Q4', # 2020-Dec
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Jan
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Feb
'2021-Q1', '2021-Q1', '2021-Q1', # 2021-Mar
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-Apr
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-May
'2021-Q2', '2021-Q2', '2021-Q2', # 2021-Jun
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Jul
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Aug
'2021-Q3', '2021-Q3', '2021-Q3', # 2021-Sep
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Oct
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Nov
'2021-Q4', '2021-Q4', '2021-Q4', # 2021-Dec
'2022-Q1', '2022-Q1', '2022-Q1'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=2)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2021-FQ4', '2021-FQ4', '2021-FQ4', # 2020-Dec
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Jan
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Feb
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Mar
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Apr
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-May
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-Jun
'2022-FQ2', '2022-FQ2', '2022-FQ2', # 2021-Jul
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Aug
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Sep
'2022-FQ3', '2022-FQ3', '2022-FQ3', # 2021-Oct
'2022-FQ4', '2022-FQ4', '2022-FQ4', # 2021-Nov
'2022-FQ4', '2022-FQ4', '2022-FQ4', # 2021-Dec
'2022-FQ4', '2022-FQ4', '2022-FQ4'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
test_parameters = dict(granularity=date.Granularity.QUARTER, fiscal_start=12)
results = [date.to_string(value=parse(x), **test_parameters) for x in date_values]
expected = ['2021-FQ1', '2021-FQ1', '2021-FQ1', # 2020-Dec
'2021-FQ1', '2021-FQ1', '2021-FQ1', # 2021-Jan
'2021-FQ1', '2021-FQ1', '2021-FQ1', # 2021-Feb
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-Mar
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-Apr
'2021-FQ2', '2021-FQ2', '2021-FQ2', # 2021-May
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Jun
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Jul
'2021-FQ3', '2021-FQ3', '2021-FQ3', # 2021-Aug
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Sep
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Oct
'2021-FQ4', '2021-FQ4', '2021-FQ4', # 2021-Nov
'2022-FQ1', '2022-FQ1', '2022-FQ1', # 2021-Dec
'2022-FQ1', '2022-FQ1', '2022-FQ1'] # 2022-Jan
subtests_expected_vs_actual(test_case=self, actual_values=results, expected_values=expected,
**test_parameters)
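# Editor's note, not part of the original test file: a sketch of the formatting rules the
# expectations above imply (calendar quarters as 'YYYY-Qn', fiscal quarters as 'YYYY-FQn');
# this is NOT helpsk's actual implementation of date.to_string.
def _to_string_sketch(value, granularity, fiscal_start=1):
    if granularity is date.Granularity.DAY:
        return value.strftime('%Y-%m-%d')
    if granularity is date.Granularity.MONTH:
        return value.strftime('%Y-%b')
    quarter = (value.month - fiscal_start) % 12 // 3 + 1
    year = value.year + (1 if fiscal_start != 1 and value.month >= fiscal_start else 0)
    prefix = 'Q' if fiscal_start == 1 else 'FQ'
    return f'{year}-{prefix}{quarter}'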
def test_floor_missing_value(self):
self.assertTrue(date.floor(value=pd.NA, granularity=date.Granularity.DAY) is pd.NA)
self.assertTrue(date.floor(value=pd.NaT, granularity=date.Granularity.DAY) is pd.NaT)
self.assertTrue(date.floor(value=np.NaN, granularity=date.Granularity.DAY) is np.NaN)
self.assertTrue(date.floor(value=None, granularity=date.Granularity.DAY) is None) # noqa
self.assertTrue(date.floor(value=pd.NA, granularity=date.Granularity.MONTH) is pd.NA)
self.assertTrue(date.floor(value=pd.NaT, granularity=date.Granularity.MONTH) is pd.NaT)
self.assertTrue(date.floor(value=np.NaN, granularity=date.Granularity.MONTH) is np.NaN)
self.assertTrue(date.floor(value=None, granularity=date.Granularity.MONTH) is None) # noqa
self.assertTrue(date.floor(value=pd.NA, granularity=date.Granularity.QUARTER) is pd.NA)
self.assertTrue(date.floor(value=pd.NaT, granularity=date.Granularity.QUARTER) is pd.NaT)
self.assertTrue(date.floor(value=np.NaN, granularity=date.Granularity.QUARTER) is np.NaN)
self.assertTrue(date.floor(value=None, granularity=date.Granularity.QUARTER) is None) # noqa
def test_floor_day(self):
# test datetime
value = datetime.datetime(year=2021, month=2, day=13, hour=23, minute=45, second=55)
self.assertEqual(date.floor(value, granularity=date.Granularity.DAY),
parse('2021-02-13').date())
self.assertEqual(date.floor(value),
parse('2021-02-13').date())
# test date
value = datetime.date(year=2021, month=2, day=13)
self.assertEqual(date.floor(value, granularity=date.Granularity.DAY),
parse('2021-02-13').date())
self.assertEqual(date.floor(value),
parse('2021-02-13').date())
def test_floor_month(self):
# test datetime
value = datetime.datetime(year=2021, month=1, day=1, hour=23, minute=45, second=55)
self.assertEqual(date.floor(value, granularity=date.Granularity.MONTH),
parse('2021-01-01').date())
value = datetime.datetime(year=2021, month=1, day=31, hour=23, minute=45, second=55)
self.assertEqual(date.floor(value, granularity=date.Granularity.MONTH),
parse('2021-01-01').date())
value = datetime.datetime(year=2021, month=12, day=1, hour=23, minute=45, second=55)
self.assertEqual(date.floor(value, granularity=date.Granularity.MONTH),
parse('2021-12-01').date())
value = datetime.datetime(year=2021, month=12, day=31, hour=23, minute=45, second=55)
self.assertEqual(date.floor(value, granularity=date.Granularity.MONTH),
parse('2021-12-01').date())
# test date
self.assertEqual(date.floor(parse('2021-01-01'), granularity=date.Granularity.MONTH),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-01-31'), granularity=date.Granularity.MONTH),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-12-01'), granularity=date.Granularity.MONTH),
parse('2021-12-01').date())
self.assertEqual(date.floor(parse('2021-12-31'), granularity=date.Granularity.MONTH),
parse('2021-12-01').date())
def test_floor_quarter(self):
# default argument fiscal_start of 1
self.assertEqual(date.floor(parse('2021-01-01'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-01-31'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-02-01'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-02-28'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-03-01'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-03-31'), granularity=date.Granularity.QUARTER),
parse('2021-01-01').date())
self.assertEqual(date.floor(parse('2021-04-01'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-04-30'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-05-01'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-05-31'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-06-01'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-06-30'), granularity=date.Granularity.QUARTER),
parse('2021-04-01').date())
self.assertEqual(date.floor(parse('2021-07-01'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-07-31'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-08-01'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-08-31'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-09-01'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-09-30'), granularity=date.Granularity.QUARTER),
parse('2021-07-01').date())
self.assertEqual(date.floor(parse('2021-10-01'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
self.assertEqual(date.floor(parse('2021-10-31'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
self.assertEqual(date.floor(parse('2021-11-01'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
self.assertEqual(date.floor(parse('2021-11-30'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
self.assertEqual(date.floor(parse('2021-12-01'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
self.assertEqual(date.floor(parse('2021-12-31'), granularity=date.Granularity.QUARTER),
parse('2021-10-01').date())
# fiscal quarter starts in February
self.assertEqual(date.floor(parse('2021-01-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2020-11-01').date())
self.assertEqual(date.floor(parse('2021-01-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2020-11-01').date())
self.assertEqual(date.floor(parse('2021-02-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-02-28'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-03-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-03-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-04-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-04-30'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-05-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-05-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-06-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-06-30'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-07-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-07-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-08-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-08-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-09-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-09-30'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-10-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-10-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-11-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-11-30'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-12-01'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-12-31'), granularity=date.Granularity.QUARTER,
fiscal_start=2),
parse('2021-11-01').date())
# fiscal quarter starts in November (should be same as February)
self.assertEqual(date.floor(parse('2021-01-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2020-11-01').date())
self.assertEqual(date.floor(parse('2021-01-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2020-11-01').date())
self.assertEqual(date.floor(parse('2021-02-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-02-28'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-03-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-03-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-04-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-04-30'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-02-01').date())
self.assertEqual(date.floor(parse('2021-05-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-05-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-06-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-06-30'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-07-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-07-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-05-01').date())
self.assertEqual(date.floor(parse('2021-08-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-08-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-09-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-09-30'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-10-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-10-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-08-01').date())
self.assertEqual(date.floor(parse('2021-11-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-11-30'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-12-01'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-11-01').date())
self.assertEqual(date.floor(parse('2021-12-31'), granularity=date.Granularity.QUARTER,
fiscal_start=11),
parse('2021-11-01').date())
# fiscal quarter starts in June
self.assertEqual(date.floor(parse('2021-01-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2020-12-01').date())
self.assertEqual(date.floor(parse('2021-01-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2020-12-01').date())
self.assertEqual(date.floor(parse('2021-02-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2020-12-01').date())
self.assertEqual(date.floor(parse('2021-02-28'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2020-12-01').date())
self.assertEqual(date.floor(parse('2021-03-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-03-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-04-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-04-30'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-05-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-05-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-03-01').date())
self.assertEqual(date.floor(parse('2021-06-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-06-30'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-07-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-07-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-08-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-08-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-06-01').date())
self.assertEqual(date.floor(parse('2021-09-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-09-30'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-10-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-10-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-11-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-11-30'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-09-01').date())
self.assertEqual(date.floor(parse('2021-12-01'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-12-01').date())
self.assertEqual(date.floor(parse('2021-12-31'), granularity=date.Granularity.QUARTER,
fiscal_start=6),
parse('2021-12-01').date())
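# Editor's note, not part of the original test file: a minimal sketch of the quarter-flooring
# behaviour exercised above (snap to the first day of the current fiscal quarter); this is NOT
# helpsk's actual implementation of date.floor.
def _floor_quarter_sketch(value, fiscal_start=1):
    offset = (value.month - fiscal_start) % 3  # months into the current fiscal quarter
    month, year = value.month - offset, value.year
    if month < 1:
        month, year = month + 12, year - 1
    return datetime.date(year, month, 1)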
def test_floor_series(self):
date_series = pd.Series(pd.to_datetime([
'2021-01-01 00:00:00', '2021-01-01 00:00:01',
np.NaN,
'2021-01-02 00:00:01', '2021-01-02 23:59:59',
'2021-02-01 00:00:00', '2021-02-01 00:00:01',
np.NaN,
'2021-02-02 00:00:01', '2021-02-02 23:59:59',
'2021-03-01 00:00:00', '2021-03-01 00:00:01',
np.NaN,
'2021-03-02 00:00:01', '2021-03-02 23:59:59',
'2021-04-01 00:00:00', '2021-04-01 00:00:01',
np.NaN,
'2021-04-02 00:00:01', '2021-04-02 23:59:59',
'2021-05-01 00:00:00', '2021-05-01 00:00:01',
np.NaN,
'2021-05-02 00:00:01', '2021-05-02 23:59:59',
'2021-06-01 00:00:00', '2021-06-01 00:00:01',
np.NaN,
'2021-06-02 00:00:01', '2021-06-02 23:59:59',
'2021-07-01 00:00:00', '2021-07-01 00:00:01',
np.NaN,
'2021-07-02 00:00:01', '2021-07-02 23:59:59',
'2021-08-01 00:00:00', '2021-08-01 00:00:01',
np.NaN,
'2021-08-02 00:00:01', '2021-08-02 23:59:59',
'2021-09-01 00:00:00', '2021-09-01 00:00:01',
np.NaN,
'2021-09-02 00:00:01', '2021-09-02 23:59:59',
'2021-10-01 00:00:00', '2021-10-01 00:00:01',
np.NaN,
'2021-10-02 00:00:01', '2021-10-02 23:59:59',
'2021-11-01 00:00:00', '2021-11-01 00:00:01',
np.NaN,
'2021-11-02 00:00:01', '2021-11-02 23:59:59',
'2021-12-01 00:00:00', '2021-12-01 00:00:01',
np.NaN,
'2021-12-02 00:00:01', '2021-12-02 23:59:59',
]))
expected_day = pd.Series(pd.to_datetime([
'2021-01-01', '2021-01-01',
np.NaN,
'2021-01-02', '2021-01-02',
'2021-02-01', '2021-02-01',
np.NaN,
'2021-02-02', '2021-02-02',
'2021-03-01', '2021-03-01',
np.NaN,
'2021-03-02', '2021-03-02',
'2021-04-01', '2021-04-01',
np.NaN,
'2021-04-02', '2021-04-02',
'2021-05-01', '2021-05-01',
np.NaN,
'2021-05-02', '2021-05-02',
'2021-06-01', '2021-06-01',
np.NaN,
'2021-06-02', '2021-06-02',
'2021-07-01', '2021-07-01',
np.NaN,
'2021-07-02', '2021-07-02',
'2021-08-01', '2021-08-01',
np.NaN,
'2021-08-02', '2021-08-02',
'2021-09-01', '2021-09-01',
np.NaN,
'2021-09-02', '2021-09-02',
'2021-10-01', '2021-10-01',
np.NaN,
'2021-10-02', '2021-10-02',
'2021-11-01', '2021-11-01',
np.NaN,
'2021-11-02', '2021-11-02',
'2021-12-01', '2021-12-01',
np.NaN,
'2021-12-02', '2021-12-02',
]))
expected_month = pd.Series(pd.to_datetime([
'2021-01-01', '2021-01-01',
np.NaN,
'2021-01-01', '2021-01-01',
'2021-02-01', '2021-02-01',
np.NaN,
'2021-02-01', '2021-02-01',
'2021-03-01', '2021-03-01',
np.NaN,
'2021-03-01', '2021-03-01',
'2021-04-01', '2021-04-01',
np.NaN,
'2021-04-01', '2021-04-01',
'2021-05-01', '2021-05-01',
np.NaN,
'2021-05-01', '2021-05-01',
'2021-06-01', '2021-06-01',
np.NaN,
'2021-06-01', '2021-06-01',
'2021-07-01', '2021-07-01',
np.NaN,
'2021-07-01', '2021-07-01',
'2021-08-01', '2021-08-01',
np.NaN,
'2021-08-01', '2021-08-01',
'2021-09-01', '2021-09-01',
np.NaN,
'2021-09-01', '2021-09-01',
'2021-10-01', '2021-10-01',
np.NaN,
'2021-10-01', '2021-10-01',
'2021-11-01', '2021-11-01',
np.NaN,
'2021-11-01', '2021-11-01',
'2021-12-01', '2021-12-01',
np.NaN,
'2021-12-01', '2021-12-01',
]))
expected_quarter = pd.Series(pd.to_datetime([
'2021-01-01', '2021-01-01',
np.NaN,
'2021-01-01', '2021-01-01',
'2021-01-01', '2021-01-01',
np.NaN,
'2021-01-01', '2021-01-01',
'2021-01-01', '2021-01-01',
np.NaN,
'2021-01-01', '2021-01-01',
'2021-04-01', '2021-04-01',
np.NaN,
'2021-04-01', '2021-04-01',
'2021-04-01', '2021-04-01',
np.NaN,
'2021-04-01', '2021-04-01',
'2021-04-01', '2021-04-01',
np.NaN,
'2021-04-01', '2021-04-01',
'2021-07-01', '2021-07-01',
np.NaN,
'2021-07-01', '2021-07-01',
'2021-07-01', '2021-07-01',
np.NaN,
'2021-07-01', '2021-07-01',
'2021-07-01', '2021-07-01',
np.NaN,
'2021-07-01', '2021-07-01',
'2021-10-01', '2021-10-01',
np.NaN,
'2021-10-01', '2021-10-01',
'2021-10-01', '2021-10-01',
np.NaN,
'2021-10-01', '2021-10-01',
'2021-10-01', '2021-10-01',
np.NaN,
'2021-10-01', '2021-10-01',
]))
# without series.name
validation.assert_dataframes_match([
pd.DataFrame(date_series.dt.date),
pd.DataFrame(expected_day.dt.date),
pd.DataFrame(date.floor(date_series, granularity=date.Granularity.DAY))
])
validation.assert_dataframes_match([
pd.DataFrame(expected_month.dt.date),
pd.DataFrame(date.floor(date_series, granularity=date.Granularity.MONTH))
])
validation.assert_dataframes_match([
pd.DataFrame(expected_quarter.dt.date),
pd.DataFrame(date.floor(date_series, granularity=date.Granularity.QUARTER))
])
# with series.name
date_series.name = 'date_day'
expected_day.name = 'date_day'
actual_values = date.floor(date_series, granularity=date.Granularity.DAY)
self.assertEqual(actual_values.name, 'date_day')
validation.assert_dataframes_match([
pd.DataFrame(expected_day.dt.date),
pd.DataFrame(actual_values)
])
date_series.name = 'date_month'
expected_day.name = 'date_month'
actual_values = date.floor(date_series, granularity=date.Granularity.MONTH)
self.assertEqual(actual_values.name, 'date_month')
validation.assert_dataframes_match([
pd.DataFrame(expected_month.dt.date),
pd.DataFrame(actual_values)
])
date_series.name = 'date_quarter'
expected_day.name = 'date_quarter'
actual_values = date.floor(date_series, granularity=date.Granularity.QUARTER)
self.assertEqual(actual_values.name, 'date_quarter')
validation.assert_dataframes_match([
|
pd.DataFrame(expected_quarter.dt.date)
|
pandas.DataFrame
|
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import numpy as np
import itertools
def cramers_corrected_stat(confusion_matrix):
""" calculate Cramers V statistic for categorical-categorical association.
uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
"""
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2/n
r,k = confusion_matrix.shape
phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
kcorr = k - ((k-1)**2)/(n-1)
return np.sqrt(phi2corr / min( (kcorr-1), (rcorr-1)))
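def _cramers_v_example():
    # Editor's addition, not part of the original app: a minimal usage sketch of
    # cramers_corrected_stat on a toy contingency table built with pd.crosstab.
    toy = pd.DataFrame({'color': ['red', 'red', 'blue', 'blue', 'green', 'green'],
                        'size': ['S', 'L', 'S', 'L', 'S', 'L']})
    confusion = pd.crosstab(toy['color'], toy['size'])
    return cramers_corrected_stat(confusion)  # close to 0 because the two columns are unrelated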
# Page layout
## Page expands to full width
st.set_page_config(page_title='Data Science App',
layout='wide')
# Model building
def build_model(data):
sns.set_style('darkgrid')
global target_variable
st.markdown('**1.2- Dataset general info**')
st.text('Dataset shape:')
st.text(data.shape)
categorical_attributes = list(data.select_dtypes(include=['object']).columns)
st.text("Categorical Variables:")
st.text(categorical_attributes)
numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
st.text("Numerical Variables:")
st.text(numerical_attributes)
st.markdown('**1.3- Duplicated values**')
st.text(data.duplicated().sum())
st.markdown('**1.4- Missing values**')
st.text(data.isnull().sum())
st.markdown('**1.5- Unique values in the Categorical Variables**')
for col_name in data.columns:
if data[col_name].dtypes == 'object':
unique_cat = len(data[col_name].unique())
st.text("Feature '{col_name}' has {unique_cat} unique categories".format(col_name=col_name, unique_cat=unique_cat))
st.subheader('2- Exploratory Data Analysis (EDA)')
hue = target_variable
st.markdown('**2.1- Descriptive Statistics**')
st.text(data.describe())
st.markdown('**2.2- Outlier detection by Boxplot**')
if len(numerical_attributes) == 0:
st.text('There is no numerical variable')
else:
for a in numerical_attributes:
st.text(a)
fig = plt.figure(figsize = (20,10))
sns.boxplot(data[a])
st.pyplot(fig)
if data[target_variable].dtypes == 'O':
catplots(data)
else:
if len(data[target_variable].unique()) > 5:
numplots(data)
else:
catplots(data)
def catplots(data):
sns.set_style('darkgrid')
global target_variable
hue = target_variable
categorical_attributes = list(data.select_dtypes(include=['object']).columns)
numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
st.markdown('**2.3- Target Variable plot**')
st.text("Target variable:" + hue)
fig = plt.figure(figsize = (20,10))
ax = sns.countplot(data[hue])
for p in ax.patches:
height = p.get_height()
ax.text(x = p.get_x()+(p.get_width()/2), y = height*1.01, s = '{:.0f}'.format(height), ha = 'center')
st.pyplot(fig)
st.markdown('**2.4- Numerical Variables**')
#fig = plt.figure(figsize = (5,5))
#sns.pairplot(data, hue = hue)
#st.pyplot(fig)
st.markdown('***2.4.1- Correlation***')
try:
fig = plt.figure(figsize = (20,10))
sns.heatmap(data.corr(), cmap = 'Blues', annot = True)
st.pyplot(fig)
except:
st.text('There is no numerical variable')
st.markdown('***2.4.2- Distributions***')
for a in numerical_attributes:
st.text(a)
fig = plt.figure(figsize = (20,10))
sns.histplot(data = data , x =a , kde = True, hue = hue)
st.pyplot(fig)
st.markdown('**2.5- Categorical Variables**')
if len(categorical_attributes) == 0:
st.text('There is no categorical variable')
else:
for a in categorical_attributes:
if a == hue:
pass
else:
if len(data[a].unique()) < 13:
st.text(a)
fig = plt.figure()
g = sns.catplot(data = data, x = a, kind = 'count', col = hue, sharey=False)
for i in range(data[hue].nunique()):
ax = g.facet_axis(0,i)
for p in ax.patches:
height = p.get_height()
ax.text(x = p.get_x()+(p.get_width()/2), y = height * 1.01 , s = '{:.0f}'.format(height), ha = 'center')
g.set_xticklabels(rotation=90)
st.pyplot(g)
st.markdown('***2.5.1 - Correlation between categorical***')
corrM = np.zeros((len(categorical_attributes),len(categorical_attributes)))
for col1, col2 in itertools.combinations(categorical_attributes, 2):
idx1, idx2 = categorical_attributes.index(col1), categorical_attributes.index(col2)
corrM[idx1, idx2] = cramers_corrected_stat(pd.crosstab(data[col1], data[col2]))
corrM[idx2, idx1] = corrM[idx1, idx2]
corr = pd.DataFrame(corrM, index=categorical_attributes, columns=categorical_attributes)
fig = plt.figure(figsize=(20, 10))
sns.heatmap(corr, annot=True, cmap = 'Blues')
plt.title("Cramer V Correlation between Variables")
st.pyplot(fig)
def numplots(data):
sns.set_style('darkgrid')
global target_variable
hue = target_variable
categorical_attributes = list(data.select_dtypes(include=['object']).columns)
numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
st.markdown('**2.3- Target Variable plot**')
st.text("Target variable:" + hue)
fig = plt.figure(figsize = (20,10))
sns.histplot(data = data , x = hue , kde = True)
st.pyplot(fig)
st.markdown('**2.4- Numerical Variables**')
if len(numerical_attributes) == 0:
st.text('There is no numerical variable')
else:
for a in numerical_attributes:
if a == hue:
pass
else:
st.text(a)
fig = plt.figure(figsize = (20,10))
fig = sns.lmplot(data = data, x = a, y = hue)
st.pyplot(fig)
st.markdown('**2.5- Categorical Variables**')
if len(categorical_attributes) == 0:
st.text('There is no categorical variable')
else:
for a in categorical_attributes:
if a == hue:
pass
else:
if len(data[a].unique()) < 13:
st.text(a)
fig = plt.figure(figsize = (20,10))
sns.kdeplot(data = data, x = hue ,hue = a)
st.pyplot(fig)
st.markdown('***2.5.1 - Correlation between categorical***')
corrM = np.zeros((len(categorical_attributes),len(categorical_attributes)))
for col1, col2 in itertools.combinations(categorical_attributes, 2):
idx1, idx2 = categorical_attributes.index(col1), categorical_attributes.index(col2)
corrM[idx1, idx2] = cramers_corrected_stat(
|
pd.crosstab(data[col1], data[col2])
|
pandas.crosstab
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
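# value_counts sorts by count in descending order, so the most frequent
# timestamp (18:00, repeated 10 times) comes first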
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
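# round-tripping the raw int64 values with freq='infer' should recover
# the original frequency string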
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
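# Same battery of ops tests as TestDatetimeIndexOps above, exercised on
# TimedeltaIndex: reprs/summaries, offset and integer arithmetic, set-like
# ops, take/sort/shift, and NaT handling.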
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
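# building a TimedeltaIndex from datetime64 values reinterprets them as
# offsets from the epoch, so 2016-01-15 corresponds to Timedelta('16815 days')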
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
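# a 1..10 day range divided by a 2-hour offset gives 12, 24, ..., 120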
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
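# Ops tests for PeriodIndex: reprs/summaries, resolution, union/difference,
# and offset arithmetic with frequency-compatibility checks.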
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx,
|
Series(idx)
|
pandas.Series
|
import pandas as pd
import matplotlib.pyplot as plt
import re
map_length = 16
token = ':'
comment = '#'
name = 'name'
path = "/Users/chuan/Project/artificial_intelligence_project/outputs/"
#%% Read in data
data = list()
with open(path + 'analysis.txt') as f:
while True:
        # reached end of the file
line = f.readline()
if not line:
break
# store information
map_info = dict()
# read in the map
diagram = list()
for i in range(map_length):
diagram.append(line)
line = f.readline()
map_info['diagram'] = ''.join(diagram)
# skip everything else
while line[0] == comment:
line = f.readline()
# read in the important information
for i in range(3):
key, value = line[:-1].split(token)
if key == 'name':
a = re.match(r".*sample(\d*).json", value)
value = a.group(1)
map_info[key] = value
line = f.readline()
while line and line[0] != comment:
line = f.readline()
# store
data.append(map_info)
#%% Clean
types = {'n_nodes' : int, 'n_steps' : int, 'name' : str, 'diagram' : str}
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# finpie - a simple library to download some financial data
# https://github.com/peterlacour/finpie
#
# Copyright (c) 2020 <NAME>
#
# Licensed under the MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import time
import pandas as pd
from bs4 import BeautifulSoup as bs
from requests_html import HTMLSession
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from finpie.base import DataBase
class Earnings(DataBase):
def __init__(self, ticker):
DataBase.__init__(self)
self.ticker = ticker
def transcripts(self, html = True):
'''
....
'''
url = 'https://www.fool.com/'
driver = self._load_driver('none')
try:
driver.get(url)
try:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@id="gdpr-modal-background"]')))
element = driver.find_element_by_xpath('//div[@id="gdpr-modal-background"]')
self._delete_element(driver, element)
element = driver.find_element_by_xpath('//div[@id="gdpr-modal-content"]')
self._delete_element(driver, element)
except:
pass
element = driver.find_element_by_xpath('//input[@class="ticker-input-input"]')
element.clear()
element.send_keys(self.ticker)
time.sleep(0.2)
element.send_keys(' ')
time.sleep(1)
element.send_keys(Keys.RETURN)
try:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@id="gdpr-modal-background"]')))
element = driver.find_element_by_xpath('//div[@id="gdpr-modal-background"]')
self._delete_element(driver, element)
element = driver.find_element_by_xpath('//div[@id="gdpr-modal-content"]')
self._delete_element(driver, element)
except:
pass
element = driver.find_element_by_xpath('//a[@id="earnings"]')
self._scroll_to_element(driver, element)
element.click()
bool = True
while bool:
try:
element = driver.find_element_by_xpath('//div[@id="quote_page_earnings_listing"]//button[@id="load-more"]')
self._scroll_to_element(driver, element)
element.click()
except:
bool = False
links = [ l.get_attribute('href') for l in driver.find_elements_by_xpath('//div[@id="quote_page_earnings_listing"]//a[@data-id="article-list-hl"]') ]
driver.quit()
except:
print('Failed..')
driver.quit()
return None
session = HTMLSession()
df = []
for link in links:
r = session.get(link)
soup = bs(r.content, 'html5lib')
#date = soup.find('span', class_ = 'article-content').find('span', id = 'date').text
text = soup.find('span', class_ = 'article-content').find_all(['h2', 'p'])[3:]
headings = [ i for i, h in enumerate(text) if '<h2>' in str(h) ]
temp = []
for i in range(1,len(headings)):
temp.append( ' \n '.join([ t.text for t in text[headings[i-1]:headings[i]] ]) )
temp.append( ' \n '.join([ t.text for t in text[headings[-1]:]] ) )
temp = { t.split(':')[0].lower().replace(' ', '_').replace('&', 'and'): ' \n '.join(t.split(' \n ')[1:]) for t in temp if t.split(':')[0].lower() != 'contents'}
temp['ticker'] = self.ticker
if html:
temp['html'] = ' '.join([ str(t) for t in text ])
pattern = re.compile('([12]\d{3}/(0[0-9]|1[0-9])/(0[0-9]|[12]\d|3[01]))')
date = pattern.search( link )[0]
temp['date'] = date
text = soup.find('span', class_ = 'article-content').find_all('p')[1].text
if text == 'Image source: The Motley Fool.':
text = soup.find('span', class_ = 'article-content').find_all('p')[2].find('em').text
temp['time'] = text
else:
try:
text = soup.find('span', class_ = 'article-content').find_all('p')[1].find('em').text
temp['time'] = text
except:
temp['time'] = soup.find('span', class_ = 'article-content').find_all('p')[1].text.split(',')[-1].strip()
#soup.find('span', class_ = 'article-content').find('em', id = 'time').text
text = soup.find('span', class_ = 'article-content').find_all(['h2', 'p'])[1].text
if text == 'Image source: The Motley Fool.':
text = soup.find('span', class_ = 'article-content').find_all(['h2', 'p'])[2].text
try:
pattern = re.compile('(Q\d\ \d{4})')
temp['quarter'] = pattern.search(text)[0]
except:
pattern = re.compile('(Q\d\\xa0\d{4})')
temp['quarter'] = pattern.search(text)[0].replace(u'\xa0', u' ')
temp['link'] = link # need to add this to access in browser?
df.append( pd.DataFrame( temp, index = [date] ) )
df =
|
pd.concat(df)
|
pandas.concat
|
import numpy as np
from numpy.random import seed
seed(1)
import pandas as pd
from math import sqrt
from sklearn.decomposition import PCA
######################################################################
# METRICS
######################################################################
def mse(y, y_hat):
"""
Calculates Mean Squared Error.
MSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MSE
"""
mse = np.mean(np.square(y - y_hat))
return mse
def rmse(y, y_hat):
"""
Calculates Root Mean Squared Error.
RMSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Finally the RMSE will be in the same scale
as the original time series so its comparison with other
series is possible only if they share a common scale.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: RMSE
"""
rmse = sqrt(np.mean(np.square(y - y_hat)))
return rmse
def mape(y, y_hat):
"""
Calculates Mean Absolute Percentage Error.
MAPE measures the relative prediction accuracy of a
forecasting method by calculating the percentual deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MAPE
"""
mape = np.mean(np.abs(y - y_hat) / np.abs(y))
mape = 100 * mape
return mape
def smape(y, y_hat):
"""
Calculates Symmetric Mean Absolute Percentage Error.
SMAPE measures the relative prediction accuracy of a
forecasting method by calculating the relative deviation
of the prediction and the true value scaled by the sum of the
absolute values for the prediction and true value at a
    given time, then averages these deviations over the length
of the series. This allows the SMAPE to have bounds between
    0% and 200% which is desirable compared to normal MAPE that
    may be undefined.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: SMAPE
"""
smape = np.mean(np.abs(y - y_hat) / (np.abs(y) + np.abs(y_hat)))
smape = 200 * smape
return smape
def mase(y, y_hat, y_train, seasonality=1):
"""
Calculates the M4 Mean Absolute Scaled Error.
MASE measures the relative prediction accuracy of a
    forecasting method by comparing the mean absolute errors
of the prediction and the true value against the mean
absolute errors of the seasonal naive model.
y: numpy array
actual test values
y_hat: numpy array
predicted values
y_train: numpy array
actual train values for Naive1 predictions
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: MASE
"""
scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
mase = np.mean(abs(y - y_hat)) / scale
mase = 100 * mase
return mase
def rmsse(y, y_hat, y_train, seasonality=1):
"""
Calculates the M5 Root Mean Squared Scaled Error.
    RMSSE measures the relative prediction accuracy of a
    forecasting method by comparing the mean squared errors
    of the prediction and the true value against the mean
    squared errors of the seasonal naive model.
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: RMSSE
"""
scale = np.mean(np.square(y_train[seasonality:] - y_train[:-seasonality]))
rmsse = sqrt(mse(y, y_hat) / scale)
rmsse = 100 * rmsse
return rmsse
def pinball_loss(y, y_hat, tau=0.5):
"""
Calculates the Pinball Loss.
The Pinball loss measures the deviation of a quantile forecast.
By weighting the absolute deviation in a non symmetric way, the
loss pays more attention to under or over estimation.
A common value for tau is 0.5 for the deviation from the median.
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
tau: float
Fixes the quantile against which the predictions are compared.
return: pinball_loss
"""
delta_y = y - y_hat
pinball = np.maximum(tau * delta_y, (tau-1) * delta_y)
pinball = pinball.mean()
    return pinball
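# A small, self-contained sketch (toy numbers, not benchmark data) showing how
# the point-forecast metrics defined above are typically called together.
# Wrapped in a helper so importing this module has no side effects.
def _demo_metrics():
    y_train = np.array([10.0, 12.0, 11.0, 13.0, 12.0, 14.0])
    y = np.array([15.0, 14.0, 16.0])
    y_hat = np.array([14.0, 15.0, 15.0])
    return {
        'mse': mse(y, y_hat),
        'rmse': rmse(y, y_hat),
        'mape': mape(y, y_hat),
        'smape': smape(y, y_hat),
        'mase': mase(y, y_hat, y_train, seasonality=1),
        'rmsse': rmsse(y, y_hat, y_train, seasonality=1),
        'pinball': pinball_loss(y, y_hat, tau=0.5),
    }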
def evaluate_panel(y_test, y_hat, y_train,
metric, seasonality):
"""
Calculates a specific metric for y and y_hat
y_test: pandas df
df with columns unique_id, ds, y
y_hat: pandas df
df with columns unique_id, ds, y_hat
y_train: pandas df
df with columns unique_id, ds, y (train)
this is used in the scaled metrics
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: list of metric evaluations for each unique_id
in the panel data
"""
metric_name = metric.__code__.co_name
uids = y_test.index.get_level_values('unique_id').unique()
y_hat_uids = y_hat.index.get_level_values('unique_id').unique()
assert len(y_test)==len(y_hat), "not same length"
assert all(uids == y_hat_uids), "not same u_ids"
idxs, evaluations = [], []
for uid in uids:
y_test_uid = y_test.loc[uid].values
y_hat_uid = y_hat.loc[uid].values
y_train_uid = y_train.loc[uid].y.values
if metric_name in ['mase', 'rmsse']:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid, seasonality=seasonality)
else:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid)
idxs.append(uid)
evaluations.append(evaluation_uid)
idxs = pd.Index(idxs, name='unique_id')
evaluations = pd.Series(evaluations, index=idxs)
return evaluations
def compute_evaluations(y_test, y_hat, y_train, metrics, seasonality): #, progress_bar
"""
Calculates all metrics in list for y and y_hat panel data,
and creates rank based on PCA dimensionality reduction.
y_test: pandas df
df with columns unique_id, ds, y
y_hat: pandas df
df with columns unique_id, ds, y_hat
y_train: pandas df
df with columns unique_id, ds, y (train)
this is used in the scaled metrics
metrics: list
list of strings containing all metrics to compute
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: list of metric evaluations
"""
print("\n Evaluating models")
evaluations = {}
for metric_name, metric in metrics.items():
print(metric_name)
for col in y_hat.columns:
mod_evaluation = evaluate_panel(y_test=y_test, y_hat=y_hat[col],
y_train=y_train, metric=metric,
seasonality=seasonality)
mod_evaluation.name = y_hat[col].name
if not (metric_name in evaluations.keys()):
evaluations[metric_name] = [mod_evaluation]
else:
evaluations[metric_name].append(mod_evaluation)
#progress_bar['value']+=1
#progress_bar.update()
# Collapse Metrics
for metric_name, metric in metrics.items():
evaluations[metric_name] =
|
pd.concat(evaluations[metric_name], axis=1)
|
pandas.concat
|
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
import pandas as pd
from scipy.sparse.csgraph import laplacian
from scipy.linalg import eigh
from scipy.integrate import quad
from sklearn.metrics import pairwise_distances
import warnings
import numba
from numba import jit, float32
def IM_dist(G1, G2):
adj1 = nx.to_numpy_array(G1)
adj2 = nx.to_numpy_array(G2)
hwhm = 0.08
N = len(adj1)
# get laplacian matrix
L1 = laplacian(adj1, normed=False)
L2 = laplacian(adj2, normed=False)
# get the modes for the positive-semidefinite laplacian
w1 = np.sqrt(np.abs(eigh(L1)[0][1:]))
w2 = np.sqrt(np.abs(eigh(L2)[0][1:]))
# we calculate the norm for both spectrum
norm1 = (N - 1) * np.pi / 2 - np.sum(np.arctan(-w1 / hwhm))
norm2 = (N - 1) * np.pi / 2 - np.sum(np.arctan(-w2 / hwhm))
# define both spectral densities
density1 = lambda w: np.sum(hwhm / ((w - w1) ** 2 + hwhm ** 2)) / norm1
density2 = lambda w: np.sum(hwhm / ((w - w2) ** 2 + hwhm ** 2)) / norm2
func = lambda w: (density1(w) - density2(w)) ** 2
return np.sqrt(quad(func, 0, np.inf, limit=100)[0])
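# A minimal sketch (not part of the original module) of how IM_dist can be
# called: it only needs two networkx graphs with the same number of nodes.
# The toy graphs below are arbitrary illustrations.
def _demo_im_dist():
    G_a = nx.path_graph(6)
    G_b = nx.cycle_graph(6)
    return IM_dist(G_a, G_b)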
def build_milestone_net(subgraph, init_node):
'''
Args:
subgraph - a connected component of the graph, csr_matrix
init_node - root node
Returns:
df_subgraph - dataframe of milestone network
'''
if len(subgraph)==1:
warnings.warn('Singular node.')
return []
else:
# Dijkstra's Algorithm
unvisited = {node: {'parent':None,
'score':np.inf,
'distance':np.inf} for node in subgraph.nodes}
current = init_node
currentScore = 0
currentDistance = 0
unvisited[current]['score'] = currentScore
milestone_net = []
while True:
for neighbour in subgraph.neighbors(current):
if neighbour not in unvisited: continue
newScore = currentScore + subgraph[current][neighbour]['weight']
if unvisited[neighbour]['score'] > newScore:
unvisited[neighbour]['score'] = newScore
unvisited[neighbour]['parent'] = current
unvisited[neighbour]['distance'] = currentDistance+1
if len(unvisited)<len(subgraph):
milestone_net.append([unvisited[current]['parent'],
current,
unvisited[current]['distance']])
del unvisited[current]
if not unvisited: break
current, currentScore, currentDistance = \
sorted([(i[0],i[1]['score'],i[1]['distance']) for i in unvisited.items()],
key = lambda x: x[1])[0]
return np.array(milestone_net)
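# Hypothetical example (toy weighted graph, made-up weights) of
# build_milestone_net: starting from a root node it returns rows of
# [parent, node, distance-from-root] for the connected component.
def _demo_build_milestone_net():
    G = nx.Graph()
    G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 0.5), (0, 3, 2.0)])
    return build_milestone_net(G, 0)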
def comp_pseudotime(G, node, df):
connected_comps = nx.node_connected_component(G, node)
subG = G.subgraph(connected_comps)
milestone_net = build_milestone_net(subG,node)
# compute pseudotime
pseudotime = - np.ones(len(df))
for i in range(len(milestone_net)):
_from, _to = milestone_net[i,:2]
_from, _to = int(_from), int(_to)
idc = (df['from']==_from)&(df['to']==_to)
if np.sum(idc)>0:
pseudotime[idc] = df['percentage'].values[idc] + milestone_net[i,-1] - 1
idc = (df['from']==_to)&(df['to']==_from)
if np.sum(idc)>0:
pseudotime[idc] = 1-df['percentage'].values[idc] + milestone_net[i,-1] - 1
if np.any(df['from']==_from):
idc = (df['from']==_from)&(df['to']==_from)
pseudotime[idc] = milestone_net[i,-1] - 1
if len(milestone_net)>0 and np.any((df['from']==_to)&(df['to']==_to)):
idc = (df['from']==_to)&(df['to']==_to)
pseudotime[idc] = milestone_net[i,-1]
return pseudotime
def topology(G_true, G_pred, is_GED=True):
res = {}
# 1. Isomorphism with same initial node
def comparison(N1, N2):
if N1['is_init'] != N2['is_init']:
return False
else:
return True
score_isomorphism = int(nx.is_isomorphic(G_true, G_pred, node_match=comparison))
res['ISO score'] = score_isomorphism
# 2. GED (graph edit distance)
if len(G_true)>10 or len(G_pred)>10:
warnings.warn("Didn't calculate graph edit distances for large graphs.")
res['GED score'] = np.nan
else:
max_num_oper = len(G_true)
score_GED = 1 - np.min([nx.graph_edit_distance(G_pred, G_true, node_match=comparison),
max_num_oper]) / max_num_oper
res['GED score'] = score_GED
# 3. Hamming-Ipsen-Mikhailov distance
if len(G_true)==len(G_pred):
score_IM = 1-IM_dist(G_true, G_pred)
score_IM = np.maximum(0, score_IM)
else:
score_IM = 0
res['score_IM'] = score_IM
return res
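# Small illustrative call (assumed toy graphs) for topology(): both graphs
# need a boolean 'is_init' node attribute marking the root milestone.
def _demo_topology():
    G_true = nx.path_graph(3)
    nx.set_node_attributes(G_true, {0: True, 1: False, 2: False}, 'is_init')
    G_pred = G_true.copy()
    return topology(G_true, G_pred)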
@jit((float32[:,:], float32[:,:]), nopython=True, nogil=True)
def _rand_index(true, pred):
n = true.shape[0]
m_true = true.shape[1]
m_pred = pred.shape[1]
RI = 0.0
for i in range(1, n-1):
for j in range(i, n):
RI_ij = 0.0
for k in range(m_true):
RI_ij += true[i,k]*true[j,k]
for k in range(m_pred):
RI_ij -= pred[i,k]*pred[j,k]
RI += 1-np.abs(RI_ij)
return RI / (n*(n-1)/2.0)
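# Tiny illustrative call (toy one-hot memberships, assumed data) for the
# jit-compiled helper above; both arguments must be 2-D float32 arrays to
# match the numba signature.
def _demo_rand_index():
    true = np.eye(4, dtype=np.float32)
    pred = np.eye(4, dtype=np.float32)
    return _rand_index(true, pred)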
def get_GRI(true, pred):
'''
Params:
        true - [n_samples, n_cluster_1] for proportions or [n_samples, ] for grouping
pred - [n_samples, n_cluster_2] for estimated proportions
'''
if len(true)!=len(pred):
raise ValueError('Inputs should have same lengths!')
if len(true.shape)==1:
true =
|
pd.get_dummies(true)
|
pandas.get_dummies
|
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
TimeSeriesCalcs
Calculations on time series, such as calculating strategy returns and various wrappers on pandas for rolling sums etc.
"""
import pandas
import math
import datetime
import functools
import numpy
import pandas.tseries.offsets
from pythalesians.util.calendar import Calendar
from pythalesians.timeseries.calcs.timeseriesfilter import TimeSeriesFilter
from pandas.stats.api import ols
class TimeSeriesCalcs:
def calculate_signal_tc(self, signal_data_frame, tc, period_shift = 1):
"""
calculate_signal_tc - Calculates the transaction costs for a particular signal
Parameters
----------
signal_data_frame : DataFrame
contains trading signals
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return (signal_data_frame.shift(period_shift) - signal_data_frame).abs().multiply(tc)
def calculate_entry_tc(self, entry_data_frame, tc, period_shift = 1):
"""
calculate_entry_tc - Calculates the transaction costs for defined trading points
Parameters
----------
entry_data_frame : DataFrame
contains points where we enter/exit trades
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return entry_data_frame.abs().multiply(tc)
def calculate_signal_returns(self, signal_data_frame, returns_data_frame, period_shift = 1):
"""
        calculate_signal_returns - Calculates the trading strategy returns for given signal and asset
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return signal_data_frame.shift(period_shift) * returns_data_frame
def calculate_individual_trade_gains(self, signal_data_frame, strategy_returns_data_frame):
"""
calculate_individual_trade_gains - Calculates profits on every trade
Parameters
----------
signal_data_frame : DataFrame
trading signals
strategy_returns_data_frame: DataFrame
returns of strategy to be tested
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
        # signals need to be aligned to the NEXT period for returns
signal_data_frame_pushed = signal_data_frame.shift(1)
# find all the trade points
trade_points = ((signal_data_frame - signal_data_frame.shift(1)).abs())
cumulative = self.create_mult_index(strategy_returns_data_frame)
indices = trade_points > 0
indices.columns = cumulative.columns
# get P&L for every trade (from the end point - start point)
trade_returns = numpy.nan * cumulative
trade_points_cumulative = cumulative[indices]
# for each set of signals/returns, calculate the trade returns - where there isn't a trade
# assign a NaN
# TODO do in one vectorised step without for loop
for col_name in trade_points_cumulative:
col = trade_points_cumulative[col_name]
col = col.dropna()
col = col / col.shift(1) - 1
# TODO experiment with quicker ways of writing below?
# for val in col.index:
# trade_returns.set_value(val, col_name, col[val])
# trade_returns.ix[val, col_name] = col[val]
date_indices = trade_returns.index.searchsorted(col.index)
trade_returns.ix[date_indices, col_name] = col
return trade_returns
def calculate_signal_returns_matrix(self, signal_data_frame, returns_data_frame, period_shift = 1):
"""
calculate_signal_returns_matrix - Calculates the trading strategy returns for given signal and asset
as a matrix multiplication
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return pandas.DataFrame(
signal_data_frame.shift(period_shift).values * returns_data_frame.values, index = returns_data_frame.index)
def calculate_signal_returns_with_tc(self, signal_data_frame, returns_data_frame, tc, period_shift = 1):
"""
        calculate_signal_returns_with_tc - Calculates the trading strategy returns for given signal and asset including
transaction costs
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return signal_data_frame.shift(period_shift) * returns_data_frame - self.calculate_signal_tc(signal_data_frame, tc, period_shift)
def calculate_signal_returns_with_tc_matrix(self, signal_data_frame, returns_data_frame, tc, period_shift = 1):
"""
        calculate_signal_returns_with_tc_matrix - Calculates the trading strategy returns for given signal and asset
with transaction costs with matrix multiplication
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return pandas.DataFrame(
signal_data_frame.shift(period_shift).values * returns_data_frame.values -
(numpy.abs(signal_data_frame.shift(period_shift).values - signal_data_frame.values) * tc), index = returns_data_frame.index)
def calculate_returns(self, data_frame, period_shift = 1):
"""
calculate_returns - Calculates the simple returns for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return data_frame / data_frame.shift(period_shift) - 1
def calculate_diff_returns(self, data_frame, period_shift = 1):
"""
calculate_diff_returns - Calculates the differences for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return data_frame - data_frame.shift(period_shift)
def calculate_log_returns(self, data_frame, period_shift = 1):
"""
calculate_log_returns - Calculates the log returns for an asset
Parameters
----------
data_frame : DataFrame
asset price
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return math.log(data_frame / data_frame.shift(period_shift))
def create_mult_index(self, df_rets):
"""
        create_mult_index - Calculates a multiplicative index for a time series of returns
Parameters
----------
df_rets : DataFrame
asset price returns
Returns
-------
DataFrame
"""
df = 100.0 * (1.0 + df_rets).cumprod()
# get the first non-nan values for rets and then start index
# one before that (otherwise will ignore first rets point)
first_date_indices = df_rets.apply(lambda series: series.first_valid_index())
first_ord_indices = list()
for i in first_date_indices:
try:
ind = df.index.searchsorted(i)
except:
ind = 0
if ind > 0: ind = ind - 1
first_ord_indices.append(ind)
for i in range(0, len(df.columns)):
df.iloc[first_ord_indices[i],i] = 100
return df
def create_mult_index_from_prices(self, data_frame):
"""
        create_mult_index_from_prices - Calculates a multiplicative index for a time series of prices
Parameters
----------
        data_frame : DataFrame
asset price
Returns
-------
DataFrame
"""
return self.create_mult_index(self.calculate_returns(data_frame))
def rolling_z_score(self, data_frame, periods):
"""
rolling_z_score - Calculates the rolling z score for a time series
Parameters
----------
data_frame : DataFrame
asset prices
periods : int
rolling window for z score computation
Returns
-------
DataFrame
"""
return (data_frame - pandas.rolling_mean(data_frame, periods)) / pandas.rolling_std(data_frame, periods)
def rolling_volatility(self, data_frame, periods, obs_in_year = 252):
"""
rolling_volatility - Calculates the annualised rolling volatility
Parameters
----------
data_frame : DataFrame
contains returns time series
obs_in_year : int
number of observation in the year
Returns
-------
DataFrame
"""
return pandas.rolling_std(data_frame, periods) * math.sqrt(obs_in_year)
def rolling_mean(self, data_frame, periods):
return self.rolling_average(data_frame, periods)
def rolling_average(self, data_frame, periods):
"""
rolling_average - Calculates the rolling moving average
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
periods in the average
Returns
-------
DataFrame
"""
return pandas.rolling_mean(data_frame, periods)
def rolling_sparse_average(self, data_frame, periods):
"""
rolling_sparse_average - Calculates the rolling moving average of a sparse time series
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
number of periods in the rolling sparse average
Returns
-------
DataFrame
"""
# 1. calculate rolling sum (ignore NaNs)
# 2. count number of non-NaNs
# 3. average of non-NaNs
foo = lambda z: z[pandas.notnull(z)].sum()
rolling_sum = pandas.rolling_apply(data_frame, periods, foo, min_periods=1)
rolling_non_nans = pandas.stats.moments.rolling_count(data_frame, periods, freq=None, center=False, how=None)
return rolling_sum / rolling_non_nans
def rolling_sparse_sum(self, data_frame, periods):
"""
rolling_sparse_sum - Calculates the rolling moving sum of a sparse time series
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
period for sparse rolling sum
Returns
-------
DataFrame
"""
        # calculate rolling sum (ignoring NaNs)
foo = lambda z: z[pandas.notnull(z)].sum()
rolling_sum = pandas.rolling_apply(data_frame, periods, foo, min_periods=1)
return rolling_sum
def rolling_median(self, data_frame, periods):
"""
        rolling_median - Calculates the rolling median
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
number of periods in the median
Returns
-------
DataFrame
"""
return pandas.rolling_median(data_frame, periods)
def rolling_sum(self, data_frame, periods):
"""
rolling_sum - Calculates the rolling sum
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
period for rolling sum
Returns
-------
DataFrame
"""
return pandas.rolling_sum(data_frame, periods)
def cum_sum(self, data_frame):
"""
cum_sum - Calculates the cumulative sum
Parameters
----------
data_frame : DataFrame
contains time series
Returns
-------
DataFrame
"""
return data_frame.cumsum()
def rolling_ewma(self, data_frame, periods):
"""
rolling_ewma - Calculates exponentially weighted moving average
Parameters
----------
data_frame : DataFrame
contains time series
periods : int
periods in the EWMA
Returns
-------
DataFrame
"""
# span = 2 / (1 + periods)
return pandas.ewma(data_frame, span=periods)
##### correlation methods
def rolling_corr(self, data_frame1, periods, data_frame2 = None, pairwise = False, flatten_labels = True):
"""
rolling_corr - Calculates rolling correlation wrapping around pandas functions
Parameters
----------
data_frame1 : DataFrame
contains time series to run correlations on
periods : int
period of rolling correlations
data_frame2 : DataFrame (optional)
contains times series to run correlation against
pairwise : boolean
should we do pairwise correlations only?
Returns
-------
DataFrame
"""
panel = pandas.rolling_corr(data_frame1, data_frame2, periods, pairwise = pairwise)
try:
df = panel.to_frame(filter_observations=False).transpose()
except:
df = panel
if flatten_labels:
if pairwise:
series1 = df.columns.get_level_values(0)
series2 = df.columns.get_level_values(1)
new_labels = []
for i in range(len(series1)):
new_labels.append(series1[i] + " v " + series2[i])
else:
new_labels = []
try:
series1 = data_frame1.columns
except:
series1 = [data_frame1.name]
series2 = data_frame2.columns
for i in range(len(series1)):
for j in range(len(series2)):
new_labels.append(series1[i] + " v " + series2[j])
df.columns = new_labels
return df
# several types of outer join (TODO finalise which one should appear!)
def pandas_outer_join(self, df_list):
if df_list is None: return None
# remove any None elements (which can't be joined!)
df_list = [i for i in df_list if i is not None]
if len(df_list) == 0: return None
elif len(df_list) == 1: return df_list[0]
return df_list[0].join(df_list[1:], how="outer")
def functional_outer_join(self, df_list):
def join_dfs(ldf, rdf):
return ldf.join(rdf, how='outer')
return functools.reduce(join_dfs, df_list)
# experimental!
def iterative_outer_join(self, df_list):
while(True):
# split into two
length = len(df_list)
if length == 1: break
# mid_point = length // 2
df_mini = []
for i in range(0, length, 2):
if i == length - 1:
df_mini.append(df_list[i])
else:
df_mini.append(df_list[i].join(df_list[i+1], how="outer"))
df_list = df_mini
return df_list[0]
def linear_regression(self, df_y, df_x):
return pandas.stats.api.ols(y = df_y, x = df_x)
def linear_regression_single_vars(self, df_y, df_x, y_vars, x_vars):
stats = []
for i in range(0, len(y_vars)):
y = df_y[y_vars[i]]
x = df_x[x_vars[i]]
try:
out = pandas.stats.api.ols(y = y, x = x)
except:
out = None
stats.append(out)
return stats
def strip_linear_regression_output(self, indices, ols_list, var):
if not(isinstance(var, list)):
var = [var]
df = pandas.DataFrame(index = indices, columns=var)
for v in var:
list_o = []
for o in ols_list:
if o is None:
list_o.append(numpy.nan)
else:
if v == 't_stat':
list_o.append(o.t_stat.x)
elif v == 't_stat_intercept':
list_o.append(o.t_stat.intercept)
elif v == 'beta':
list_o.append(o.beta.x)
elif v == 'beta_intercept':
list_o.append(o.beta.intercept)
elif v == 'r2':
list_o.append(o.r2)
elif v == 'r2_adj':
list_o.append(o.r2_adj)
else:
return None
df[v] = list_o
return df
##### various methods for averaging time series by hours, mins and days to create summary time series
def average_by_hour_min_of_day(self, data_frame):
return data_frame.\
groupby([data_frame.index.hour, data_frame.index.minute]).mean()
def average_by_hour_min_of_day_pretty_output(self, data_frame):
data_frame = data_frame.\
groupby([data_frame.index.hour, data_frame.index.minute]).mean()
data_frame.index = data_frame.index.map(lambda t: datetime.time(*t))
return data_frame
def all_by_hour_min_of_day_pretty_output(self, data_frame):
df_new = []
for group in data_frame.groupby(data_frame.index.date):
df_temp = group[1]
df_temp.index = df_temp.index.time
df_temp.columns = [group[0]]
df_new.append(df_temp)
return pandas.concat(df_new, axis=1)
def average_by_year_hour_min_of_day_pretty_output(self, data_frame):
# years = range(data_frame.index[0].year, data_frame.index[-1].year)
#
# time_of_day = []
#
# for year in years:
# temp = data_frame.ix[data_frame.index.year == year]
# time_of_day.append(temp.groupby(temp.index.time).mean())
#
# data_frame = pandas.concat(time_of_day, axis=1, keys = years)
data_frame = data_frame.\
groupby([data_frame.index.year, data_frame.index.hour, data_frame.index.minute]).mean()
data_frame = data_frame.unstack(0)
data_frame.index = data_frame.index.map(lambda t: datetime.time(*t))
return data_frame
def average_by_annualised_year(self, data_frame, obs_in_year = 252):
data_frame = data_frame.\
groupby([data_frame.index.year]).mean() * obs_in_year
return data_frame
def average_by_month(self, data_frame):
data_frame = data_frame.\
groupby([data_frame.index.month]).mean()
return data_frame
def average_by_bus_day(self, data_frame, cal = "FX"):
date_index = data_frame.index
return data_frame.\
groupby([Calendar().get_bus_day_of_month(date_index, cal)]).mean()
def average_by_month_day_hour_min_by_bus_day(self, data_frame, cal = "FX"):
date_index = data_frame.index
return data_frame.\
groupby([date_index.month,
Calendar().get_bus_day_of_month(date_index, cal),
date_index.hour, date_index.minute]).mean()
def average_by_month_day_by_bus_day(self, data_frame, cal = "FX"):
date_index = data_frame.index
return data_frame.\
groupby([date_index.month,
Calendar().get_bus_day_of_month(date_index, cal)]).mean()
def average_by_month_day_by_day(self, data_frame, cal = "FX"):
date_index = data_frame.index
return data_frame.\
groupby([date_index.month, date_index.day]).mean()
def group_by_year(self, data_frame):
date_index = data_frame.index
return data_frame.\
groupby([date_index.year])
def average_by_day_hour_min_by_bus_day(self, data_frame):
date_index = data_frame.index
return data_frame.\
groupby([Calendar().get_bus_day_of_month(date_index),
date_index.hour, date_index.minute]).mean()
def remove_NaN_rows(self, data_frame):
return data_frame.dropna()
def get_top_valued_sorted(self, df, order_column, n = 20):
df_sorted = df.sort(columns=order_column)
df_sorted = df_sorted.tail(n=n)
return df_sorted
def get_bottom_valued_sorted(self, df, order_column, n = 20):
df_sorted = df.sort(columns=order_column)
df_sorted = df_sorted.head(n=n)
return df_sorted
def convert_month_day_to_date_time(self, df, year = 1970):
new_index = []
# TODO use map?
for i in range(0, len(df.index)):
x = df.index[i]
new_index.append(datetime.date(year, x[0], int(x[1])))
df.index = pandas.DatetimeIndex(new_index)
return df
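# Hedged illustration (not part of the original library): a module-level helper
# showing how a few of the methods above chain together on made-up price data.
def _demo_time_series_calcs():
    tsc = TimeSeriesCalcs()
    dates = pandas.bdate_range('2014-01-01', '2014-03-31')
    prices = pandas.DataFrame(100 + numpy.random.randn(len(dates)).cumsum(),
                              index=dates, columns=['asset'])
    rets = tsc.calculate_returns(prices)            # simple returns
    signal = numpy.sign(rets)                       # naive momentum-style signal
    strat_rets = tsc.calculate_signal_returns(signal, rets)
    return tsc.create_mult_index(strat_rets)        # strategy indexed to 100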
if __name__ == '__main__':
# test functions
tsc = TimeSeriesCalcs()
tsf = TimeSeriesFilter()
# test rolling ewma
date_range =
|
pandas.bdate_range('2014-01-01', '2014-02-28')
|
pandas.bdate_range
|
"""
Model predictions from checkpoint file
"""
import os
import numpy as np
import pickle
import glob
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 50)
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
import pandas as pd
from sklearn.mixture import GaussianMixture
from sklearn.base import TransformerMixin, BaseEstimator
seed = 42
class CustomGMM(TransformerMixin, BaseEstimator):
"""
    Per-column Gaussian mixture model transformer: `cols_component` maps each
    column name to its number of mixture components.
"""
def __init__(self, col_suffix, cols_component, predict_proba, **kwargs):
self.cols_component = cols_component
self.col_suffix = col_suffix
self.predict_proba = predict_proba
def fit(self, X, y=None):
"""
fit data to model
"""
self.gmm = {}
for col, component in self.cols_component.items():
gmm = GaussianMixture(n_components=component, random_state=seed)
val = X[col].values.reshape(-1, 1)
gmm.fit(val)
self.gmm[col] = gmm
return self
def transform(self, X):
for col, component in self.cols_component.items():
val = X[col].values.reshape(-1, 1)
if self.predict_proba:
# predict probability
proba = self.gmm[col].predict_proba(val)
proba = proba[:, :-1]
# concat data to original frame
col = col + self.col_suffix + "_"
cols = [col + f"{w}" for w in range(proba.shape[1])]
proba =
|
pd.DataFrame(proba, columns=cols)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In this Kernel, I'd like to show you a very basic segmentation technique which only applies pure computer vision techniques. Nothing fancy.
#
# At first, I'll show the step-by-step processing and after that I will create the submission for the competition.
#
# With this kernel, I could reach *0.229 LB* which is not very nice but I am sure that with a few tweaks we could get better score. And consider that **we don't even use the train data**! which is pretty awesome in my opinion.
# In[ ]:
import numpy as np
import pandas as pd
import os
from os.path import join
import glob
import cv2
import matplotlib.pyplot as plt
# In[ ]:
TRAIN_PATH = "../input/stage1_train/"
TEST_PATH = "../input/stage1_test/"
# In[ ]:
train_ids = os.listdir(TRAIN_PATH)
test_ids = os.listdir(TEST_PATH)
# In[ ]:
test_image_paths = [
glob.glob(join(TEST_PATH, test_id, "images", "*"))[0] for test_id in test_ids
]
# # Step-by-step processing
# In[ ]:
tmp_image_path = np.random.choice(test_image_paths)
tmp_image = cv2.imread(tmp_image_path, cv2.IMREAD_GRAYSCALE)
# In[ ]:
plt.imshow(tmp_image)
# Now comes the crucial part of the processing. First we would like to create a binary matrix from the original image. The ones in the matrix are considered to be objects and the zeros are the background. So if we don't do this correctly we're going to lose a lot of information.
# In[ ]:
ret, thresh = cv2.threshold(tmp_image, 100, 255, cv2.THRESH_OTSU)
# In[ ]:
fig, axs = plt.subplots(1, 2, figsize=(10, 10))
axs[0].imshow(tmp_image)
axs[1].imshow(thresh)
# There are images where the thresholding does not help because the ones will be the background and the zeros the objects. This happens when the background is brighter than the objects.
#
# But how do we detect this?
#
# We just have to find the contours of the objects, then calculate the area of the largest contour; if it is above some threshold value we simply invert the image.
# In[ ]:
_, cnts, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
# In[ ]:
max_cnt_area = cv2.contourArea(cnts[0])
# In[ ]:
print("The area of the largest object is: {0}".format(max_cnt_area))
# This is the part where we invert the threshold image if we are not satisfied with the area of the largest contour
# In[ ]:
if max_cnt_area > 50000:
ret, thresh = cv2.threshold(
tmp_image, 100, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV
)
# And here comes the *morphology*.
#
# We will use:
# - Dilation (read more: https://homepages.inf.ed.ac.uk/rbf/HIPR2/dilate.htm)
# - Erosion (read more: https://homepages.inf.ed.ac.uk/rbf/HIPR2/erode.htm)
# In[ ]:
mask = cv2.dilate(thresh, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
mask = cv2.erode(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
# In[ ]:
fig, axs = plt.subplots(1, 4, figsize=(30, 30))
axs[0].imshow(tmp_image)
axs[1].imshow(thresh)
axs[2].imshow(mask)
axs[3].imshow(cv2.bitwise_and(tmp_image, tmp_image, mask=mask))
# # Process the test images for submission
# I separated the logic into 2 functions so it is easier to reuse.
# In[ ]:
def threshold(image_gray):
image_gray = cv2.GaussianBlur(image_gray, (7, 7), 1)
ret, thresh = cv2.threshold(image_gray, 0, 255, cv2.THRESH_OTSU)
_, cnts, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
max_cnt_area = cv2.contourArea(cnts[0])
if max_cnt_area > 50000:
ret, thresh = cv2.threshold(
image_gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV
)
return thresh
def apply_morphology(thresh):
mask = cv2.dilate(thresh, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
mask = cv2.erode(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
return mask
# Now we only have to create the mask images from the test images
# In[ ]:
segmented = []
for test_image_path in test_image_paths:
tmp_image = cv2.imread(test_image_path, cv2.IMREAD_GRAYSCALE)
thresh = threshold(tmp_image)
mask = apply_morphology(thresh)
segmented.append(mask)
# In[ ]:
# Run length Encoding from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python
from skimage.morphology import label
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0]
run_lengths = []
prev = -2
for b in dots:
if b > prev + 1:
run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
def prob_to_rles(x, cutoff=0.5):
lab_img = label(x > cutoff)
for i in range(1, lab_img.max() + 1):
yield rle_encoding(lab_img == i)
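# In[ ]:
# Small worked example (toy 2x2 mask, not competition data) to make the RLE
# format concrete: pixel positions are 1-based and run down the columns
# because of the transpose inside rle_encoding.
demo_mask = np.array([[1, 0], [1, 1]])
print(rle_encoding(demo_mask))        # -> [1, 2, 4, 1]
print(list(prob_to_rles(demo_mask)))  # one labelled object -> [[1, 2, 4, 1]]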
# In[ ]:
new_test_ids = []
rles = []
for n, id_ in enumerate(test_ids):
rle = list(prob_to_rles(segmented[n]))
rles.extend(rle)
new_test_ids.extend([id_] * len(rle))
# In[ ]:
submission_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
from azure.digitaltwins.core import DigitalTwinsClient
from azure.identity import (
DefaultAzureCredential,
AzureCliCredential,
ChainedTokenCredential,
)
url = "https://IOT-ADT.api.wus2.digitaltwins.azure.net"
default_credential = DefaultAzureCredential(
exclude_interactive_browser_credential=False
)
azure_cli = AzureCliCredential()
credential_chain = ChainedTokenCredential(azure_cli, default_credential)
service_client = DigitalTwinsClient(url, credential_chain)
query_expression = "SELECT * FROM digitaltwins"
query_result = service_client.query_twins(query_expression)
twin_list =
|
pd.DataFrame(query_result)
|
pandas.DataFrame
|
import numpy as np
from time import time
from collections import Counter
import networkx as nx
# import gmatch4py as gm
from grakel import graph_from_networkx, RandomWalk
import pandas as pd
import os
from copy import deepcopy
from mvmm.multi_view.block_diag.graph.linalg import get_adjmat_bp
from mvmm.multi_view.base import MultiViewMixtureModelMixin
from mvmm.multi_view.MVMMGridSearch import MVMMGridSearch
from mvmm.multi_view.BlockDiagMVMM import BlockDiagMVMM
from mvmm.multi_view.TwoStage import TwoStage
# from mvmm.multi_view.SpectralPenSearchMVMM import SpectralPenSearchMVMM
from mvmm.multi_view.SpectralPenSearchByBlockMVMM import \
SpectralPenSearchByBlockMVMM
def is_mvmm(estimator):
"""
Returns True iff estimator is a multi-view mixture model.
"""
if isinstance(estimator, MultiViewMixtureModelMixin) or \
isinstance(estimator, MVMMGridSearch) or \
isinstance(estimator, TwoStage) or \
isinstance(estimator, SpectralPenSearchByBlockMVMM):
# isinstance(estimator, SpectralPenSearchMVMM) or \
return True
else:
return False
def is_block_diag_mvmm(estimator):
if isinstance(estimator, MVMMGridSearch):
return is_block_diag_mvmm(estimator.base_estimator)
if isinstance(estimator, TwoStage):
return is_block_diag_mvmm(estimator.base_final)
if isinstance(estimator, BlockDiagMVMM):
return True
else:
return False
def clf_fit_and_score(clf, X_tr, y_tr, X_tst, y_tst):
"""
Fits a classification model and scores the results for the training
and test set.
Parameters
----------
clf:
        A sklearn-compatible classifier.
X_tr, y_tr: training data and true labels.
X_tst, y_tst: test data and true labels.
Output
------
results: dict
Train and test set results.
"""
start_time = time()
def get_metrics(y_true, y_pred):
"""
Measures of classification accuracy.
"""
return {'acc': np.mean(y_true == y_pred)}
clf.fit(X_tr, y_tr)
y_hat_tr = clf.predict(X_tr)
y_hat_tst = clf.predict(X_tst)
results = {'tr': get_metrics(y_tr, y_hat_tr),
'tst': get_metrics(y_tst, y_hat_tst),
'runtime': time() - start_time}
return results
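# Hedged usage sketch (random toy data; LogisticRegression is just an example
# of a sklearn-compatible classifier, not the project's model choice).
def _demo_clf_fit_and_score():
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 5))
    y = (X[:, 0] > 0).astype(int)
    X_tr, X_tst, y_tr, y_tst = train_test_split(X, y, random_state=0)
    return clf_fit_and_score(LogisticRegression(), X_tr, y_tr, X_tst, y_tst)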
def get_pi_acc(Pi_est, Pi_true, method='random_walk', **kwargs):
"""
    Computes a graph-similarity score between the sparsity graphs of the
    estimated and true Pi.
"""
A_est = get_adjmat_bp(Pi_est > 0)
A_true = get_adjmat_bp(Pi_true > 0)
G_est = nx.from_numpy_array(A_est)
G_true = nx.from_numpy_array(A_true)
sim = graph_similarity(G_est, G_true, method=method, **kwargs)
return sim
def graph_similarity(G, H, method='random_walk', **kwargs):
"""
Parameters
----------
G, H: nx.Graph
"""
assert method in ['random_walk']
if method == 'random_walk':
kernel = RandomWalk(**kwargs)
return kernel.fit_transform(graph_from_networkx([G, H]))[0, 1]
def get_n_comp_seq(true_n_components, pm):
return np.arange(max(1, true_n_components - pm), true_n_components + pm)
def get_empirical_pi(Y, shape, scale='prob'):
assert scale in ['prob', 'counts']
pi_empir = np.zeros(shape)
pairs = Counter(tuple(Y[i, :]) for i in range(Y.shape[0]))
for k in pairs.keys():
pi_empir[k[0], k[1]] = pairs[k]
if scale == 'prob':
pi_empir = pi_empir / pi_empir.sum()
return pi_empir
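# Small illustrative example (assumed data): each row of Y holds one sample's
# (view-1 cluster, view-2 cluster) pair; get_empirical_pi turns the pairs into
# an empirical joint distribution over a 2 x 2 grid.
def _demo_empirical_pi():
    Y = np.array([[0, 0], [0, 1], [1, 1], [1, 1]])
    return get_empirical_pi(Y, shape=(2, 2), scale='prob')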
def extract_tuning_param_vals(df):
vals = []
for tune_param in df['tuning_param_values']:
assert len(list(tune_param.keys())) == 1
param_name = list(tune_param.keys())[0]
vals.append(tune_param[param_name])
vals =
|
pd.Series(vals, index=df.index, name=param_name)
|
pandas.Series
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pickle
import signal
import sys
from datetime import timedelta
from os import listdir
from os.path import isfile, join, exists
import catalyst.protocol as zp
import logbook
import pandas as pd
from catalyst.algorithm import TradingAlgorithm
from catalyst.constants import LOG_LEVEL
from catalyst.exchange.exchange_blotter import ExchangeBlotter
from catalyst.exchange.exchange_errors import (
ExchangeRequestError,
OrderTypeNotSupported)
from catalyst.exchange.exchange_execution import ExchangeLimitOrder
from catalyst.exchange.live_graph_clock import LiveGraphClock
from catalyst.exchange.simple_clock import SimpleClock
from catalyst.exchange.utils.exchange_utils import (
save_algo_object,
get_algo_object,
get_algo_folder,
get_algo_df,
save_algo_df,
clear_frame_stats_directory,
remove_old_files,
group_assets_by_exchange, )
from catalyst.exchange.utils.stats_utils import \
get_pretty_stats, stats_to_s3, stats_to_algo_folder
from catalyst.finance.execution import MarketOrder
from catalyst.finance.performance import PerformanceTracker
from catalyst.finance.performance.period import calc_period_stats
from catalyst.finance.order import Order
from catalyst.gens.tradesimulation import AlgorithmSimulator
from catalyst.marketplace.marketplace import Marketplace
from catalyst.utils.api_support import api_method
from catalyst.utils.input_validation import error_keywords, ensure_upper_case
from catalyst.utils.math_utils import round_nearest
from catalyst.utils.preprocess import preprocess
from redo import retry
log = logbook.Logger('exchange_algorithm', level=LOG_LEVEL)
class ExchangeAlgorithmExecutor(AlgorithmSimulator):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
class ExchangeTradingAlgorithmBase(TradingAlgorithm):
def __init__(self, *args, **kwargs):
self.exchanges = kwargs.pop('exchanges', None)
self.simulate_orders = kwargs.pop('simulate_orders', None)
super(ExchangeTradingAlgorithmBase, self).__init__(*args, **kwargs)
self.current_day = None
if self.simulate_orders is None and \
self.sim_params.arena == 'backtest':
self.simulate_orders = True
# Operations with retry features
self.attempts = dict(
get_transactions_attempts=5,
order_attempts=5,
synchronize_portfolio_attempts=5,
get_order_attempts=5,
get_open_orders_attempts=5,
cancel_order_attempts=5,
get_spot_value_attempts=5,
get_history_window_attempts=5,
retry_sleeptime=5,
get_orderbook_attempts=5,
)
self.blotter = ExchangeBlotter(
data_frequency=self.data_frequency,
# Default to NeverCancel in catalyst
cancel_policy=self.cancel_policy,
simulate_orders=self.simulate_orders,
exchanges=self.exchanges,
attempts=self.attempts,
)
self._marketplace = None
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if stop_price:
raise OrderTypeNotSupported(order_type='stop')
if style:
if limit_price is not None:
raise ValueError(
'An order style and a limit price was included in the '
'order. Please pick one to avoid any possible conflict.'
)
            # Currently limiting order types to limit and market to
            # be in line with CCXT and many exchanges. We'll consider
            # adding more order types in the future.
            if not isinstance(style, (ExchangeLimitOrder, MarketOrder)):
raise OrderTypeNotSupported(
order_type=style.__class__.__name__
)
return style
if limit_price:
return ExchangeLimitOrder(limit_price)
else:
return MarketOrder()
@api_method
def set_commission(self, maker=None, taker=None):
"""Sets the maker and taker fees of the commission model for the simulation.
Parameters
----------
        maker : float
            The maker fee - adding to the order book.
        taker : float
            The taker fee - taking from the order book.
"""
key = list(self.blotter.commission_models.keys())[0]
if maker is not None:
self.blotter.commission_models[key].maker = maker
if taker is not None:
self.blotter.commission_models[key].taker = taker
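    # Example (hedged): inside an algorithm's initialize(), a call such as
    #   set_commission(maker=0.001, taker=0.002)
    # would set a 0.1% maker fee and a 0.2% taker fee; the numbers are
    # illustrative only.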
@api_method
def set_slippage(self, slippage=None):
"""Set the slippage of the fixed slippage model used by the simulation.
Parameters
----------
slippage : float
The slippage to be set.
"""
key = list(self.blotter.slippage_models.keys())[0]
if slippage is not None:
self.blotter.slippage_models[key].slippage = slippage
def _calculate_order(self, asset, amount,
limit_price=None, stop_price=None, style=None):
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(asset,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(limit_price,
stop_price,
style)
return amount, style
def _calculate_order_target_amount(self, asset, target):
"""
        Removes open order amounts so we won't run into issues
        when two orders are placed one after the other.
        It then proceeds to remove the position amounts in TradingAlgorithm.
:param asset:
:param target:
:return: target
"""
if asset in self.blotter.open_orders:
for open_order in self.blotter.open_orders[asset]:
current_amount = open_order.amount
target -= current_amount
target = super(ExchangeTradingAlgorithmBase, self). \
_calculate_order_target_amount(asset, target)
return target
def round_order(self, amount, asset):
"""
We need fractions with cryptocurrencies
:param amount:
:return:
"""
return round_nearest(amount, asset.min_trade_size)
@api_method
def get_dataset(self, data_source_name, start=None, end=None):
if self._marketplace is None:
self._marketplace = Marketplace()
return self._marketplace.get_dataset(
data_source_name, start, end,
)
@api_method
@preprocess(symbol_str=ensure_upper_case)
def symbol(self, symbol_str, exchange_name=None):
"""Lookup a Trading pair by its ticker symbol.
Catalyst defines its own set of "universal" symbols to reference
trading pairs across exchanges. This is required because exchanges
        do not adhere to a universal symbolism. For example, Bitfinex
        uses the BTC symbol for Bitcoin while Kraken uses XBT. In addition,
pairs are sometimes presented differently. For example, Bitfinex
puts the market currency before the base currency without a
separator, Bittrex puts the quote currency first and uses a dash
        separator.
Here is the Catalyst convention: [Base Currency]_[Quote Currency]
For example: btc_usd, eth_btc, neo_eth, ltc_eur.
The symbol for each currency (e.g. btc, eth, ltc) is generally
aligned with the Bittrex exchange.
Parameters
----------
symbol_str : str
The ticker symbol for the TradingPair to lookup.
exchange_name: str
The name of the exchange containing the symbol
Returns
-------
tradingPair : TradingPair
The TradingPair that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbols was not held on the current lookup date.
"""
# If the user has not set the symbol lookup date,
        # use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
if self._symbol_lookup_date is not None \
else self.sim_params.end_session
if exchange_name is None:
exchange = list(self.exchanges.values())[0]
else:
exchange = self.exchanges[exchange_name]
data_frequency = self.data_frequency \
if self.sim_params.arena == 'backtest' else None
return self.asset_finder.lookup_symbol(
symbol=symbol_str,
exchange=exchange,
data_frequency=data_frequency,
as_of_date=_lookup_date
)
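    # Example (hedged): with the universal naming convention described above,
    #   asset = symbol('eth_btc', exchange_name='bitfinex')
    # looks up the ETH/BTC pair on Bitfinex; the exchange name is illustrative
    # only.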
def prepare_period_stats(self, start_dt, end_dt):
"""
Creates a dictionary representing the state of the tracker.
Parameters
----------
start_dt: datetime
end_dt: datetime
Notes
-----
I rewrote this in an attempt to better control the stats.
I don't want things to happen magically through complex logic
pertaining to backtesting.
"""
tracker = self.perf_tracker
cum = tracker.cumulative_performance
pos_stats = cum.position_tracker.stats()
period_stats = calc_period_stats(pos_stats, cum.ending_cash)
stats = dict(
period_start=tracker.period_start,
period_end=tracker.period_end,
capital_base=tracker.capital_base,
progress=tracker.progress,
ending_value=cum.ending_value,
ending_exposure=cum.ending_exposure,
capital_used=cum.cash_flow,
starting_value=cum.starting_value,
starting_exposure=cum.starting_exposure,
starting_cash=cum.starting_cash,
ending_cash=cum.ending_cash,
portfolio_value=cum.ending_cash + cum.ending_value,
pnl=cum.pnl,
returns=cum.returns,
period_open=start_dt,
period_close=end_dt,
gross_leverage=period_stats.gross_leverage,
net_leverage=period_stats.net_leverage,
short_exposure=pos_stats.short_exposure,
long_exposure=pos_stats.long_exposure,
short_value=pos_stats.short_value,
long_value=pos_stats.long_value,
longs_count=pos_stats.longs_count,
shorts_count=pos_stats.shorts_count,
)
# Merging cumulative risk
stats.update(tracker.cumulative_risk_metrics.to_dict())
# Merging latest recorded variables
stats.update(self.recorded_vars)
period = tracker.todays_performance
stats['positions'] = period.position_tracker.get_positions_list()
# we want the key to be absent, not just empty
# Only include transactions for given dt
stats['transactions'] = []
for date in period.processed_transactions:
if start_dt <= date < end_dt:
transactions = period.processed_transactions[date]
for t in transactions:
stats['transactions'].append(t.to_dict())
stats['orders'] = []
for date in period.orders_by_modified:
if start_dt <= date < end_dt:
orders = period.orders_by_modified[date]
for order in orders:
stats['orders'].append(orders[order].to_dict())
return stats
def run(self, data=None, overwrite_sim_params=True):
data.attempts = self.attempts
return super(ExchangeTradingAlgorithmBase, self).run(
data, overwrite_sim_params
)
class ExchangeTradingAlgorithmBacktest(ExchangeTradingAlgorithmBase):
def __init__(self, *args, **kwargs):
super(ExchangeTradingAlgorithmBacktest, self).__init__(*args, **kwargs)
self.frame_stats = list()
self.state = {}
log.info('initialized trading algorithm in backtest mode')
def is_last_frame_of_day(self, data):
# TODO: adjust here to support more intervals
next_frame_dt = data.current_dt + timedelta(minutes=1)
if next_frame_dt.date() > data.current_dt.date():
return True
else:
return False
def handle_data(self, data):
super(ExchangeTradingAlgorithmBacktest, self).handle_data(data)
if self.data_frequency == 'minute':
frame_stats = self.prepare_period_stats(
data.current_dt, data.current_dt + timedelta(minutes=1)
)
self.frame_stats.append(frame_stats)
self.current_day = data.current_dt.floor('1D')
def _create_stats_df(self):
stats = pd.DataFrame(self.frame_stats)
stats.set_index('period_close', inplace=True, drop=False)
return stats
def analyze(self, perf):
stats = self._create_stats_df() if self.data_frequency == 'minute' \
else perf
super(ExchangeTradingAlgorithmBacktest, self).analyze(stats)
def run(self, data=None, overwrite_sim_params=True):
perf = super(ExchangeTradingAlgorithmBacktest, self).run(
data, overwrite_sim_params
)
# Rebuilding the stats to support minute data
stats = self._create_stats_df() if self.data_frequency == 'minute' \
else perf
return stats
class ExchangeTradingAlgorithmLive(ExchangeTradingAlgorithmBase):
def __init__(self, *args, **kwargs):
self.algo_namespace = kwargs.pop('algo_namespace', None)
self.live_graph = kwargs.pop('live_graph', None)
self.stats_output = kwargs.pop('stats_output', None)
self._analyze_live = kwargs.pop('analyze_live', None)
self.start = kwargs.pop('start', None)
self.is_start = kwargs.pop('is_start', True)
self.end = kwargs.pop('end', None)
self.is_end = kwargs.pop('is_end', True)
self._clock = None
self.frame_stats = list()
# erase the frame_stats folder to avoid overloading the disk
error = clear_frame_stats_directory(self.algo_namespace)
if error:
log.warning(error)
# in order to save paper & live files separately
self.mode_name = 'paper' if kwargs['simulate_orders'] else 'live'
self.pnl_stats = get_algo_df(
self.algo_namespace,
'pnl_stats_{}'.format(self.mode_name),
)
self.custom_signals_stats = get_algo_df(
self.algo_namespace,
'custom_signals_stats_{}'.format(self.mode_name)
)
self.exposure_stats = get_algo_df(
self.algo_namespace,
'exposure_stats_{}'.format(self.mode_name)
)
self.is_running = True
self.stats_minutes = 1
self._last_orders = []
self._last_open_orders = []
self.trading_client = None
super(ExchangeTradingAlgorithmLive, self).__init__(*args, **kwargs)
try:
signal.signal(signal.SIGINT, self.signal_handler)
except ValueError:
log.warn("Can't initialize signal handler inside another thread."
"Exit should be handled by the user.")
def get_frame_stats(self):
"""
        Prepares the stats before analyze.
        :return: stats: pd.DataFrame
"""
# add the last day stats which is not saved in the directory
current_stats = pd.DataFrame(self.frame_stats)
current_stats.set_index('period_close', drop=False, inplace=True)
# get the location of the directory
algo_folder = get_algo_folder(self.algo_namespace)
folder = join(algo_folder, 'frame_stats')
if exists(folder):
files = [f for f in listdir(folder) if isfile(join(folder, f))]
period_stats_list = []
for item in files:
filename = join(folder, item)
with open(filename, 'rb') as handle:
perf_period = pickle.load(handle)
period_stats_list.extend(perf_period)
stats = pd.DataFrame(period_stats_list)
stats.set_index('period_close', drop=False, inplace=True)
return pd.concat([stats, current_stats])
else:
return current_stats
def interrupt_algorithm(self):
"""
        Called when the algorithm comes to an end.
        Extracts the stats and calls analyze().
        After finishing, it exits the run.
Parameters
----------
Returns
-------
"""
self.is_running = False
if self._analyze is None:
log.info('Exiting the algorithm.')
else:
log.info('Exiting the algorithm. Calling `analyze()` '
'before exiting the algorithm.')
stats = self.get_frame_stats()
self.analyze(stats)
sys.exit(0)
def signal_handler(self, signal, frame):
"""
Handles the keyboard interruption signal.
Parameters
----------
signal
frame
Returns
-------
"""
log.info('Interruption signal detected {}, exiting the '
'algorithm'.format(signal))
self.interrupt_algorithm()
@property
def clock(self):
if self._clock is None:
return self._create_clock()
else:
return self._clock
def _create_clock(self):
# The calendar's execution times are the minutes over which we actually
# want to run the clock. Typically the execution times simply adhere to
# the market open and close times. In the case of the futures calendar,
# for example, we only want to simulate over a subset of the full 24
# hour calendar, so the execution times dictate a market open time of
# 6:31am US/Eastern and a close of 5:00pm US/Eastern.
# In our case, we are trading around the clock, so the market close
# corresponds to the last minute of the day.
# This method is taken from TradingAlgorithm.
# The clock has been replaced to use RealtimeClock
        # TODO: should we apply time skew? Not sure I understand the utility.
log.debug('creating clock')
if self.live_graph or self._analyze_live is not None:
self._clock = LiveGraphClock(
self.sim_params.sessions,
context=self,
callback=self._analyze_live,
start=self.start if self.is_start else None,
end=self.end if self.is_end else None
)
else:
self._clock = SimpleClock(
self.sim_params.sessions,
start=self.start if self.is_start else None,
end=self.end if self.is_end else None
)
return self._clock
def _init_trading_client(self):
"""
This replaces Ziplines `_create_generator` method. The main difference
is that we are restoring performance tracker objects if available.
        This allows us to stop/start algos without losing their state.
"""
self.state = get_algo_object(
algo_name=self.algo_namespace,
key='context.state_{}'.format(self.mode_name),
)
if self.state is None:
self.state = {}
if self.perf_tracker is None:
# Note from the Zipline dev:
# HACK: When running with the `run` method, we set perf_tracker to
# None so that it will be overwritten here.
tracker = self.perf_tracker = PerformanceTracker(
sim_params=self.sim_params,
trading_calendar=self.trading_calendar,
env=self.trading_environment,
)
# Set the dt initially to the period start by forcing it to change.
self.on_dt_changed(self.sim_params.start_session)
new_position_tracker = tracker.position_tracker
tracker.position_tracker = None
# Unpacking the perf_tracker and positions if available
cum_perf = get_algo_object(
algo_name=self.algo_namespace,
key='cumulative_performance_{}'.format(self.mode_name),
)
if cum_perf is not None:
tracker.cumulative_performance = cum_perf
# Ensure single common position tracker
tracker.position_tracker = cum_perf.position_tracker
today =
|
pd.Timestamp.utcnow()
|
pandas.Timestamp.utcnow
|
import pandas as pd
from pathlib import Path
import os
import numpy as np
import datetime
from pickle_plotting import get_file_paths
import logarithmoforecast as lf
import holidays
def pickle_directory(datasets_dir, pickle_dir):
file_paths = os.listdir(datasets_dir)
sdp_series = {}
for path in file_paths:
number = Path(path).stem
print(number)
df = pd.read_csv(datasets_dir / path, header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
# df = pd.read_csv(r"/home/joelhaubold/Dokumente/BADaten/FiN-Messdaten-LV_Spannung_Teil2/tmpFile-1492693540182.csv", header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
df.drop(columns=['AliasName', 'Unit'])
df = df.set_index('TimeStamp')
df = df.sort_index()
sdp_list = df.ServiceDeliveryPoint.unique()
print(sdp_list)
for sdp in sdp_list:
df_sdp = df.loc[df.ServiceDeliveryPoint == sdp, :] # Slim the pd down here for less memory consumption?
if sdp in sdp_series:
combined_df = sdp_series.get(sdp)
combined_df = pd.concat([combined_df, df_sdp]).sort_index()
sdp_series[sdp] = combined_df
else:
sdp_series[sdp] = df_sdp
for key, value in sdp_series.items():
print(key)
if not os.path.exists(pickle_dir / key):
os.makedirs(pickle_dir / key)
value.index = pd.to_datetime(value.index)
pos1 = value.Description == 'Electric voltage momentary phase 1 (notverified)'
df_phase1 = value.loc[pos1, :]
pos2 = value.Description == 'Electric voltage momentary phase 2 (notverified)'
df_phase2 = value.loc[pos2, :]
pos3 = value.Description == 'Electric voltage momentary phase 3 (notverified)'
df_phase3 = value.loc[pos3, :]
# for phase in ['1', '2', '3']:
# if not os.path.exists('pickles/' + key + '/phase'+phase):
# os.makedirs('pickles/' + key + '/phase'+phase)
df_phase1.to_pickle(pickle_dir / key / "phase1")
df_phase2.to_pickle(pickle_dir / key / "phase2")
df_phase3.to_pickle(pickle_dir / key / "phase3")
# value.to_pickle(r"pickles/"+key+"/3PhasesDF")
def add_help_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
print("Opened pickle")
phase_values = pd.DataFrame()
for i, df_p in enumerate(df_phases):
df_p.drop(columns=['Unit', 'AliasName'], inplace=True)
phase = 'p' + str(i + 1)
phase_values[phase] = df_p.Value
for df_p in df_phases:
df_p['row_dif'] = df_p.Value.diff()
print("Created help values")
np.diff(phase_values.values)
phase_values['max_dif'] = phase_values.apply(
lambda row: max(abs(row['p1'] - row['p2']), abs(row['p1'] - row['p3']),
abs(row['p2'] - row['p3'])), axis=1)
print("Calculated help data")
for df_p in df_phases:
df_p['phase_dif'] = phase_values['max_dif']
print("Assigned help data")
for i, df_p in enumerate(df_phases):
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def update_trafo(pickle_dir=Path('pickles')):
# pd.options.mode.chained_assignment = None
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print("Opened pickle")
df_row_difs = pd.DataFrame()
for p, df_p in enumerate(df_phases):
df_p['row_dif'] = df_p.Value.diff() / df_p.Value.index.to_series().diff().dt.total_seconds()
df_row_difs[str(p)] = df_p['row_dif']
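        # Zero out time steps where the three phases do not all move in the same
        # direction (not all rising or all falling); only these common-mode
        # changes are kept for the 'trafo' column, taken below as the minimum
        # absolute change across the phases.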
df_row_difs.loc[True ^ (((df_row_difs['0'] >= 0) & (df_row_difs['1'] >= 0) & (df_row_difs['2'] >= 0)) | (
(df_row_difs['0'] < 0) & (df_row_difs['1'] < 0) & (df_row_difs['2'] < 0)))] = 0
df_row_difs = df_row_difs.abs()
for df_p in df_phases:
# df_p['trafo'] = min(df_phases[0]['row_dif'].abs(), df_phases[1]['row_dif'].abs(), df_phases[2]['row_dif'].abs())
df_p['trafo'] = df_row_difs.min(axis=1)
print("Assigned help data")
for i, df_p in enumerate(df_phases):
# print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def add_seasonal_data(pickle_dir=Path('pickles')):
seasonal_data = pd.DataFrame()
file_paths = get_file_paths(pickle_dir)
print(file_paths)
day = pd.Timedelta('1d')
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p))[['Value']], ['1', '2', '3']))
weekday_dfs_phases = [[None for x in range(7)] for y in range(3)]
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
for p, df_p in enumerate(df_phases):
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
df_p_day = df_p.loc[start_time:end_time]
df_p_day_med = df_p_day.resample('30s').median().rename(columns={'Value': str(start_time.date())})
df_p_day_med.index = df_p_day_med.index.time
weekday = start_time.date().weekday()
# print(weekday_dfs_phases[p][weekday])
if weekday_dfs_phases[p][weekday] is None:
weekday_df = df_p_day_med
weekday_dfs_phases[p][weekday] = weekday_df
else:
weekday_df = weekday_dfs_phases[p][weekday]
weekday_df = weekday_df.join(df_p_day_med, how='outer')
weekday_dfs_phases[p][weekday] = weekday_df
print("Split DF")
for p, df_weekdays in enumerate(weekday_dfs_phases):
for w, df in enumerate(df_weekdays):
df['med'] = df.median(axis=1)
# print(df)
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print(df_phases_h)
for p, df_p in enumerate(df_phases_h):
print(p)
df_weekdays = weekday_dfs_phases[p]
df_p['SeasDif'] = df_p.apply(lambda row: (row['Value'] - df_weekdays[row.name.weekday()].loc[
(row.name - datetime.timedelta(seconds=row.name.second % 30,
microseconds=row.name.microsecond)).time()]['med']), axis=1)
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def add_new_seasonal_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
for path in file_paths:
station_season =
|
pd.read_pickle(pickle_dir / (path + 'season_aggregation'))
|
pandas.read_pickle
|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat":
|
Categorical([1, 2], categories=df.cat.cat.categories)
|
pandas.Categorical
|
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expection_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import pandas as pd
import logging as log
from datetime import date
from collections import OrderedDict
def get_tests(df, tests, value='has_passed'):
archiving = df[['archiving_validator']].replace({0: 'No Data'})
if archiving.empty:
log.warning('No tests found.')
return None
archiving['version'] = archiving.apply(lambda row: row['archiving_validator']['version']
if row['archiving_validator'] != 'No Data' else row['archiving_validator'], axis=1)
for t in tests:
archiving[t] = archiving.apply(lambda row: row['archiving_validator'].get(t, {value: 'No Data'})[value]
if row['archiving_validator'] != 'No Data' else row['archiving_validator'],
axis=1)
columns = list(tests)
columns.append('version')
return archiving[columns]
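# Example (hedged): given a dataframe with an 'archiving_validator' column,
#   get_tests(df, ['HasUsableT1', 'IsClassicDICOM'])
# returns per-session pass/fail values plus the validator version; the test
# names here are hypothetical.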
def which_sessions_have_validators(br):
# make a list of all existing validators
validators = set()
for r in list(br['BBRC_Validators']):
for e in r:
validators.add(e)
vl, count = {}, {}
# for each validator make a list of sessions having it
for v in validators:
has_val, has_not_val = [], []
for r, s in zip(br['BBRC_Validators'], br['Session']):
if v in r:
has_val.append(s)
else:
has_not_val.append(s)
vl[v] = {'Sessions with Validator': has_val,
'Sessions without Validator': has_not_val}
count[v] = {'Sessions with Validator': len(has_val),
'Sessions without Validator': len(has_not_val)}
d = {'count': count, 'list': vl}
series =
|
pd.Series([x for e in br['BBRC_Validators'] for x in e])
|
pandas.Series
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib.lines import Line2D
sns.set(context="paper")
sns.set_palette("colorblind")
def plot_results(dataset: str, title: str):
df_NN = pd.read_csv(f"./results/{dataset}/nn.csv")
df_NN["Methode"] = "NN"
df_dta =
|
pd.read_csv(f"./results/{dataset}/dta.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
    Attributes and methods to analyse stocks traded in B3 - BOLSA BRASIL BALCÃO.
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
        # Sometimes the descriptions of the accounts have small differences for the
        # same account in different periods, such as punctuation. The purpose of the
        # df_index is to keep only one description per account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
        # Due to the line 'df = df.sort_values([("date"), ("value")], axis=1,
        # ascending=False)' above, the columns are ordered by date descending
        # and value descending. The purpose here is to set the order to
        # date descending and value ascending.
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
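    # Example (hedged sketch): assuming the local finance.db has been populated
    # by the update scripts above, something like
    #   t = Ticker('PETR4')
    #   t.income_statement(quarter=False, start_period='2018-01-01')
    # returns the pivoted statement values; the ticker is illustrative only.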
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df =
|
pd.read_sql(query, conn)
|
pandas.read_sql
|
import finterstellar as fs
import pandas as pd
import numpy as np
import datetime as dt
class LoadData:
def read_investing_price(self, path, cd):
file_name = path + cd + ' Historical Data.csv'
df = pd.read_csv(file_name, index_col='Date')
return (df)
def create_portfolio_df(self, path, p_name, p_cd):
new_df = self.make_historical_price_df(path, p_cd)
prices_df = self.create_master_file(path, p_name, new_df)
prices_df = self.update_master_file(path, p_name, new_df)
return (prices_df)
def make_historical_price_df(self, path, s_cd):
cds = fs.str_list(s_cd)
dates = pd.Series()
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
c = prices_df['Price']
dates_new = pd.Series(prices_df.index)
dates = dates.append(dates_new)
dates = dates.drop_duplicates().sort_values().reset_index()
dates = dates.drop(['index'], axis=1)
universe_df = pd.DataFrame(index=dates[0])
universe_df.index.name = 'Date'
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
prices_df = self.price_df_trimming(prices_df, c)
universe_df[c] = prices_df[c]
universe_df
universe_df = universe_df.fillna(method='ffill')
return (universe_df)
def create_master_file(self, path, f_name, df):
file_name = path + 'fs ' + f_name + '.csv'
try:
f = open(file_name)
print('Updating master file')
f.close()
except IOError as e:
df.index = pd.to_datetime(df.index)
df.index.name = 'Date'
#df = df.fillna(method='ffill')
#today_date = pd.Timestamp.today().date().strftime('%y%m%d')
df.to_csv(file_name)
return (df)
def update_master_file(self, path, n, new_df):
try:
file_name = 'fs ' + n + '.csv'
master_df = self.read_master_file(path, n)
universe_df = new_df.combine_first(master_df)
universe_df.index.name = 'Date'
#universe_df = universe_df.fillna(method='ffill')
universe_df.to_csv(path + file_name)
except IOError as e:
print('Creating master file')
self.create_master_file(path, n, new_df)
universe_df = new_df
return (universe_df)
def read_master_file(self, path, n):
file_name = path + 'fs ' + n + '.csv'
prices_df =
|
pd.read_csv(file_name, index_col='Date')
|
pandas.read_csv
|
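# Hedged sketch of the master-file update logic above (update_master_file):
# values from the new download take precedence where the frames overlap, and
# everything else is preserved. Tickers and prices here are invented.
import pandas as pd

master = pd.DataFrame(
    {"SPY": [300.0, 301.0], "QQQ": [200.0, 201.0]},
    index=pd.to_datetime(["2020-01-01", "2020-01-02"]),
)
new = pd.DataFrame(
    {"SPY": [301.5, 302.0]},
    index=pd.to_datetime(["2020-01-02", "2020-01-03"]),
)
master.index.name = new.index.name = "Date"

# combine_first keeps values from `new` where available and falls back to
# `master` elsewhere, producing the union of dates and columns.
universe = new.combine_first(master)
print(universe)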
import numpy as np
import pandas as pd
from scipy import interpolate
import json
from datetime import datetime, timedelta
import requests
import io
print("RUNNING!")
print(datetime.now())
def myconverter(o):
if isinstance(o, datetime):
return o.__str__()
url = "https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv"
s = requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df = df[df['Area type'] == 'ltla']
df['Specimen date'] =
|
pd.to_datetime(df['Specimen date'])
|
pandas.to_datetime
|
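# Minimal sketch of the two preprocessing steps in the snippet above
# (keeping only lower-tier local authority rows and parsing the specimen
# date), using a tiny invented frame instead of downloading the live CSV.
import pandas as pd

sample = pd.DataFrame({
    "Area type": ["ltla", "utla", "ltla"],
    "Specimen date": ["2020-05-01", "2020-05-01", "2020-05-02"],
})
sample = sample[sample["Area type"] == "ltla"].copy()
sample["Specimen date"] = pd.to_datetime(sample["Specimen date"])
print(sample.dtypes)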
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate
from pandas.api.types import is_numeric_dtype
import statsmodels.api as sm
import warnings
import time
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
class LinearRegressionClass:
def __init__(self,df,response,sig_level=0.05,max_iter=500,cols_to_keep_static=[],cols_to_try_individually=[]):
'''
:param df: a dataframe
:param response: a string. This must be an existing column in df
:param sig_level: a float. The significance level the forward selection will use
:param max_iter: an integer. The maximum iterations the solvers will use to try to converge
:param cols_to_keep_static: a list. Used in forward selection to not omit these columns
:param cols_to_try_individually: a list. The columns to test in a regression one at a time to identify which
one has the greatest relationship with the response controlled for the cols_to_keep_static
'''
# attach attributes to the object
self.df = df.copy()
self.response = response
self.sig_level = sig_level
self.max_iter=max_iter
self.warnings = ''
self.error_message = ''
self.cols_to_keep_static = cols_to_keep_static
self.cols_to_try_individually = cols_to_try_individually
if self.response in self.cols_to_keep_static:
print('The response - {} is in the static columns. Removed it.'.format(response))
self.cols_to_keep_static = list(filter(lambda x: x != self.response,self.cols_to_keep_static))
if self.response in self.cols_to_try_individually:
print('The response - {} is in the cols to try individually columns. Removed it.'.format(response))
self.cols_to_try_individually = list(filter(lambda x: x != self.response,self.cols_to_try_individually))
def prepare_data(self,df,response):
y = df[response]
X = df[list(filter(lambda x: x != response, df.columns))]
X = sm.add_constant(X, has_constant='add')
return X, y
def linear_regression_utility_check_response(self,series):
if (not is_numeric_dtype(series)):
self.error_message = self.error_message + '\n' + 'The response variable should be numeric type'
print('The response variable should be numeric type')
return False
return True
def lin_reg_diagnostic_performance(self,X,y):
cvs = cross_validate(LinearRegression(), X, y, cv=5,
scoring=['r2', 'neg_mean_squared_error', 'neg_root_mean_squared_error'])
s = """Performance\n5-Fold Cross Validation Results:\nTest Set r2 = {}\nneg_mean_squared_error = {}\nneg_root_mean_squared_error = {}""".format(
round(cvs['test_r2'].mean(), 2), round(cvs['test_neg_mean_squared_error'].mean(), 2),
round(cvs['test_neg_root_mean_squared_error'].mean(), 2))
self.performance = s
self.performance_df = pd.DataFrame(data=[round(cvs['test_r2'].mean(), 2), round(cvs['test_neg_mean_squared_error'].mean(), 2),
round(cvs['test_neg_root_mean_squared_error'].mean(), 2)],
index=['test_r2','test_neg_mean_squared_error','test_neg_root_mean_squared_error'],
columns=['Score'])
return s
def lin_reg_diagnostic_correlations(self,X):
print("Correlations")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
upp_mat = np.triu(X.corr())
sns.heatmap(X.corr(), vmin=-1, vmax=+1, annot=True, cmap='coolwarm', mask=upp_mat, ax=ax)
self.fig_correlations = fig
self.ax_correlations = ax
return fig,ax
def linear_regression_get_report(self,model,X,y,verbose=True):
pass
def prepare_categories(self,df, response, drop=False):
cat_cols = list(filter(lambda x: not is_numeric_dtype(df[x]), df.columns))
cat_cols = list(set(cat_cols) - {response} - set(self.cols_to_keep_static))
df = pd.get_dummies(df, columns=cat_cols, drop_first=drop)
df = pd.get_dummies(df, columns=self.cols_to_keep_static, drop_first=True)
self.cols_to_keep_static_dummified = []
for col in self.cols_to_keep_static:
for col_dummy in df.columns:
if col in col_dummy:
self.cols_to_keep_static_dummified.append(col_dummy)
return df
def get_interpretation(self,result=None,feature_list=None,df=None):
'''
Given a trained model, calculate the average probabilities due to feature changes
'''
if (result is None) or (feature_list is None):
try:
feature_list = self.X_with_feature_selection.columns
result = self.result_with_feature_selection
except:
feature_list = self.X.columns
try:
result = self.result
except:
result = self.basic_result
# take a copy of the original df and prepare the dataset
if df is None:
df = self.df.copy()
df_temp = df.copy()
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp,self.response)
full_feature_list = list(feature_list)
if 'const' not in full_feature_list:
full_feature_list = ['const'] + full_feature_list
# comparative uplift section
comparative_dict = dict()
for col1 in df.columns:
for col2 in full_feature_list:
# if this feature was dummified
if col1 + '_' in col2:
t = X[full_feature_list].copy()
# First get prediction with 0
t[col2] = 0
comparative_dict[col2] = [result.predict(t).mean()]
# Then get prediction with 1
t[col2] = 1
comparative_dict[col2].append(result.predict(t).mean())
elif col1 == col2:
t = X[full_feature_list].copy()
# first get prediction with average
t[col2] = t[col2].mean()
comparative_dict[col2] = [result.predict(t).mean()]
# then get prediction with +1
t[col2] = t[col2] + 1
comparative_dict[col2].append(result.predict(t).mean())
feature_interpretability_comparative_df = pd.DataFrame(comparative_dict).T
feature_interpretability_comparative_df.columns = ['Prediction_average_or_without','Prediction_add1_or_with']
feature_interpretability_comparative_df['diff'] = feature_interpretability_comparative_df['Prediction_add1_or_with'] - feature_interpretability_comparative_df['Prediction_average_or_without']
self.feature_interpretability_comparative_df = feature_interpretability_comparative_df
# get a base probability (this is just the average probability)
base_probability = result.predict(X[full_feature_list]).mean()
probability_dict = dict()
probability_dict['base'] = base_probability
# for each column in the original df
for col in df.columns:
# for each column in the result's feature list
for col2 in feature_list:
# check if this feature was dummified from this column
if col + '_' in col2:
# if this feature was dummified from this column then update this column to be this feature value
df_temp = df.copy()
df_temp[col] = col2.replace(col + '_', '')
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp, self.response)
# check that all features the model is expecting exist in X
for col3 in feature_list:
if col3 not in X.columns:
X[col3] = 0
# calculate the probability
probability = result.predict(X[full_feature_list]).mean()
probability_dict[col2] = probability
elif col == col2:
# if this column was not dummified then it is numeric so add 1 to it
df_temp = df.copy()
df_temp[col] = df_temp[col] + 1
df_temp = self.prepare_categories(df_temp, self.response, drop=False)
X, y = self.prepare_data(df_temp, self.response)
probability = result.predict(X[full_feature_list]).mean()
probability_dict[col2] = probability
# save the probability dictionary
self.feature_interpretability_dict = probability_dict
self.feature_interpretability_df = pd.DataFrame(data=probability_dict.values(), index=probability_dict.keys(), columns=['Probability'])
return self.feature_interpretability_df
def lin_reg_basic(self,df=None):
'''
Run a basic linear regression model
'''
if df is None:
df = self.df
X, y = self.prepare_data(df, self.response)
model = sm.OLS(y, X)
result = model.fit(maxiter=self.max_iter)
self.basic_result = result
self.basic_model = model
self.X = X
self.y = y
return result
def predict_from_original(self,df):
df = self.prepare_categories(df, self.response, drop=False)
all_cols = []
try:
all_cols = list(self.X_with_feature_selection.columns)
except:
all_cols = list(self.X.columns)
for col in all_cols:
if col not in df.columns:
df[col] = 0
res = None
try:
res = self.result_with_feature_selection
except:
res = self.result
return res.predict(df[all_cols])
def lin_reg(self,df=None):
if df is None:
df1 = self.df[~self.df.isna().any(axis=1)].copy()
if len(df1) < len(self.df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(len(self.df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
else:
df1 = df[~df.isna().any(axis=1)].copy()
if len(df1) < len(df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
if not self.linear_regression_utility_check_response(df1[self.response]):
return None
df1 = self.prepare_categories(df1,self.response,drop=True)
result = self.lin_reg_basic(df1)
self.result = result
self.model = self.basic_model
return result
def lin_reg_with_feature_selection(self,df=None,run_for=0,verbose=True):
# start the timer in case a time limit is specified
start_time = time.time()
if df is None:
# get rid of nans. There should be no nans. Imputation should be performed prior to this point
df1 = self.df[~self.df.isna().any(axis=1)].copy()
# show a warning to let the user know of the dropped NaNs
if len(df1) < len(self.df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(self.df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
else:
# get rid of nans. There should be no nans. Imputation should be performed prior to this point
df1 = df[~df.isna().any(axis=1)].copy()
# show a warning to let the user know of the dropped NaNs
if len(df1) < len(df):
warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
len(df),
len(df1))
warnings.warn(warning_message)
print(warning_message)
self.warnings = self.warnings + '\n' + warning_message
# check that the response is in the correct format to perform linear regression
if not self.linear_regression_utility_check_response(df1[self.response]):
return None
# automatically identify categorical variables and dummify them
df1 = self.prepare_categories(df1, self.response, drop=False)
# raise a warning if the number of columns surpasses the number of rows
if len(df1.columns) > len(df1):
warnings.warn(
'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
len(df1.columns), len(df1)))
print(
'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
len(df1.columns), len(df1)))
# the initial list of features
remaining = list(set(df1.columns) - {self.response} - set(self.cols_to_keep_static_dummified))
# this holds the tried and successful feature set
full_feature_set = self.cols_to_keep_static_dummified
# get the first linear regression output for only the constant/base model
first_result = self.lin_reg_basic(df1[[self.response]])
# save the model and the X and y used to train it
self.X_with_feature_selection = self.X.copy()
self.y_with_feature_selection = self.y.copy()
self.model_with_feature_selection = self.basic_model
# get the r2 of the base model
rsquared = first_result.rsquared
# store the result of the first model
final_result = first_result
# while there are still remaining features to try keep looping
while len(remaining) > 0:
# store the last pseudo r2 value
last_rsquared = rsquared
# the next feature to add to the full feature set
next_col = None
# the result corresponding to the addition of the next col
next_result = None
# try adding each column from the remaining columns
for col in sorted(remaining):
# add the next column to the feature set and try it out. Try except is added because sometimes
# when categorical variables are dummified and you add both variables you get a singular matrix
this_feature_set = full_feature_set + [col]
try:
result = self.lin_reg_basic(df1[this_feature_set + [self.response]])
except Exception as e:
remaining.remove(col)
continue
# the resulting r2 from this fit
this_rsquared = result.rsquared
# if a feature results in nan for r2 skip it
if this_rsquared is np.nan:
print('Note: Feature {} is resulting with a nan r2. Skipping feature'.format(col))
continue
# this feature is recorded as a candidate if the conditions are met
if (this_rsquared > last_rsquared) and (result.pvalues.loc[col] <= self.sig_level):
last_rsquared = this_rsquared
next_col = col
next_result = result
# save the model and the X and y used to train it
self.X_with_feature_selection = self.X.copy()
self.y_with_feature_selection = self.y.copy()
self.model_with_feature_selection = self.basic_model
# if after the loop no new candidates were found then we stop looking
if next_col is None:
break
# add the candidate to the permanent list
full_feature_set.append(next_col)
# show progress
if verbose:
print('********Adding {} with rsquared = {}********'.format(next_col, last_rsquared))
# store the result
final_result = next_result
# remove the chosen candidate from the remaining features
remaining.remove(next_col)
# check if it's not taking too long
if (time.time() - start_time > run_for) and (run_for > 0):
print(
'Aborting: Has been running for {}s > {}s. {} out of {} columns left. There are probably too many categories in one of the columns'.format(
round(time.time() - start_time, 2), run_for, len(remaining), len(df1.columns) - 1))
return
self.final_feature_set = full_feature_set
self.result_with_feature_selection = final_result
return final_result
def lin_reg_one_at_a_time(self,with_feature_selection=False,get_interpretability=False):
dic = dict()
df1 = self.df.copy()
df1 = df1[[self.response]+self.cols_to_keep_static + self.cols_to_try_individually].copy()
for this_col_to_try in self.cols_to_try_individually:
if with_feature_selection:
result = self.lin_reg_with_feature_selection(df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
if get_interpretability:
self.get_interpretation(self.result_with_feature_selection,self.final_feature_set
,df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
else:
result = self.lin_reg(df=df1[self.cols_to_keep_static + [self.response,this_col_to_try]])
if get_interpretability:
self.get_interpretation(self.result, self.X.columns
, df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
for col in list(filter(lambda x: this_col_to_try in x,result.params.index)):
if get_interpretability:
dic[col] = [result.params[col],result.pvalues[col],self.feature_interpretability_df['Probability'][col],
self.feature_interpretability_df['Probability']['base']]
else:
dic[col] = [result.params[col], result.pvalues[col]]
df_one_at_a_time = pd.DataFrame(dic).T
if get_interpretability:
df_one_at_a_time.columns = ['Coefficient','Pvalue','Controlled Probability','Base Probability']
else:
df_one_at_a_time.columns = ['Coefficient','Pvalue']
self.df_one_at_a_time = df_one_at_a_time
return df_one_at_a_time
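# Hedged usage sketch for LinearRegressionClass on a small synthetic frame
# (the unit tests below exercise it on the Titanic CSV). The column names,
# coefficients and noise level are invented; this only shows the calling
# pattern, not a recommended model.
def _example_linear_regression_class():
    np.random.seed(0)
    n = 200
    demo = pd.DataFrame({
        "x1": np.random.randn(n),
        "group": np.random.choice(["a", "b"], size=n),
    })
    demo["y"] = 2.0 * demo["x1"] + (demo["group"] == "b") * 1.5 + 0.1 * np.random.randn(n)
    lr = LinearRegressionClass(demo, response="y", sig_level=0.05)
    result = lr.lin_reg_with_feature_selection(verbose=False)
    print(result.params)
    print(lr.lin_reg_diagnostic_performance(lr.X_with_feature_selection,
                                            lr.y_with_feature_selection))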
def unit_test_1():
print('Unit test 1...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg_basic()
result_required = [110.08, 3.74, -35.75, 2.54, -0.17, 5.51, 10.21]
result_actual = list(my_linear_regresion_class.basic_result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.basic_result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -1812.52
neg_root_mean_squared_error = -41.66'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [34.69, 38.44, -1.05, 37.23, 34.52, 40.21, 44.9]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [-1.05, 34.52, 37.23, 38.44, 40.21, 44.9]
result_actual = sorted(list(my_linear_regresion_class.feature_interpretability_comparative_df['Prediction_add1_or_with']))
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_2():
print('Unit test 2...')
import sys
import os
import warnings
np.random.seed(101)
warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Survived',sig_level=0.05)
my_linear_regresion_class.lin_reg()
result_required = [0.88, -0.19, 0.49, -0.01, -0.05, -0.01, 0.0]
result_actual = list(my_linear_regresion_class.basic_result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 0.4061624649859944
result_actual = my_linear_regresion_class.basic_result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -0.15
neg_root_mean_squared_error = -0.39'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [0.41, 0.21, 0.89, 0.4, 0.35, 0.39, 0.41]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_3():
print('Unit test 3...')
import sys
import os
import warnings
np.random.seed(101)
warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg()
result_required = [112.61, 3.74, -35.75, -0.17, 5.51, 10.21, -2.54]
result_actual = list(my_linear_regresion_class.result.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.result.predict(my_linear_regresion_class.X).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.36
neg_mean_squared_error = -1812.52
neg_root_mean_squared_error = -41.66'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X,
my_linear_regresion_class.y)
assert (result_required == result_actual)
result_required = [34.69, 38.44, -1.05, 33.77, 34.52, 40.21, 44.9]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [-1.05, 33.77, 34.52, 38.44, 40.21, 44.9]
result_actual = sorted(list(my_linear_regresion_class.feature_interpretability_comparative_df['Prediction_add1_or_with']))
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [1.0, 0.83, -2.94, -3.65, -4.17, -4.59, -8.48, -8.77, -10.16]
result_actual = sorted(list(my_linear_regresion_class.predict_from_original(df)))[:-10:-1]
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_4():
print('Unit test 4...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df = pd.read_csv(titanic_csv)
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']]
df = df.dropna()
my_linear_regresion_class = LinearRegressionClass(df,'Fare',sig_level=0.05)
my_linear_regresion_class.lin_reg_with_feature_selection(verbose=False)
result_required = [106.7, -35.73, 11.05, 6.15]
result_actual = list(my_linear_regresion_class.result_with_feature_selection.params)
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (sorted(result_required) == sorted(result_actual))
result_required = 34.69
result_actual = my_linear_regresion_class.result_with_feature_selection.predict(my_linear_regresion_class.X_with_feature_selection).mean()
result_required = round(result_required, 2)
result_actual = round(result_actual, 2)
assert (result_required == result_actual)
result_required = '''Performance
5-Fold Cross Validation Results:
Test Set r2 = 0.37
neg_mean_squared_error = -1798.9
neg_root_mean_squared_error = -41.44'''
result_actual = my_linear_regresion_class.lin_reg_diagnostic_performance(my_linear_regresion_class.X_with_feature_selection,
my_linear_regresion_class.y_with_feature_selection)
assert (result_required == result_actual)
result_required = [34.69, -1.04, 40.84, 45.75]
result_actual = list(my_linear_regresion_class.get_interpretation()['Probability'])
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
result_required = [14.62, 4.81, 4.81, 4.81, -1.34, -1.34, -7.48, -7.48, -7.48]
result_actual = sorted(list(my_linear_regresion_class.predict_from_original(df)))[:-10:-1]
result_required = list(map(lambda x: round(x, 2), result_required))
result_actual = list(map(lambda x: round(x, 2), result_actual))
assert (result_required == result_actual)
print('Success!')
def unit_test_5():
print('Unit test 5...')
import sys
import os
import warnings
np.random.seed(101)
#warnings.filterwarnings("ignore")
current_dir = '/'.join(sys.path[0].split('/')[:-1]) # sys.path[0]
data_dir = os.path.join(current_dir, 'Data', 'titanic')
titanic_csv = os.path.join(data_dir, 'titanic.csv')
df =
|
pd.read_csv(titanic_csv)
|
pandas.read_csv
|
import json
import io
from abc import ABC, abstractmethod
from pydoc import locate
import msgpack
import numpy as np
import pandas as pd
import base64
import ast
from PIL import Image as PILImage
from raymon.globals import Serializable
class RaymonDataType(Serializable, ABC):
def to_json(self):
return json.dumps(self.to_jcr())
def to_msgpack(self):
return msgpack.packb(self.to_jcr())
def class2str(self):
module = str(self.__class__.__module__)
classname = str(self.__class__.__name__)
return f"{module}.{classname}"
class Image(RaymonDataType):
def __init__(self, data, lossless=False):
self.validate(data=data, lossless=lossless)
self.data = data
self.lossless = lossless
def validate(self, data, lossless):
# Validate 3 channels
if not isinstance(data, PILImage.Image):
raise ValueError("Image shoud be a PIL Image")
if not isinstance(lossless, bool):
raise ValueError("lossless should be boolean")
return True
def to_jcr(self):
img_byte_arr = io.BytesIO()
if self.lossless:
self.data.save(img_byte_arr, format="png")
else:
# We'll save the image as JPEG. This is not lossless, but it is saved at the highest JPEG quality. This is about 25 times faster than dumping a lossless PNG and results in a file only 1/5th the size, before b64 encoding.
# Measurements: PNG: 3.767667055130005s, 4008037 bytes
# For impact on algorithms see "On the Impact of Lossy Image and Video Compression on the Performance of Deep Convolutional Neural Network Architectures" (https://arxiv.org/abs/2007.14314), although this paper takes jpeg quality 95 as highest quality.
self.data.save(img_byte_arr, format="jpeg", quality=95)
img_byte_arr = img_byte_arr.getvalue()
b64 = base64.b64encode(img_byte_arr).decode()
data = {"type": self.class2str(), "params": {"data": b64, "lossless": self.lossless}}
return data
@classmethod
def from_jcr(cls, params):
b64 = params["data"]
img_byte_arr = io.BytesIO(base64.decodebytes(b64.encode()))
img = PILImage.open(img_byte_arr)
return cls(data=img)
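# Hedged round-trip sketch for the Image wrapper above: wrap a PIL image,
# serialize it to a JSON-compatible dict and rebuild it from the payload.
# The 64x64 solid-colour image is generated here purely for illustration.
def _example_image_roundtrip():
    pil_img = PILImage.new("RGB", (64, 64), color=(200, 30, 30))
    wrapped = Image(pil_img, lossless=True)
    jcr = wrapped.to_jcr()                    # {"type": ..., "params": {"data": <b64>, ...}}
    restored = Image.from_jcr(jcr["params"])  # decodes the base64 payload back into a PIL image
    print(restored.data.size)                 # (64, 64)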
class Numpy(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
if not isinstance(data, np.ndarray):
raise ValueError(f"Data must bu of type numpy.ndarray, not {type(data)}.")
return True
def to_jcr(self):
b64 = base64.b64encode(self.data).decode()
shape = self.data.shape
dtype = self.data.dtype
data = {"type": self.class2str(), "params": {"data": b64, "shape": str(shape), "dtype": str(dtype)}}
return data
@classmethod
def from_jcr(cls, params):
shape = ast.literal_eval(params["shape"])
dtype = params["dtype"]
b64 = params["data"]
nprest = np.frombuffer(base64.decodebytes(b64.encode()), dtype=str(dtype)).reshape(shape)
return cls(data=nprest)
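# Hedged round-trip sketch for the Numpy wrapper above: the raw buffer is
# base64-encoded together with its shape and dtype, then rebuilt with
# np.frombuffer. The array contents are arbitrary example data.
def _example_numpy_roundtrip():
    arr = np.arange(12, dtype=np.float32).reshape(3, 4)
    wrapped = Numpy(arr)
    jcr = wrapped.to_jcr()
    restored = Numpy.from_jcr(jcr["params"])
    assert np.array_equal(arr, restored.data)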
class Series(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
if not isinstance(data, pd.Series):
raise ValueError("Data should be a Pandas Series")
return True
def to_jcr(self):
data = {
"type": self.class2str(),
"params": {
"data": json.loads(self.data.to_json()),
},
}
return data
@classmethod
def from_jcr(cls, jcr):
series =
|
pd.Series(**jcr)
|
pandas.Series
|
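# Hedged, self-contained sketch of the Series round trip in the snippet above:
# the pandas Series is dumped via to_json(), parsed back, and rebuilt with
# pd.Series(**params), mirroring the truncated from_jcr / pd.Series(**jcr) call.
# Values are invented; note that JSON round-tripping turns non-string index
# labels into strings.
import json
import pandas as pd

s = pd.Series([1.5, 2.5, 3.5], index=["a", "b", "c"])
params = {"data": json.loads(s.to_json())}
restored = pd.Series(**params)
print(restored.equals(s))  # True here because the index labels are strings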
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
|
tm.assert_index_equal(result, idx)
|
pandas.util.testing.assert_index_equal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
data_proc.py
Calculate and plot analyzed data from centrifugation experiment
Handles the primary functions
"""
import sys
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
SUCCESS = 0
INVALID_DATA = 1
IO_ERROR = 2
DEF_CSV_FILE = 'data.csv'
DEF_EXCEL_FILE = 'data.xlsx'
def warning(*objs):
"""Writes a message to stderr."""
print("WARNING: ", *objs, file=sys.stderr)
def csv_data_analysis(csv_data_file):
"""
Calculates solvent concentration. Finds aging time and g dried cake/g oil for each row
Parameters
----------
    csv_data_file : csv file containing array of experiment data
first row: solution information
second row and below: each row contains data from a run of centrifugation
(see README.md for more detailed description)
Returns
-------
    cent_data : pandas DataFrame
first row: solvent concentration
second row and below: each row contains columns of aging time (h), aging time (s),
dried cake concentration (g/g oil)
"""
data_array = pd.read_csv(csv_data_file, comment='#', header=None)
# calculate solvent concentration
solvent_conc = np.divide(np.subtract(data_array.loc[[0], [3]], data_array.loc[[0], [2]]),
np.subtract(data_array.loc[[0], [3]], data_array.loc[[0], [1]]))
# find the start time of the experiment
start_time = data_array.loc[[0], [0]].values
# gather centrifugation data into separate arrays
expt_array = data_array[[1, 2, 3]].iloc[1:]
cent_time = data_array[[0]].iloc[1:]
# assign variables to each column of expt_array
empty_tube = expt_array[1]
tube_liquid = expt_array[2]
tube_dried_cake = expt_array[3]
# calculate mass of tube contents
mass_liquid = tube_liquid - empty_tube
mass_dried_cake = tube_dried_cake - empty_tube
mass_oil = (1-solvent_conc.iloc[0][3])*mass_liquid
# calculate solution aging time at each centrifugation
aging_time = [
pd.to_datetime(cent_time.values[i, 0])-pd.to_datetime(start_time[0, 0])
for i in range(len(cent_time.values))
]
aging_time_sec = pd.Series(aging_time).dt.total_seconds()
aging_time_hrs = aging_time_sec/3600
# calculate dried cake concentration
conc_dried_cake = mass_dried_cake/mass_oil
cent_data = pd.concat([aging_time_hrs, aging_time_sec, conc_dried_cake.reset_index(drop=True)], axis=1)
return cent_data
def excel_data_analysis(excel_data_file):
"""
Calculates solvent concentration. Finds aging time in hrs and seconds, and g dried cake/g oil for each row.
Works for excel file with multiple sheets
Parameters
----------
excel_data_file : excel file containing array of experiment data
first row: solution information
second row and below: each row contains data from a run of centrifugation
(see README.md for more detailed description)
data from an experiment in each sheet
Returns
-------
cent_data : pandas DataFrame
first row: solvent concentration
second row and below: each row contains columns of aging time (h), aging time (s),
dried cake concentration (g/g oil)
"""
i = 0
frame = None
# Concatenate analyzed data from each sheet in excel file
    while True:
        try:
            calc_data = calcAndConc(excel_data_file, i)
            frame = pd.concat([frame, calc_data], axis=1)
            i = i + 1
        except Exception:
            # stop once there are no more sheets to read
            break
cent_data = frame
return cent_data
def calcAndConc(excel_data_file, i):
"""
Calculates solvent concentration. Finds aging time in hrs and seconds, and g dried cake/g oil for each row.
:param excel_data_file: excel file to read data from
:param i: sheet number of excel file data will be pulled from
:return: pandas DataFrame of aging time (hrs), aging time (sec), and dried cake conc (g/g oil) from data set
of sheet i
"""
all_data = pd.read_excel(excel_data_file, sheet_name=i)
# Separate solvent addition data
solvent_add_data = all_data.iloc[[0], [0, 1, 2, 3]]
# Separate centrifugation data
    data_array = all_data.iloc[:, 4:8].dropna(axis=0)
# Calculate solvent concentration
solvent_conc = np.divide(np.subtract(solvent_add_data.values[0, 3], solvent_add_data.values[0, 2]),
np.subtract(solvent_add_data.values[0, 3], solvent_add_data.values[0, 1]))
# find the start time of the experiment
start_time = solvent_add_data.values[0, 0]
# gather centrifugation data into separate arrays
expt_array = data_array.iloc[:, 1:4]
cent_time = data_array.iloc[:, 0]
# assign variables to each column of expt_array
empty_tube = expt_array.values[:, 0]
tube_liquid = expt_array.values[:, 1]
tube_dried_cake = expt_array.values[:, 2]
# calculate mass of tube contents
mass_liquid = tube_liquid - empty_tube
mass_dried_cake = tube_dried_cake - empty_tube
mass_oil = (1-solvent_conc)*mass_liquid
# calculate solution aging time at each centrifugation
aging_time = cent_time - start_time
aging_time_sec = pd.Series(aging_time).dt.total_seconds()
aging_time_hrs = aging_time_sec/3600
# calculate dried cake concentration
conc_dried_cake = pd.Series(mass_dried_cake/mass_oil)
cent_data =
|
pd.concat([aging_time_hrs, aging_time_sec, conc_dried_cake], axis=1)
|
pandas.concat
|
"""Miscellaneous internal PyJanitor helper functions."""
import functools
import os
import sys
import warnings
from itertools import chain, product
from typing import Callable, Dict, List, Optional, Pattern, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
"""
One-liner syntactic sugar for checking types.
Should be used like this::
check('x', x, [int, float])
:param varname: The name of the variable.
:param value: The value of the varname.
:param expected_types: The types we expect the item to be.
:raises TypeError: if data is not the expected type.
"""
is_expected_type = False
for t in expected_types:
if isinstance(value, t):
is_expected_type = True
break
if not is_expected_type:
raise TypeError(
"{varname} should be one of {expected_types}".format(
varname=varname, expected_types=expected_types
)
)
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
    :returns: The cleaned cell value as a float.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
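# Hedged usage sketch (the `_demo_...` helpers in this module are illustrative
# additions, not part of the original source): shows the accounting cleaner
# inside a pandas `apply`, mapping parenthesised values to negative floats and
# a lone "-" to 0.0.
def _demo_clean_accounting_column() -> pd.Series:
    s = pd.Series(["1,200.00", "(350.25)", "-"])
    # expected values: 1200.0, -350.25, 0.0
    return s.apply(_clean_accounting_column)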
def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
else:
return "".join(i for i in x if i in acceptable_currency_characters)
else:
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""
Strip underscores from DataFrames column names.
Underscores can be stripped from the beginning, end or both.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame with underscores removed.
"""
df = df.rename(
columns=lambda x: _strip_underscores_func(x, strip_underscores)
)
return df
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
def import_message(
submodule: str,
package: str,
conda_channel: str = None,
pip_install: bool = False,
):
"""
Return warning if package is not found.
Generic message for indicating to the user when a function relies on an
optional module / package that is not currently installed. Includes
installation instructions. Used in `chemistry.py` and `biology.py`.
:param submodule: pyjanitor submodule that needs an external dependency.
:param package: External package this submodule relies on.
:param conda_channel: Conda channel package can be installed from,
if at all.
:param pip_install: Whether package can be installed via pip.
"""
is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
installable = True
if is_conda:
if conda_channel is None:
installable = False
installation = f"{package} cannot be installed via conda"
else:
installation = f"conda install -c {conda_channel} {package}"
else:
if pip_install:
installation = f"pip install {package}"
else:
installable = False
installation = f"{package} cannot be installed via pip"
print(
f"To use the janitor submodule {submodule}, you need to install "
f"{package}."
)
print()
if installable:
print("To do so, use the following command:")
print()
print(f" {installation}")
else:
print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
"""
Raises error if a function operating on a `DataFrame` is not idempotent,
that is, `func(func(df)) = func(df)` is not true for all `df`.
:param func: A python method.
:param df: A pandas `DataFrame`.
:param args: Positional arguments supplied to the method.
:param kwargs: Keyword arguments supplied to the method.
:raises ValueError: If `func` is found to not be idempotent for the given
`DataFrame` `df`.
"""
    if not func(df, *args, **kwargs).equals(
        func(func(df, *args, **kwargs), *args, **kwargs)
    ):
raise ValueError(
"Supplied function is not idempotent for the given " "DataFrame."
)
def deprecated_alias(**aliases) -> Callable:
"""
Used as a decorator when deprecating old function argument names, while
keeping backwards compatibility.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
Functional usage example:
.. code-block:: python
@deprecated_alias(a='alpha', b='beta')
def simple_sum(alpha, beta):
return alpha + beta
:param aliases: Dictionary of aliases for a function's arguments.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rename_kwargs(func.__name__, kwargs, aliases)
return func(*args, **kwargs)
return wrapper
return decorator
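# Hedged usage sketch (illustrative only, not in the original module): calling
# the decorated function with the deprecated keyword emits a DeprecationWarning
# and forwards the value to the new argument name.
def _demo_deprecated_alias() -> pd.DataFrame:
    @deprecated_alias(col="column_name")
    def select(df, column_name):
        return df[[column_name]]
    df = pd.DataFrame({"a": [1, 2]})
    # `col="a"` is redirected to `column_name="a"` and a warning is raised.
    return select(df, col="a")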
def refactored_function(message: str) -> Callable:
"""Used as a decorator when refactoring functions
Implementation is inspired from `Hacker Noon`_.
    .. _Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987
Functional usage example:
.. code-block:: python
@refactored_function(
message="simple_sum() has been refactored. Use hard_sum() instead."
)
def simple_sum(alpha, beta):
return alpha + beta
:param message: Message to use in warning user about refactoring.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
def emit_warning(*args, **kwargs):
warnings.warn(message, FutureWarning)
return func(*args, **kwargs)
return emit_warning
return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
"""
Used to update deprecated argument names with new names. Throws a
TypeError if both arguments are provided, and warns if old alias is used.
Nothing is returned as the passed ``kwargs`` are modified directly.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
:param func_name: name of decorated function.
:param kwargs: Arguments supplied to the method.
:param aliases: Dictionary of aliases for a function's arguments.
:raises TypeError: if both arguments are provided.
""" # noqa: E501
for old_alias, new_alias in aliases.items():
if old_alias in kwargs:
if new_alias in kwargs:
raise TypeError(
f"{func_name} received both {old_alias} and {new_alias}"
)
warnings.warn(
f"{old_alias} is deprecated; use {new_alias}",
DeprecationWarning,
)
kwargs[new_alias] = kwargs.pop(old_alias)
def check_column(
df: pd.DataFrame, old_column_names: List, present: bool = True
):
"""
One-liner syntactic sugar for checking the presence or absence of a column.
Should be used like this::
        check_column(df, ['a', 'b'], present=True)
    :param df: The pandas DataFrame object.
:param old_column_names: A list of column names we want to check to see if
present (or absent) in df.
:param present: If True (default), checks to see if all of old_column_names
are in df.columns. If False, checks that none of old_column_names are
in df.columns.
    :raises ValueError: if any of the requested columns fails the presence or absence check.
"""
for column_name in old_column_names:
if present:
if column_name not in df.columns:
raise ValueError(
f"{column_name} not present in dataframe columns!"
)
else: # Tests for exclusion
if column_name in df.columns:
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
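# Hedged usage sketch (illustrative only): `check_column` is a guard clause; it
# returns None when the check passes and raises ValueError otherwise.
def _demo_check_column():
    df = pd.DataFrame({"a": [1], "b": [2]})
    check_column(df, ["a", "b"], present=True)   # passes silently
    check_column(df, ["c"], present=False)       # passes: "c" is absent
    # check_column(df, ["c"], present=True) would raise ValueError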
def skipna(f: Callable) -> Callable:
"""
Decorator for escaping np.nan and None in a function
Should be used like this::
df[column].apply(skipna(transform))
or::
@skipna
def transform(x):
pass
:param f: the function to be wrapped
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
if (type(x) is float and np.isnan(x)) or x is None:
return np.nan
else:
return f(x, *args, **kwargs)
return _wrapped
def skiperror(
f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
"""
Decorator for escaping errors in a function
Should be used like this::
df[column].apply(
skiperror(transform, return_val=3, return_x=False))
or::
@skiperror(return_val=3, return_x=False)
def transform(x):
pass
:param f: the function to be wrapped
:param return_x: whether or not the original value that caused error
should be returned
:param return_val: the value to be returned when an error hits.
Ignored if return_x is True
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
try:
return f(x, *args, **kwargs)
except Exception:
if return_x:
return x
return return_val
return _wrapped
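# Hedged usage sketch (illustrative only): `skipna` lets NaN/None pass through
# untouched, while `skiperror` swallows exceptions and substitutes a fallback.
def _demo_skipna_skiperror():
    safe = pd.Series(["1", "2", None]).apply(skipna(float))  # 1.0, 2.0, NaN
    tolerant = pd.Series(["1", "oops", "3"]).apply(
        skiperror(float, return_val=np.nan)
    )  # 1.0, NaN, 3.0
    return safe, tolerant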
def _check_instance(entry: Dict):
"""
Function to check instances in the expand_grid function.
This checks if entry is a dictionary,
checks the instance of value in key:value pairs in entry,
and makes changes to other types as deemed necessary.
Additionally, ValueErrors are raised if empty containers are
passed in as values into the dictionary.
How each type is handled, and their associated exceptions,
are pretty clear from the code.
"""
# dictionary should not be empty
if not entry:
raise ValueError("passed dictionary cannot be empty")
# couple of checks that should cause the program to fail early
# if conditions are not met
for _, value in entry.items():
if isinstance(value, np.ndarray):
if value.size == 0:
raise ValueError("array cannot be empty")
if value.ndim > 2:
raise ValueError(
"expand_grid works only on 1D and 2D structures."
)
if isinstance(value, (pd.DataFrame, pd.Series)):
if value.empty:
raise ValueError("passed DataFrame cannot be empty")
if isinstance(value, (list, tuple, set, dict)):
if not value:
raise ValueError("passed data cannot be empty")
entry = {
# If it is a scalar value, then wrap in a list
# this is necessary, as we will use the itertools.product function
# which works only on iterables.
key: [value]
if isinstance(value, (type(None), int, float, bool, str, np.generic))
else value
for key, value in entry.items()
}
return entry
def _grid_computation(entry: Dict) -> pd.DataFrame:
"""
Return the final output of the expand_grid function as a dataframe.
This kicks in after the ``_check_instance`` function is completed,
and essentially creates a cross join of the values in the `entry`
dictionary. If the `entry` dictionary is a collection of lists/tuples,
then `itertools.product` will be used for the cross join, before a
dataframe is created; if however, the `entry` contains a pandas dataframe
or a pandas series or a numpy array, then identical indices are created for
each entry and `pandas DataFrame join` is called to create the cross join.
"""
# checks if the dictionary does not have any of
# (pd.Dataframe, pd.Series, numpy) values and uses itertools.product.
    # numpy meshgrid is faster, but requires homogeneous data to appreciate
# the speed, and also to keep the data type for each column created.
# As an example, if we have a mix in the dictionary of strings and numbers,
# numpy will convert it to an object data type. Itertools product is
# efficient and does not lose the data type.
if not any(
isinstance(value, (pd.DataFrame, pd.Series, np.ndarray))
for key, value in entry.items()
):
df_expand_grid = (value for key, value in entry.items())
df_expand_grid = product(*df_expand_grid)
return pd.DataFrame(df_expand_grid, columns=entry)
# dictionary is a mix of different types - dataframe/series/numpy/...
# so we check for each data type- if it is a pandas dataframe, then convert
# to numpy and add to `df_expand_grid`; the other data types are added to
# `df_expand_grid` as is. For each of the data types, new column names are
# created if they do not have, and modified if names already exist. These
# names are built through the for loop below and added to `df_columns`
df_columns = []
df_expand_grid = []
for key, value in entry.items():
if isinstance(value, pd.DataFrame):
df_expand_grid.append(value.to_numpy())
if isinstance(value.columns, pd.MultiIndex):
df_columns.extend(
[f"{key}_{ind}" for ind, col in enumerate(value.columns)]
)
else:
df_columns.extend([f"{key}_{col}" for col in value])
elif isinstance(value, pd.Series):
df_expand_grid.append(np.array(value))
if value.name:
df_columns.append(f"{key}_{value.name}")
else:
df_columns.append(str(key))
elif isinstance(value, np.ndarray):
df_expand_grid.append(value)
if value.ndim == 1:
df_columns.append(f"{key}_0")
else:
df_columns.extend(
[f"{key}_{ind}" for ind in range(value.shape[-1])]
)
else:
df_expand_grid.append(value)
df_columns.append(key)
# here we run the product function from itertools only if there is
# more than one item in the list; if only one item, we simply
# create a dataframe with the new column names from `df_columns`
if len(df_expand_grid) > 1:
df_expand_grid = product(*df_expand_grid)
df_expand_grid = (
chain.from_iterable(
[val]
if not isinstance(val, (pd.DataFrame, pd.Series, np.ndarray))
else val
for val in value
)
for value in df_expand_grid
)
return pd.DataFrame(df_expand_grid, columns=df_columns)
return pd.DataFrame(*df_expand_grid, columns=df_columns)
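# Hedged usage sketch (illustrative only): the two private helpers behind an
# expand_grid-style cross join. `_check_instance` wraps scalars in lists, then
# `_grid_computation` builds the cartesian product of every value.
def _demo_grid_computation() -> pd.DataFrame:
    entry = _check_instance({"x": [1, 2], "y": ["a", "b"]})
    # expected: 4 rows, the cartesian product of x and y, with columns "x", "y"
    return _grid_computation(entry)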
def _complete_groupings(df, list_of_columns):
# this collects all the columns as individual labels, which will be
# used to set the index of the dataframe
index_columns = []
# this will collect all the values associated with the respective
# columns, and used to reindex the dataframe, to get the complete
# pairings
reindex_columns = []
for item in list_of_columns:
if not isinstance(item, (str, dict, list, tuple)):
raise ValueError(
"""Value must either be a column label, a list/tuple of columns or a
dictionary where the keys are columns in the dataframe."""
)
if not item:
raise ValueError("grouping cannot be empty")
if isinstance(item, str):
reindex_columns.append(set(df[item].array))
index_columns.append(item)
else:
# this comes into play if we wish to input values that
# do not exist in the data, say years, or alphabets, or
# range of numbers
if isinstance(item, dict):
if len(item) > 1:
index_columns.extend(item.keys())
else:
index_columns.append(*item.keys())
item_contents = [
# convert scalars to iterables; this is necessary
# when creating combinations with itertools' product
[value]
if isinstance(value, (int, float, str, bool))
else value
for key, value in item.items()
]
reindex_columns.extend(item_contents)
else:
index_columns.extend(item)
# TODO : change this to read as a numpy instead
# instead of a list comprehension
# it should be faster
item = (df[sub_column].array for sub_column in item)
item = set(zip(*item))
reindex_columns.append(item)
reindex_columns = product(*reindex_columns)
# A list comprehension, coupled with itertools chain.from_iterable
# would likely be faster; I fear that it may hamper readability with
# nested list comprehensions; as such, I chose the for loop method.
new_reindex_columns = []
for row in reindex_columns:
new_row = []
for cell in row:
if isinstance(cell, tuple):
new_row.extend(cell)
else:
new_row.append(cell)
new_reindex_columns.append(tuple(new_row))
df = df.set_index(index_columns)
return df, new_reindex_columns
def _data_checks_pivot_longer(
df, index, column_names, names_sep, names_pattern, names_to, values_to
):
"""
This function raises errors or warnings if the arguments have the wrong
python type, or if an unneeded argument is provided. It also raises an
error message if `names_pattern` is a list/tuple of regular expressions,
and `names_to` is not a list/tuple, and the lengths do not match.
This function is executed before proceeding to the computation phase.
Type annotations are not provided because this function is where type
checking happens.
"""
if any(
(
isinstance(df.index, pd.MultiIndex),
isinstance(df.columns, pd.MultiIndex),
),
):
warnings.warn(
"""pivot_longer is designed for single index dataframes and
may produce unexpected results for multiIndex dataframes;
for such cases, kindly use pandas.melt."""
)
if index is not None:
if isinstance(index, str):
index = [index]
check("index", index, [list, tuple, Pattern])
if column_names is not None:
if isinstance(column_names, str):
column_names = [column_names]
check("column_names", column_names, [list, tuple, Pattern])
if names_to is not None:
check("names_to", names_to, [list, tuple, str])
if isinstance(names_to, (list, tuple)):
if not all(isinstance(word, str) for word in names_to):
raise TypeError(
"All entries in `names_to` argument must be strings."
)
if len(names_to) > 1:
if all((names_pattern is not None, names_sep is not None)):
raise ValueError(
"""Only one of names_pattern or names_sep
should be provided."""
)
if isinstance(names_to, str) or (len(names_to) == 1):
# names_sep creates more than one column
# whereas regex with names_pattern can be limited to one column
if names_sep is not None:
raise ValueError(
"""
For a single names_to value,
names_sep is not required.
"""
)
if names_pattern is not None:
check("names_pattern", names_pattern, [str, Pattern, List, Tuple])
if isinstance(names_pattern, (List, Tuple)):
if not all(
isinstance(word, (str, Pattern)) for word in names_pattern
):
raise TypeError(
"""
All entries in ``names_pattern`` argument
must be regular expressions.
"""
)
if not isinstance(names_to, (List, Tuple)):
raise TypeError(
"""
``names_to`` must be a list or tuple.
"""
)
if len(names_pattern) != len(names_to):
raise ValueError(
"""
Length of ``names_to`` does not match
number of patterns.
"""
)
if ".value" in names_to:
raise ValueError(
"""
``.value`` not accepted if ``names_pattern``
is a list/tuple.
"""
)
if names_sep is not None:
check("names_sep", names_sep, [str, Pattern])
check("values_to", values_to, [str])
return (
df,
index,
column_names,
names_sep,
names_pattern,
names_to,
values_to,
)
def _pivot_longer_pattern_match(
df: pd.DataFrame,
index: Optional[Union[str, Pattern]] = None,
column_names: Optional[Union[str, Pattern]] = None,
) -> Tuple:
"""
This checks if a pattern (regular expression) is supplied
to index or columns and extracts the names that match the
given regular expression.
"""
# TODO: allow `janitor.patterns` to accept a list/tuple
    # of regular expressions.
if isinstance(column_names, Pattern):
column_names = [col for col in df if column_names.search(col)]
if isinstance(index, Pattern):
index = [col for col in df if index.search(col)]
return df, index, column_names
def _reindex_func(frame: pd.DataFrame, indexer=None) -> pd.DataFrame:
"""
Function to reshape dataframe in pivot_longer, to try and make it look
similar to the source data in terms of direction of the columns. It is a
temporary measure until the minimum pandas version is 1.1, where we can
take advantage of the `ignore_index` argument in `pd.melt`.
Example: if the index column is `id`, and the values are :
[1,2,3,3,2,1,2,3], then when melted, the index column in the reshaped
dataframe, based on this function, will look like `1,1,1,2,2,2,3,3,3`.
A reindexed dataframe is returned.
"""
    if indexer is None:
        uniq_index_length = len(frame.drop_duplicates())
    else:
        uniq_index_length = len(frame.loc[:, indexer].drop_duplicates())
        # only look for a helper "index" column when an indexer was supplied
        if "index" in indexer:
            frame = frame.drop("index", axis=1)
sorter = np.reshape(frame.index, (-1, uniq_index_length))
sorter = np.ravel(sorter, order="F")
return frame.reindex(sorter)
def _computations_pivot_longer(
df: pd.DataFrame,
index: Optional[Union[List, Tuple]] = None,
column_names: Optional[Union[List, Tuple]] = None,
names_sep: Optional[Union[str, Pattern]] = None,
names_pattern: Optional[Union[str, Pattern]] = None,
names_to: Optional[Union[List, Tuple, str]] = None,
values_to: Optional[str] = "value",
) -> pd.DataFrame:
"""
This is the main workhorse of the `pivot_longer` function.
There are a couple of scenarios that this function takes care of when
unpivoting :
1. Regular data unpivoting is covered with pandas melt.
For the scenarios below, the dataframe is melted and separated into a
`before_df`, `between_df` and `after_df`.
2. if the length of `names_to` is > 1, the function unpivots the data,
using `pd.melt`, and then separates `between_df` into individual
columns, using `str.split(expand=True)` if `names_sep` is provided,
       or `str.extractall()` if `names_pattern` is provided. The labels in
`names_to` become the new column names.
3. If `names_to` contains `.value`, then the function replicates
`pd.wide_to_long`, using `pd.melt`. Unlike `pd.wide_to_long`, the
stubnames do not have to be prefixes, they just need to match the
position of `.value` in `names_to`. Just like in 2 above, the columns
in `between_df` are separated into individual columns. The labels in
the column corresponding to `.value` become the new column names, and
override `values_to` in the process. The other extracted columns stay
(if len(`names_to`) > 1), with the other names in `names_to` as
       their column names.
4. If `names_pattern` is a list/tuple of regular expressions, it is
paired with `names_to`, which should be a list/tuple of new column
names. `.value` is not permissible in this scenario.
`numpy select` is called, along with `pd.Series.str.contains`,
to get rows in the `between_df` column that matches the regular
expressions in `names_pattern`. The labels in `names_to` replaces
the matched rows and become the new column names in the new dataframe.
The function also tries to emulate the way the source data is structured.
Say data looks like this :
id, a1, a2, a3, A1, A2, A3
1, a, b, c, A, B, C
when pivoted into long form, it will look like this :
id instance a A
0 1 1 a A
1 1 2 b B
2 1 3 c C
where the columns `a` comes before `A`, as it was in the source data,
and in column `a`, `a > b > c`, also as it was in the source data.
This also visually creates a complete set of the data per index.
"""
if index is not None:
check_column(df, index, present=True)
# this should take care of non unique index
# we'll get rid of the extra in _reindex_func
# TODO: what happens if `index` is already a name
# in the columns?
if df.loc[:, index].duplicated().any():
df = df.reset_index()
index = ["index"] + index
if column_names is not None:
check_column(df, column_names, present=True)
if index is None and (column_names is not None):
index = df.columns.difference(column_names)
# scenario 1
if all((names_pattern is None, names_sep is None)):
df = pd.melt(
df,
id_vars=index,
value_vars=column_names,
var_name=names_to,
value_name=values_to,
)
# reshape in the order that the data appears
# this should be easier to do with ignore_index in pandas version 1.1
if index is not None:
df = _reindex_func(df, index).reset_index(drop=True)
return df.transform(pd.to_numeric, errors="ignore")
return df
# scenario 2
if any((names_pattern is not None, names_sep is not None)):
# should avoid conflict if index/columns has a string named `variable`
uniq_name = "*^#variable!@?$%"
df = pd.melt(
df, id_vars=index, value_vars=column_names, var_name=uniq_name
)
# pd.melt returns uniq_name and value as the last columns. We can use
# that knowledge to get the data before( the index column(s)),
# the data between (our uniq_name column),
# and the data after (our values column)
position = df.columns.get_loc(uniq_name)
if position == 0:
before_df = pd.DataFrame([], index=df.index)
else:
# just before uniq_name column
before_df = df.iloc[:, :-2]
after_df = df.iloc[:, -1].rename(values_to)
between_df = df.pop(uniq_name)
if names_sep is not None:
between_df = between_df.str.split(names_sep, expand=True)
else:
# this takes care of scenario 4
# and reconfigures it so it takes the scenario 3 path
# with `.value`
if isinstance(names_pattern, (list, tuple)):
condlist = [
between_df.str.contains(regex) for regex in names_pattern
]
between_df = np.select(condlist, names_to, None)
names_to = [".value"]
between_df = pd.DataFrame(between_df, columns=names_to)
if between_df.loc[:, ".value"].hasnans:
between_df = between_df.dropna()
else:
between_df = between_df.str.extractall(
names_pattern
).droplevel(-1)
# set_axis function labels argument takes only list-like objects
if isinstance(names_to, str):
names_to = [names_to]
# check number of columns
# before assigning names_to as `between_df` new columns
if len(names_to) != between_df.shape[-1]:
raise ValueError(
"""
Length of ``names_to`` does not match
number of columns extracted.
"""
)
# safeguard for a regex that returns nothing
if between_df.empty:
raise ValueError(
"""
The regular expression in ``names_pattern`` did not
return any matches.
"""
)
before_df = _reindex_func(before_df, index)
between_df = between_df.set_axis(names_to, axis="columns")
# we take a detour here to deal with paired columns, where the user
# might want one of the names in the paired column as part of the
# new column names. The `.value` indicates that that particular
# value becomes a header.
# It is also another way of achieving pandas wide_to_long.
# Let's see an example of a paired column
# say we have this data :
# data is copied from pandas wide_to_long documentation
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.wide_to_long.html
# famid birth ht1 ht2
# 0 1 1 2.8 3.4
# 1 1 2 2.9 3.8
# 2 1 3 2.2 2.9
# 3 2 1 2.0 3.2
# 4 2 2 1.8 2.8
# 5 2 3 1.9 2.4
# 6 3 1 2.2 3.3
# 7 3 2 2.3 3.4
# 8 3 3 2.1 2.9
# and we want to reshape into data that looks like this :
# famid birth age ht
# 0 1 1 1 2.8
# 1 1 1 2 3.4
# 2 1 2 1 2.9
# 3 1 2 2 3.8
# 4 1 3 1 2.2
# 5 1 3 2 2.9
# 6 2 1 1 2.0
# 7 2 1 2 3.2
# 8 2 2 1 1.8
# 9 2 2 2 2.8
# 10 2 3 1 1.9
# 11 2 3 2 2.4
# 12 3 1 1 2.2
# 13 3 1 2 3.3
# 14 3 2 1 2.3
# 15 3 2 2 3.4
# 16 3 3 1 2.1
# 17 3 3 2 2.9
# We have height(`ht`) and age(`1,2`) paired in the column name.
# We pass ``names_to`` as ('.value', 'age'), and names_pattern
# as "(ht)(\d)". If ``names_to`` and ``names_pattern`` are paired,
# we get --> {".value":"ht", "age": "\d"}.
# This instructs the function to keep "ht" as a column name
# (since it is directly mapped to `.value`) in the new dataframe,
# and create a new `age` column, that contains all the numbers.
# Also, the code tries to ensure a complete collection for each index;
# sorted in their order of appearance in the source dataframe.
# Note how `1, 2` is repeated for the extracted age column for each
# combination of `famid` and `birth`. The repeat of `1,2` also
# simulates how it looks in the source data : `ht1 > ht2`.
# As such, for every index, there is a complete set of the data;
# the user can visually see the unpivoted data for each index
# and be assured of complete/accurate sync.
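        # Hedged point of reference (comment only, nothing executed here): for
        # the example above, pandas itself produces the same long format with
        # pd.wide_to_long(df, stubnames="ht", i=["famid", "birth"], j="age"),
        # up to index/sort differences; `.value` generalizes that idea because
        # the stub ("ht") does not have to be a prefix of the column name.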
# scenario 3
if ".value" in names_to:
if names_to.count(".value") > 1:
raise ValueError(
"Column name `.value` must not be duplicated."
)
# extract new column names and assign category dtype
# apart from memory usage, the primary aim of the category
# dtype is to ensure that the data is sorted in order of
# appearance in the source dataframe
between_df_unique_values = {
key: pd.unique(value) for key, value in between_df.items()
}
after_df_cols = between_df_unique_values[".value"]
between_df_dtypes = {
key: CategoricalDtype(value, ordered=True)
for key, value in between_df_unique_values.items()
}
between_df = between_df.astype(between_df_dtypes)
if len(names_to) > 1:
other_headers = between_df.columns.difference(
[".value"], sort=False
)
other_headers = other_headers.tolist()
between_df = between_df.sort_values([".value"] + other_headers)
else:
other_headers = None
                # index order is not assured with just `.value` and quicksort
between_df = between_df.sort_values(
[".value"], kind="mergesort"
)
# reshape index_sorter and use the first column as the index
# of the reshaped after_df. after_df will be reshaped into
# specific number of columns, based on the length of
# `after_df_cols`
index_sorter = between_df.index
after_df = after_df.reindex(index_sorter).to_numpy()
# this will serve as the index for the `after_df` frame
# as well as the `between_df` frame
# it works because we reshaped the `after_df` into n
# number of columns, where n == len(after_df_cols)
# e.g if the dataframe initially was 24 rows, if it is
# reshaped to a 3 column dataframe, rows will become 8
# we then pick the first 8 rows from index_sorter as
# the index for both `after_df` and `between_df`
after_df_cols_len = len(after_df_cols)
index_sorter = index_sorter[
: index_sorter.size // after_df_cols_len
]
after_df = np.reshape(after_df, (-1, after_df_cols_len), order="F")
after_df = pd.DataFrame(
after_df, columns=after_df_cols, index=index_sorter
)
if other_headers:
between_df = between_df.loc[index_sorter, other_headers]
else:
between_df = pd.DataFrame([], index=index_sorter)
if position == 0: # no index or column_names supplied
df =
|
pd.DataFrame.join(between_df, after_df, how="inner")
|
pandas.DataFrame.join
|
"""
Code from Modeling and Simulation in Python.
Copyright 2020 <NAME>
MIT License: https://opensource.org/licenses/MIT
"""
import logging
logger = logging.getLogger(name="modsim.py")
# make sure we have Python 3.6 or better
import sys
if sys.version_info < (3, 6):
logger.warning("modsim.py depends on Python 3.6 features.")
import inspect
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from types import SimpleNamespace
from copy import copy
import pint
units = pint.UnitRegistry()
Quantity = units.Quantity
def flip(p=0.5):
"""Flips a coin with the given probability.
p: float 0-1
returns: boolean (True or False)
"""
return np.random.random() < p
def cart2pol(x, y, z=None):
"""Convert Cartesian coordinates to polar.
x: number or sequence
y: number or sequence
z: number or sequence (optional)
returns: theta, rho OR theta, rho, z
"""
x = np.asarray(x)
y = np.asarray(y)
rho = np.hypot(x, y)
theta = np.arctan2(y, x)
if z is None:
return theta, rho
else:
return theta, rho, z
def pol2cart(theta, rho, z=None):
"""Convert polar coordinates to Cartesian.
theta: number or sequence in radians
rho: number or sequence
z: number or sequence (optional)
returns: x, y OR x, y, z
"""
x = rho * np.cos(theta)
y = rho * np.sin(theta)
if z is None:
return x, y
else:
return x, y, z
from numpy import linspace
def linrange(start, stop, step=1, **options):
"""Make an array of equally spaced values.
start: first value
stop: last value (might be approximate)
step: difference between elements (should be consistent)
returns: NumPy array
"""
n = int(round((stop-start) / step))
return linspace(start, stop, n+1, **options)
def leastsq(error_func, x0, *args, **options):
"""Find the parameters that yield the best fit for the data.
`x0` can be a sequence, array, Series, or Params
Positional arguments are passed along to `error_func`.
Keyword arguments are passed to `scipy.optimize.leastsq`
error_func: function that computes a sequence of errors
x0: initial guess for the best parameters
args: passed to error_func
options: passed to leastsq
:returns: Params object with best_params and ModSimSeries with details
"""
# override `full_output` so we get a message if something goes wrong
options["full_output"] = True
# run leastsq
t = scipy.optimize.leastsq(error_func, x0=x0, args=args, **options)
best_params, cov_x, infodict, mesg, ier = t
# pack the results into a ModSimSeries object
details = ModSimSeries(infodict)
details.set(cov_x=cov_x, mesg=mesg, ier=ier)
# if we got a Params object, we should return a Params object
if isinstance(x0, Params):
        best_params = Params(pd.Series(best_params, x0.index))
# return the best parameters and details
return best_params, details
def minimize_scalar(min_func, bounds, *args, **options):
"""Finds the input value that minimizes `min_func`.
Wrapper for
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html
min_func: computes the function to be minimized
bounds: sequence of two values, lower and upper bounds of the range to be searched
args: any additional positional arguments are passed to min_func
options: any keyword arguments are passed as options to minimize_scalar
returns: ModSimSeries object
"""
try:
min_func(bounds[0], *args)
except Exception as e:
msg = """Before running scipy.integrate.minimize_scalar, I tried
running the function you provided with the
lower bound, and I got the following error:"""
logger.error(msg)
raise (e)
underride(options, xatol=1e-3)
res = scipy.optimize.minimize_scalar(
min_func,
bracket=bounds,
bounds=bounds,
args=args,
method="bounded",
options=options,
)
if not res.success:
msg = (
"""scipy.optimize.minimize_scalar did not succeed.
The message it returned is %s"""
% res.message
)
raise Exception(msg)
return res
def maximize_scalar(max_func, bounds, *args, **options):
"""Finds the input value that maximizes `max_func`.
Wrapper for https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html
    max_func: computes the function to be maximized
bounds: sequence of two values, lower and upper bounds of the
range to be searched
args: any additional positional arguments are passed to max_func
options: any keyword arguments are passed as options to minimize_scalar
returns: ModSimSeries object
"""
def min_func(*args):
return -max_func(*args)
res = minimize_scalar(min_func, bounds, *args, **options)
# we have to negate the function value before returning res
res.fun = -res.fun
return res
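# Hedged usage sketch (the `_demo_...` functions are illustrative additions,
# not part of the original modsim.py): minimizing a parabola over a bracket;
# `res.x` should be close to 2 and `res.fun` close to 0.
def _demo_minimize_scalar():
    res = minimize_scalar(lambda x: (x - 2) ** 2, [0, 5])
    return res.x, res.fun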
def minimize_golden(min_func, bracket, *args, **options):
"""Find the minimum of a function by golden section search.
Based on
https://en.wikipedia.org/wiki/Golden-section_search#Iterative_algorithm
:param min_func: function to be minimized
:param bracket: interval containing a minimum
:param args: arguments passes to min_func
:param options: rtol and maxiter
:return: ModSimSeries
"""
maxiter = options.get("maxiter", 100)
rtol = options.get("rtol", 1e-3)
def success(**kwargs):
return ModSimSeries(dict(success=True, **kwargs))
def failure(**kwargs):
return ModSimSeries(dict(success=False, **kwargs))
a, b = bracket
ya = min_func(a, *args)
yb = min_func(b, *args)
phi = 2 / (np.sqrt(5) - 1)
h = b - a
c = b - h / phi
yc = min_func(c, *args)
d = a + h / phi
yd = min_func(d, *args)
if yc > ya or yc > yb:
return failure(message="The bracket is not well-formed.")
for i in range(maxiter):
# check for convergence
if abs(h / c) < rtol:
return success(x=c, fun=yc)
if yc < yd:
b, yb = d, yd
d, yd = c, yc
h = b - a
c = b - h / phi
yc = min_func(c, *args)
else:
a, ya = c, yc
c, yc = d, yd
h = b - a
d = a + h / phi
yd = min_func(d, *args)
# if we exited the loop, too many iterations
return failure(root=c, message="maximum iterations = %d exceeded" % maxiter)
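# Hedged usage sketch (illustrative only; assumes ModSimSeries is defined
# elsewhere in this module, as the functions above already rely on it):
# golden-section search should report success with x close to 2.
def _demo_minimize_golden():
    return minimize_golden(lambda x: (x - 2) ** 2, [0, 5])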
def maximize_golden(max_func, bracket, *args, **options):
"""Find the maximum of a function by golden section search.
    :param max_func: function to be maximized
:param bracket: interval containing a maximum
:param args: arguments passes to min_func
:param options: rtol and maxiter
:return: ModSimSeries
"""
def min_func(*args):
return -max_func(*args)
res = minimize_golden(min_func, bracket, *args, **options)
# we have to negate the function value before returning res
res.fun = -res.fun
return res
def minimize_powell(min_func, x0, *args, **options):
"""Finds the input value that minimizes `min_func`.
Wrapper for https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
min_func: computes the function to be minimized
x0: initial guess
args: any additional positional arguments are passed to min_func
    options: any keyword arguments are passed as options to scipy.optimize.minimize
returns: ModSimSeries object
"""
underride(options, tol=1e-3)
res = scipy.optimize.minimize(min_func, x0, *args, **options)
return ModSimSeries(res)
# make aliases for minimize and maximize
minimize = minimize_golden
maximize = maximize_golden
def run_solve_ivp(system, slope_func, **options):
"""Computes a numerical solution to a differential equation.
`system` must contain `init` with initial conditions,
`t_0` with the start time, and `t_end` with the end time.
It can contain any other parameters required by the slope function.
`options` can be any legal options of `scipy.integrate.solve_ivp`
system: System object
slope_func: function that computes slopes
returns: TimeFrame
"""
system = remove_units(system)
# make sure `system` contains `init`
if not hasattr(system, "init"):
msg = """It looks like `system` does not contain `init`
as a system variable. `init` should be a State
object that specifies the initial condition:"""
raise ValueError(msg)
# make sure `system` contains `t_end`
if not hasattr(system, "t_end"):
msg = """It looks like `system` does not contain `t_end`
as a system variable. `t_end` should be the
final time:"""
raise ValueError(msg)
# the default value for t_0 is 0
t_0 = getattr(system, "t_0", 0)
# try running the slope function with the initial conditions
try:
slope_func(t_0, system.init, system)
except Exception as e:
msg = """Before running scipy.integrate.solve_ivp, I tried
running the slope function you provided with the
initial conditions in `system` and `t=t_0` and I got
the following error:"""
logger.error(msg)
raise (e)
# get the list of event functions
events = options.get('events', [])
# if there's only one event function, put it in a list
try:
iter(events)
except TypeError:
events = [events]
for event_func in events:
# make events terminal unless otherwise specified
if not hasattr(event_func, 'terminal'):
event_func.terminal = True
# test the event function with the initial conditions
try:
event_func(t_0, system.init, system)
except Exception as e:
msg = """Before running scipy.integrate.solve_ivp, I tried
running the event function you provided with the
initial conditions in `system` and `t=t_0` and I got
the following error:"""
logger.error(msg)
raise (e)
# get dense output unless otherwise specified
underride(options, dense_output=True)
# run the solver
bunch = solve_ivp(slope_func, [t_0, system.t_end], system.init,
args=[system], **options)
# separate the results from the details
y = bunch.pop("y")
t = bunch.pop("t")
# get the column names from `init`, if possible
if hasattr(system.init, 'index'):
columns = system.init.index
else:
columns = range(len(system.init))
# evaluate the results at equally-spaced points
if options.get('dense_output', False):
try:
num = system.num
except AttributeError:
num = 51
t_final = t[-1]
t_array = linspace(t_0, t_final, num)
y_array = bunch.sol(t_array)
# pack the results into a TimeFrame
results = TimeFrame(y_array.T, index=t_array,
columns=columns)
else:
results = TimeFrame(y.T, index=t,
columns=columns)
return results, bunch
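# Hedged usage sketch (illustrative only): exponential decay solved with
# run_solve_ivp. Assumes System and State behave as defined further down in
# this module and that TimeFrame is defined elsewhere in modsim.py.
def _demo_run_solve_ivp():
    def slope_func(t, state, system):
        [y] = state
        return [-system.k * y]
    system = System(init=State(y=1.0), t_0=0, t_end=10, k=0.3)
    results, details = run_solve_ivp(system, slope_func)
    return results, details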
def check_system(system, slope_func):
"""Make sure the system object has the fields we need for run_ode_solver.
:param system:
:param slope_func:
:return:
"""
# make sure `system` contains `init`
if not hasattr(system, "init"):
msg = """It looks like `system` does not contain `init`
as a system variable. `init` should be a State
object that specifies the initial condition:"""
raise ValueError(msg)
# make sure `system` contains `t_end`
if not hasattr(system, "t_end"):
msg = """It looks like `system` does not contain `t_end`
as a system variable. `t_end` should be the
final time:"""
raise ValueError(msg)
# the default value for t_0 is 0
t_0 = getattr(system, "t_0", 0)
# get the initial conditions
init = system.init
# get t_end
t_end = system.t_end
# if dt is not specified, take 100 steps
try:
dt = system.dt
except AttributeError:
dt = t_end / 100
return init, t_0, t_end, dt
def run_euler(system, slope_func, **options):
"""Computes a numerical solution to a differential equation.
`system` must contain `init` with initial conditions,
`t_end` with the end time, and `dt` with the time step.
`system` may contain `t_0` to override the default, 0
It can contain any other parameters required by the slope function.
`options` can be ...
system: System object
slope_func: function that computes slopes
returns: TimeFrame
"""
# the default message if nothing changes
msg = "The solver successfully reached the end of the integration interval."
# get parameters from system
init, t_0, t_end, dt = check_system(system, slope_func)
# make the TimeFrame
frame = TimeFrame(columns=init.index)
frame.row[t_0] = init
ts = linrange(t_0, t_end, dt) * get_units(t_end)
# run the solver
for t1 in ts:
y1 = frame.row[t1]
slopes = slope_func(y1, t1, system)
y2 = [y + slope * dt for y, slope in zip(y1, slopes)]
t2 = t1 + dt
frame.row[t2] = y2
details = ModSimSeries(dict(message="Success"))
return frame, details
def run_ralston(system, slope_func, **options):
"""Computes a numerical solution to a differential equation.
`system` must contain `init` with initial conditions,
and `t_end` with the end time.
`system` may contain `t_0` to override the default, 0
It can contain any other parameters required by the slope function.
`options` can be ...
system: System object
slope_func: function that computes slopes
returns: TimeFrame
"""
# the default message if nothing changes
msg = "The solver successfully reached the end of the integration interval."
# get parameters from system
init, t_0, t_end, dt = check_system(system, slope_func)
# make the TimeFrame
frame = TimeFrame(columns=init.index)
frame.row[t_0] = init
ts = linrange(t_0, t_end, dt) * get_units(t_end)
event_func = options.get("events", None)
z1 = np.nan
def project(y1, t1, slopes, dt):
t2 = t1 + dt
y2 = [y + slope * dt for y, slope in zip(y1, slopes)]
return y2, t2
# run the solver
for t1 in ts:
y1 = frame.row[t1]
# evaluate the slopes at the start of the time step
slopes1 = slope_func(y1, t1, system)
# evaluate the slopes at the two-thirds point
y_mid, t_mid = project(y1, t1, slopes1, 2 * dt / 3)
slopes2 = slope_func(y_mid, t_mid, system)
# compute the weighted sum of the slopes
slopes = [(k1 + 3 * k2) / 4 for k1, k2 in zip(slopes1, slopes2)]
# compute the next time stamp
y2, t2 = project(y1, t1, slopes, dt)
# check for a terminating event
if event_func:
z2 = event_func(y2, t2, system)
if z1 * z2 < 0:
scale = magnitude(z1 / (z1 - z2))
y2, t2 = project(y1, t1, slopes, scale * dt)
frame.row[t2] = y2
msg = "A termination event occurred."
break
else:
z1 = z2
# store the results
frame.row[t2] = y2
details = ModSimSeries(dict(success=True, message=msg))
return frame, details
run_ode_solver = run_ralston
# TODO: Implement leapfrog
def fsolve(func, x0, *args, **options):
"""Return the roots of the (non-linear) equations
defined by func(x) = 0 given a starting estimate.
Uses scipy.optimize.fsolve, with extra error-checking.
func: function to find the roots of
x0: scalar or array, initial guess
args: additional positional arguments are passed along to fsolve,
which passes them along to func
returns: solution as an array
"""
# make sure we can run the given function with x0
try:
func(x0, *args)
except Exception as e:
msg = """Before running scipy.optimize.fsolve, I tried
running the error function you provided with the x0
you provided, and I got the following error:"""
logger.error(msg)
raise (e)
# make the tolerance more forgiving than the default
underride(options, xtol=1e-6)
# run fsolve
result = scipy.optimize.fsolve(func, x0, args=args, **options)
return result
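# Hedged usage sketch (illustrative only): solving x**2 - 2 == 0 from an
# initial guess of 1; the result should be close to sqrt(2).
def _demo_fsolve():
    def error_func(x):
        return x ** 2 - 2
    return fsolve(error_func, 1)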
def crossings(series, value):
"""Find the labels where the series passes through value.
The labels in series must be increasing numerical values.
series: Series
value: number
returns: sequence of labels
"""
values = series.values - value
interp = InterpolatedUnivariateSpline(series.index, values)
return interp.roots()
def has_nan(a):
"""Checks whether the an array contains any NaNs.
:param a: NumPy array or Pandas Series
:return: boolean
"""
return np.any(np.isnan(a))
def is_strictly_increasing(a):
"""Checks whether the elements of an array are strictly increasing.
:param a: NumPy array or Pandas Series
:return: boolean
"""
return np.all(np.diff(a) > 0)
def interpolate(series, **options):
"""Creates an interpolation function.
series: Series object
options: any legal options to scipy.interpolate.interp1d
returns: function that maps from the index to the values
"""
if has_nan(series.index):
msg = """The Series you passed to interpolate contains
NaN values in the index, which would result in
undefined behavior. So I'm putting a stop to that."""
raise ValueError(msg)
if not is_strictly_increasing(series.index):
msg = """The Series you passed to interpolate has an index
that is not strictly increasing, which would result in
undefined behavior. So I'm putting a stop to that."""
raise ValueError(msg)
# make the interpolate function extrapolate past the ends of
# the range, unless `options` already specifies a value for `fill_value`
underride(options, fill_value="extrapolate")
# call interp1d, which returns a new function object
x = series.index
y = series.values
interp_func = interp1d(x, y, **options)
return interp_func
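# Hedged usage sketch (illustrative only): build an interpolating (and, by
# default, extrapolating) function from a Series indexed by time.
def _demo_interpolate():
    series = pd.Series([0.0, 10.0, 40.0], index=[0, 1, 2])
    f = interpolate(series)
    return f(1.5), f(3)   # linear interpolation, then extrapolation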
def interpolate_inverse(series, **options):
"""Interpolate the inverse function of a Series.
series: Series object, represents a mapping from `a` to `b`
options: any legal options to scipy.interpolate.interp1d
returns: interpolation object, can be used as a function
from `b` to `a`
"""
    inverse = pd.Series(series.index, index=series.values)
interp_func = interpolate(inverse, **options)
return interp_func
def gradient(series, **options):
"""Computes the numerical derivative of a series.
If the elements of series have units, they are dropped.
series: Series object
options: any legal options to np.gradient
returns: Series, same subclass as series
"""
x = series.index
y = series.values
a = np.gradient(y, x, **options)
return series.__class__(a, series.index)
def source_code(obj):
"""Prints the source code for a given object.
obj: function or method object
"""
print(inspect.getsource(obj))
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
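# Hedged usage sketch (illustrative only): existing keys win, missing keys are
# filled in with the defaults.
def _demo_underride():
    options = dict(color="C0")
    underride(options, color="gray", linewidth=2)
    return options   # {'color': 'C0', 'linewidth': 2}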
def contour(df, **options):
"""Makes a contour plot from a DataFrame.
Wrapper for plt.contour
https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.contour.html
Note: columns and index must be numerical
df: DataFrame
options: passed to plt.contour
"""
fontsize = options.pop("fontsize", 12)
underride(options, cmap="viridis")
x = df.columns
y = df.index
X, Y = np.meshgrid(x, y)
cs = plt.contour(X, Y, df, **options)
plt.clabel(cs, inline=1, fontsize=fontsize)
def savefig(filename, **options):
"""Save the current figure.
Keyword arguments are passed along to plt.savefig
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
filename: string
"""
print("Saving figure to file", filename)
plt.savefig(filename, **options)
def decorate(**options):
"""Decorate the current axes.
Call decorate with keyword arguments like
decorate(title='Title',
xlabel='x',
ylabel='y')
The keyword arguments can be any of the axis properties
https://matplotlib.org/api/axes_api.html
"""
ax = plt.gca()
ax.set(**options)
handles, labels = ax.get_legend_handles_labels()
if handles:
ax.legend(handles, labels)
plt.tight_layout()
def remove_from_legend(bad_labels):
"""Removes some labels from the legend.
bad_labels: sequence of strings
"""
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
handle_list, label_list = [], []
for handle, label in zip(handles, labels):
if label not in bad_labels:
handle_list.append(handle)
label_list.append(label)
ax.legend(handle_list, label_list)
class SettableNamespace(SimpleNamespace):
"""Contains a collection of parameters.
Used to make a System object.
Takes keyword arguments and stores them as attributes.
"""
def __init__(self, namespace=None, **kwargs):
super().__init__()
if namespace:
self.__dict__.update(namespace.__dict__)
self.__dict__.update(kwargs)
def get(self, name, default=None):
"""Look up a variable.
name: string varname
default: value returned if `name` is not present
"""
try:
            # __getattribute__ takes only the attribute name; passing `default`
            # raised TypeError instead of falling back to the default value.
            return self.__getattribute__(name)
except AttributeError:
return default
def set(self, **variables):
"""Make a copy and update the given variables.
returns: Params
"""
new = copy(self)
new.__dict__.update(variables)
return new
def magnitude(x):
"""Returns the magnitude of a Quantity or number.
x: Quantity or number
returns: number
"""
return x.magnitude if hasattr(x, 'magnitude') else x
def remove_units(namespace):
"""Removes units from the values in a Namespace.
Only removes units from top-level values;
does not traverse nested values.
returns: new Namespace object
"""
res = copy(namespace)
for label, value in res.__dict__.items():
if isinstance(value, pd.Series):
value = remove_units_series(value)
res.__dict__[label] = magnitude(value)
return res
def remove_units_series(series):
"""Removes units from the values in a Series.
Only removes units from top-level values;
does not traverse nested values.
returns: new Series object
"""
res = copy(series)
for label, value in res.iteritems():
res[label] = magnitude(value)
return res
class System(SettableNamespace):
"""Contains system parameters and their values.
Takes keyword arguments and stores them as attributes.
"""
pass
class Params(SettableNamespace):
"""Contains system parameters and their values.
Takes keyword arguments and stores them as attributes.
"""
pass
def State(**variables):
"""Contains the values of state variables."""
return pd.Series(variables)
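# Illustrative usage sketch (editorial addition); `_demo_state_and_system` is a
# hypothetical helper showing how the State and System objects defined above fit together.
def _demo_state_and_system():
    """Build a State and a System, then derive a modified copy with set()."""
    init = State(x=0.0, v=1.0)             # pd.Series of state variables
    system = System(init=init, g=9.8, t_end=10)
    moon = system.set(g=1.62)              # copy with one parameter overridden
    return system.g, moon.g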
def TimeSeries(*args, **kwargs):
"""
"""
if args or kwargs:
series = pd.Series(*args, **kwargs)
else:
series = pd.Series([], dtype=np.float64)
series.index.name = 'Time'
if 'name' not in kwargs:
series.name = 'Quantity'
return series
def SweepSeries(*args, **kwargs):
"""
"""
if args or kwargs:
series = pd.Series(*args, **kwargs)
else:
series = pd.Series([], dtype=np.float64)
series.index.name = 'Parameter'
if 'name' not in kwargs:
series.name = 'Metric'
return series
def TimeFrame(*args, **kwargs):
"""DataFrame that maps from time to State.
"""
return pd.DataFrame(*args, **kwargs)
def SweepFrame(*args, **kwargs):
"""DataFrame that maps from parameter value to SweepSeries.
"""
return
|
pd.DataFrame(*args, **kwargs)
|
pandas.DataFrame
|
# %% [markdown]
# ## Get COVID-19 Data
# %%
# Load packages
from rich import pretty, inspect, traceback
from rich.console import Console
import requests
import pandas as pd
# Pretty print data structures
console = Console()
pretty.install()
traceback.install()
# %%
BASE_URL = "https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/Coronavirus_2019_nCoV_Cases/FeatureServer/1/query?"
RESPONSE_FORMAT = "json"
OUT_FIELDS = "*"
OUT_SR = "4326"
WHERE = "Country_Region='US'"
url_request = requests.get(f"{BASE_URL}where={WHERE}&outFields={OUT_FIELDS}&outSR={OUT_SR}&f={RESPONSE_FORMAT}")
url_json = url_request.json()
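# Editorial note: in the usual ArcGIS FeatureServer JSON layout (an assumption, not
# verified against this endpoint), each element of url_json['features'] is a dict whose
# 'attributes' entry holds the per-record fields, so a common follow-up is to flatten
# them, e.g. pd.json_normalize(url_json['features']).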
df =
|
pd.DataFrame(url_json['features'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
|
MultiIndex.from_arrays(arrays=[])
|
pandas.MultiIndex.from_arrays
|
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import copy
import inspect
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from hypernets.experiment import Experiment
from hypernets.tabular import dask_ex as dex
from hypernets.tabular import drift_detection as dd
from hypernets.tabular.cache import cache
from hypernets.tabular.data_cleaner import DataCleaner
from hypernets.tabular.ensemble import GreedyEnsemble, DaskGreedyEnsemble
from hypernets.tabular.feature_importance import permutation_importance_batch, select_by_feature_importance
from hypernets.tabular.feature_selection import select_by_multicollinearity
from hypernets.tabular.general import general_estimator, general_preprocessor
from hypernets.tabular.lifelong_learning import select_valid_oof
from hypernets.tabular.pseudo_labeling import sample_by_pseudo_labeling
from hypernets.utils import logging, const
logger = logging.get_logger(__name__)
DEFAULT_EVAL_SIZE = 0.3
def _set_log_level(log_level):
logging.set_level(log_level)
# if log_level >= logging.ERROR:
# import logging as pylogging
# pylogging.basicConfig(level=log_level)
class StepNames:
DATA_CLEAN = 'data_clean'
FEATURE_GENERATION = 'feature_generation'
MULITICOLLINEARITY_DETECTION = 'multicollinearity_detection'
DRIFT_DETECTION = 'drift_detection'
FEATURE_IMPORTANCE_SELECTION = 'feature_selection'
SPACE_SEARCHING = 'space_searching'
ENSEMBLE = 'ensemble'
TRAINING = 'training'
PSEUDO_LABELING = 'pseudo_labeling'
FEATURE_RESELECTION = 'feature_reselection'
FINAL_SEARCHING = 'two_stage_searching'
FINAL_ENSEMBLE = 'final_ensemble'
FINAL_TRAINING = 'final_train'
class ExperimentStep(BaseEstimator):
def __init__(self, experiment, name):
super(ExperimentStep, self).__init__()
self.name = name
self.experiment = experiment
# fitted
self.input_features_ = None
self.status_ = None # None(not fit) or True(fit succeed) or False(fit failed)
def step_progress(self, *args, **kwargs):
if self.experiment is not None:
self.experiment.step_progress(*args, **kwargs)
@property
def task(self):
return self.experiment.task if self.experiment is not None else None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
self.input_features_ = X_train.columns.to_list()
# self.status_ = True
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
        raise NotImplementedError()
# return X
def is_transform_skipped(self):
return False
def get_fitted_params(self):
return {'input_features': self.input_features_}
# override this to remove 'experiment' from estimator __expr__
@classmethod
def _get_param_names(cls):
params = super()._get_param_names()
return filter(lambda x: x != 'experiment', params)
def __getstate__(self):
state = super().__getstate__()
# Don't pickle experiment
if 'experiment' in state.keys():
state['experiment'] = None
return state
def _repr_df_(self):
init_params = self.get_params()
fitted_params = self.get_fitted_params()
init_df = pd.Series(init_params, name='value').to_frame()
init_df['kind'] = 'settings'
fitted_df = pd.Series(fitted_params, name='value').to_frame()
fitted_df['kind'] = 'fitted'
df = pd.concat([init_df, fitted_df], axis=0)
df['key'] = df.index
df = df.set_index(['kind', 'key'])
return df
def _repr_html_(self):
df = self._repr_df_()
html = f'<h2>{self.name}</h2>{df._repr_html_()}'
return html
class FeatureSelectStep(ExperimentStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.selected_features_ = None
def transform(self, X, y=None, **kwargs):
if self.selected_features_ is not None:
if logger.is_debug_enabled():
msg = f'{self.name} transform from {len(X.columns.tolist())} to {len(self.selected_features_)} features'
logger.debug(msg)
X = X[self.selected_features_]
return X
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
if self.selected_features_ is not None:
features = self.selected_features_
X_train = X_train[features]
if X_test is not None:
X_test = X_test[features]
if X_eval is not None:
X_eval = X_eval[features]
if logger.is_info_enabled():
logger.info(f'{self.name} cache_transform: {len(X_train.columns)} columns kept.')
else:
if logger.is_info_enabled():
logger.info(f'{self.name} cache_transform: {len(X_train.columns)} columns kept (do nothing).')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def is_transform_skipped(self):
return self.selected_features_ is None
def get_fitted_params(self):
if self.selected_features_ is None:
unselected = None
else:
unselected = list(filter(lambda _: _ not in self.selected_features_, self.input_features_))
return {**super().get_fitted_params(),
'selected_features': self.selected_features_,
'unselected_features': unselected}
class DataCleanStep(FeatureSelectStep):
def __init__(self, experiment, name, data_cleaner_args=None,
cv=False, train_test_split_strategy=None, random_state=None):
super().__init__(experiment, name)
self.data_cleaner_args = data_cleaner_args if data_cleaner_args is not None else {}
self.cv = cv
self.train_test_split_strategy = train_test_split_strategy
self.random_state = random_state
# fitted
self.data_cleaner_ = None
self.detector_ = None
self.data_shapes_ = None
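    # Editorial note (inferred from how the decorator is used in this file, not from
    # hypernets documentation): `arg_keys` names the call arguments whose values key the
    # cache entry, `strategy='transform'` with `transformer='cache_transform'` makes a
    # cache hit replay the lighter cache_transform() path instead of a full fit, and
    # `attrs_to_restore` lists the fitted attributes reloaded alongside the cached data.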
@cache(arg_keys='X_train,y_train,X_test,X_eval,y_eval',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,data_cleaner_,detector_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
# 1. Clean Data
if self.cv and X_eval is not None and y_eval is not None:
logger.info(f'{self.name} cv enabled, so concat train data and eval data')
X_train = dex.concat_df([X_train, X_eval], axis=0)
y_train = dex.concat_df([y_train, y_eval], axis=0)
X_eval = None
y_eval = None
data_cleaner = DataCleaner(**self.data_cleaner_args)
logger.info(f'{self.name} fit_transform with train data')
X_train, y_train = data_cleaner.fit_transform(X_train, y_train)
self.step_progress('fit_transform train set')
if X_test is not None:
logger.info(f'{self.name} transform test data')
X_test = data_cleaner.transform(X_test)
self.step_progress('transform X_test')
if not self.cv:
if X_eval is None or y_eval is None:
eval_size = self.experiment.eval_size
if self.train_test_split_strategy == 'adversarial_validation' and X_test is not None:
logger.debug('DriftDetector.train_test_split')
detector = dd.DriftDetector()
detector.fit(X_train, X_test)
self.detector_ = detector
X_train, X_eval, y_train, y_eval = \
detector.train_test_split(X_train, y_train, test_size=eval_size)
else:
if self.task == const.TASK_REGRESSION or dex.is_dask_object(X_train):
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state)
else:
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state, stratify=y_train)
if self.task != const.TASK_REGRESSION:
y_train_uniques = set(y_train.unique()) if hasattr(y_train, 'unique') else set(y_train)
y_eval_uniques = set(y_eval.unique()) if hasattr(y_eval, 'unique') else set(y_eval)
assert y_train_uniques == y_eval_uniques, \
'The classes of `y_train` and `y_eval` must be equal. Try to increase eval_size.'
self.step_progress('split into train set and eval set')
else:
X_eval, y_eval = data_cleaner.transform(X_eval, y_eval)
self.step_progress('transform eval set')
selected_features = X_train.columns.to_list()
data_shapes = {'X_train.shape': X_train.shape,
'y_train.shape': y_train.shape,
'X_eval.shape': None if X_eval is None else X_eval.shape,
'y_eval.shape': None if y_eval is None else y_eval.shape,
'X_test.shape': None if X_test is None else X_test.shape
}
if dex.exist_dask_object(X_train, y_train, X_eval, y_eval, X_test):
data_shapes = {k: dex.compute(v) if v is not None else None
for k, v in data_shapes.items()}
logger.info(f'{self.name} keep {len(selected_features)} columns')
self.selected_features_ = selected_features
self.data_cleaner_ = data_cleaner
self.data_shapes_ = data_shapes
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
# 1. Clean Data
if self.cv and X_eval is not None and y_eval is not None:
logger.info(f'{self.name} cv enabled, so concat train data and eval data')
X_train = dex.concat_df([X_train, X_eval], axis=0)
y_train = dex.concat_df([y_train, y_eval], axis=0)
X_eval = None
y_eval = None
data_cleaner = self.data_cleaner_
logger.info(f'{self.name} transform train data')
X_train, y_train = data_cleaner.transform(X_train, y_train)
self.step_progress('fit_transform train set')
if X_test is not None:
logger.info(f'{self.name} transform test data')
X_test = data_cleaner.transform(X_test)
self.step_progress('transform X_test')
if not self.cv:
if X_eval is None or y_eval is None:
eval_size = self.experiment.eval_size
if self.train_test_split_strategy == 'adversarial_validation' and X_test is not None:
logger.debug('DriftDetector.train_test_split')
detector = self.detector_
X_train, X_eval, y_train, y_eval = \
detector.train_test_split(X_train, y_train, test_size=eval_size)
else:
if self.task == const.TASK_REGRESSION or dex.is_dask_object(X_train):
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state)
else:
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state, stratify=y_train)
if self.task != const.TASK_REGRESSION:
y_train_uniques = set(y_train.unique()) if hasattr(y_train, 'unique') else set(y_train)
y_eval_uniques = set(y_eval.unique()) if hasattr(y_eval, 'unique') else set(y_eval)
assert y_train_uniques == y_eval_uniques, \
'The classes of `y_train` and `y_eval` must be equal. Try to increase eval_size.'
self.step_progress('split into train set and eval set')
else:
X_eval, y_eval = data_cleaner.transform(X_eval, y_eval)
self.step_progress('transform eval set')
selected_features = self.selected_features_
data_shapes = {'X_train.shape': X_train.shape,
'y_train.shape': y_train.shape,
'X_eval.shape': None if X_eval is None else X_eval.shape,
'y_eval.shape': None if y_eval is None else y_eval.shape,
'X_test.shape': None if X_test is None else X_test.shape
}
if dex.exist_dask_object(X_train, y_train, X_eval, y_eval, X_test):
data_shapes = {k: dex.compute(v) if v is not None else None
for k, v in data_shapes.items()}
logger.info(f'{self.name} keep {len(selected_features)} columns')
self.data_shapes_ = data_shapes
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
return self.data_cleaner_.transform(X, y, **kwargs)
def get_fitted_params(self):
dc = self.data_cleaner_
def get_reason(c):
if dc is None:
return 'unknown'
if dc.dropped_constant_columns_ is not None and c in dc.dropped_constant_columns_:
return 'constant'
elif dc.dropped_idness_columns_ is not None and c in dc.dropped_idness_columns_:
return 'idness'
elif dc.dropped_duplicated_columns_ is not None and c in dc.dropped_duplicated_columns_:
return 'duplicated'
else:
return 'others'
params = super().get_fitted_params()
data_shapes = self.data_shapes_ if self.data_shapes_ is not None else {}
unselected_features = params.get('unselected_features', [])
if dc is not None:
unselected_reason = {f: get_reason(f) for f in unselected_features}
else:
unselected_reason = None
return {**params,
**data_shapes,
'unselected_reason': unselected_reason,
}
class TransformerAdaptorStep(ExperimentStep):
def __init__(self, experiment, name, transformer_creator, **kwargs):
assert transformer_creator is not None
self.transformer_creator = transformer_creator
self.transformer_kwargs = kwargs
super(TransformerAdaptorStep, self).__init__(experiment, name)
# fitted
self.transformer_ = None
@cache(arg_keys='X_train, y_train, X_test, X_eval, y_eval',
strategy='transform', transformer='cache_transform',
attrs_to_restore='transformer_kwargs,transformer_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
logger.info(f'{self.name} fit')
init_kwargs = self.transformer_kwargs.copy()
if 'task' in init_kwargs.keys():
init_kwargs['task'] = self.task
transformer = self.transformer_creator(**init_kwargs)
transformer.fit(X_train, y_train, **kwargs)
self.transformer_ = transformer
return self.cache_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval,
**kwargs)
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
logger.info(f'{self.name} cache_transform')
transformer = self.transformer_
X_train = transformer.transform(X_train)
if X_eval is not None:
X_eval = transformer.transform(X_eval, y_eval)
if X_test is not None:
X_test = transformer.transform(X_test)
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
logger.info(f'{self.name} transform')
if y is None:
return self.transformer_.transform(X)
else:
return self.transformer_.transform(X, y)
def __getattribute__(self, item):
try:
return super(TransformerAdaptorStep, self).__getattribute__(item)
except AttributeError as e:
transformer_kwargs = self.transformer_kwargs
if item in transformer_kwargs.keys():
return transformer_kwargs[item]
else:
raise e
def __dir__(self):
transformer_kwargs = self.transformer_kwargs
return set(super(TransformerAdaptorStep, self).__dir__()).union(set(transformer_kwargs.keys()))
class FeatureGenerationStep(TransformerAdaptorStep):
def __init__(self, experiment, name,
trans_primitives=None,
continuous_cols=None,
datetime_cols=None,
categories_cols=None,
latlong_cols=None,
text_cols=None,
max_depth=1,
feature_selection_args=None):
from hypernets.tabular.feature_generators import FeatureGenerationTransformer
drop_cols = []
if text_cols is not None:
drop_cols += list(text_cols)
if latlong_cols is not None:
drop_cols += list(latlong_cols)
super(FeatureGenerationStep, self).__init__(experiment, name,
FeatureGenerationTransformer,
trans_primitives=trans_primitives,
fix_input=False,
continuous_cols=continuous_cols,
datetime_cols=datetime_cols,
categories_cols=categories_cols,
latlong_cols=latlong_cols,
text_cols=text_cols,
drop_cols=drop_cols if len(drop_cols) > 0 else None,
max_depth=max_depth,
feature_selection_args=feature_selection_args,
task=None, # fixed by super
)
def get_fitted_params(self):
t = self.transformer_
return {**super(FeatureGenerationStep, self).get_fitted_params(),
'trans_primitives': t.trans_primitives if t is not None else None,
'output_feature_names': t.transformed_feature_names_ if t is not None else None,
}
class MulticollinearityDetectStep(FeatureSelectStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.corr_linkage_ = None
@cache(arg_keys='X_train',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,corr_linkage_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
corr_linkage, remained, dropped = select_by_multicollinearity(X_train)
self.step_progress('calc correlation')
if dropped:
self.selected_features_ = remained
X_train = X_train[self.selected_features_]
if X_eval is not None:
X_eval = X_eval[self.selected_features_]
if X_test is not None:
X_test = X_test[self.selected_features_]
self.step_progress('drop features')
else:
self.selected_features_ = None
self.corr_linkage_ = corr_linkage
logger.info(f'{self.name} drop {len(dropped)} columns, {len(remained)} kept')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'corr_linkage': self.corr_linkage_,
}
class DriftDetectStep(FeatureSelectStep):
def __init__(self, experiment, name, remove_shift_variable, variable_shift_threshold,
threshold, remove_size, min_features, num_folds):
super().__init__(experiment, name)
self.remove_shift_variable = remove_shift_variable
self.variable_shift_threshold = variable_shift_threshold
self.threshold = threshold
self.remove_size = remove_size if 1.0 > remove_size > 0 else 0.1
self.min_features = min_features if min_features > 1 else 10
self.num_folds = num_folds if num_folds > 1 else 5
# fitted
self.history_ = None
self.scores_ = None
@cache(arg_keys='X_train,X_test',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,history_,scores_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if X_test is not None:
features, history, scores = dd.feature_selection(X_train, X_test,
remove_shift_variable=self.remove_shift_variable,
variable_shift_threshold=self.variable_shift_threshold,
auc_threshold=self.threshold,
min_features=self.min_features,
remove_size=self.remove_size,
cv=self.num_folds)
dropped = set(X_train.columns.to_list()) - set(features)
if dropped:
self.selected_features_ = features
X_train = X_train[features]
X_test = X_test[features]
if X_eval is not None:
X_eval = X_eval[features]
else:
self.selected_features_ = None
self.history_ = history
self.scores_ = scores
logger.info(f'{self.name} drop {len(dropped)} columns, {len(features)} kept')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'history': self.history_,
'scores': self.scores_,
}
class FeatureImportanceSelectionStep(FeatureSelectStep):
def __init__(self, experiment, name, strategy, threshold, quantile, number):
super(FeatureImportanceSelectionStep, self).__init__(experiment, name)
self.strategy = strategy
self.threshold = threshold
self.quantile = quantile
self.number = number
# fitted
self.importances_ = None
@cache(arg_keys='X_train,y_train',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,importances_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
preprocessor = general_preprocessor(X_train)
estimator = general_estimator(X_train, task=self.task)
estimator.fit(preprocessor.fit_transform(X_train, y_train), y_train)
importances = estimator.feature_importances_
self.step_progress('training general estimator')
selected, unselected = \
select_by_feature_importance(importances, self.strategy,
threshold=self.threshold,
quantile=self.quantile,
number=self.number)
features = X_train.columns.to_list()
selected_features = [features[i] for i in selected]
unselected_features = [features[i] for i in unselected]
self.step_progress('select by importances')
if unselected_features:
X_train = X_train[selected_features]
if X_eval is not None:
X_eval = X_eval[selected_features]
if X_test is not None:
X_test = X_test[selected_features]
self.step_progress('drop features')
logger.info(f'{self.name} drop {len(unselected_features)} columns, {len(selected_features)} kept')
self.selected_features_ = selected_features if len(unselected_features) > 0 else None
self.importances_ = importances
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'importances': self.importances_,
}
class PermutationImportanceSelectionStep(FeatureSelectStep):
def __init__(self, experiment, name, scorer, estimator_size,
strategy, threshold, quantile, number):
assert scorer is not None
super().__init__(experiment, name)
self.scorer = scorer
self.estimator_size = estimator_size
self.strategy = strategy
self.threshold = threshold
self.quantile = quantile
self.number = number
# fixed
self.importances_ = None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
best_trials = hyper_model.get_top_trials(self.estimator_size)
estimators = [hyper_model.load_estimator(trial.model_file) for trial in best_trials]
self.step_progress('load estimators')
if X_eval is None or y_eval is None:
importances = permutation_importance_batch(estimators, X_train, y_train, self.scorer, n_repeats=5)
else:
importances = permutation_importance_batch(estimators, X_eval, y_eval, self.scorer, n_repeats=5)
# feature_index = np.argwhere(importances.importances_mean < self.threshold)
# selected_features = [feat for i, feat in enumerate(X_train.columns.to_list()) if i not in feature_index]
# unselected_features = list(set(X_train.columns.to_list()) - set(selected_features))
selected, unselected = select_by_feature_importance(importances.importances_mean,
self.strategy,
threshold=self.threshold,
quantile=self.quantile,
number=self.number)
if len(selected) > 0:
selected_features = [importances.columns[i] for i in selected]
unselected_features = [importances.columns[i] for i in unselected]
else:
            msg = f'{self.name}: all features would be dropped with importances {importances.importances_mean},' \
                  f' so nothing was dropped. Please adjust the selection settings and try again.'
logger.warning(msg)
selected_features = importances.columns
unselected_features = []
self.step_progress('calc importance')
if unselected_features:
X_train = X_train[selected_features]
if X_eval is not None:
X_eval = X_eval[selected_features]
if X_test is not None:
X_test = X_test[selected_features]
self.step_progress('drop features')
logger.info(f'{self.name} drop {len(unselected_features)} columns, {len(selected_features)} kept')
self.selected_features_ = selected_features if len(unselected_features) > 0 else None
self.importances_ = importances
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'importances': self.importances_,
}
class SpaceSearchStep(ExperimentStep):
def __init__(self, experiment, name, cv=False, num_folds=3):
super().__init__(experiment, name)
self.cv = cv
self.num_folds = num_folds
# fitted
self.history_ = None
self.best_reward_ = None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if not dex.is_dask_object(X_eval):
kwargs['eval_set'] = (X_eval, y_eval)
model = copy.deepcopy(self.experiment.hyper_model) # copy from original hyper_model instance
model.search(X_train, y_train, X_eval, y_eval, cv=self.cv, num_folds=self.num_folds, **kwargs)
if model.get_best_trial() is None or model.get_best_trial().reward == 0:
            raise RuntimeError('No available trial was found; please change the experiment settings and try again.')
logger.info(f'{self.name} best_reward: {model.get_best_trial().reward}')
self.history_ = model.history
self.best_reward_ = model.get_best_trial().reward
return model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def get_fitted_params(self):
return {**super().get_fitted_params(),
'best_reward': self.best_reward_,
'history': self.history_,
}
class DaskSpaceSearchStep(SpaceSearchStep):
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
X_train, y_train, X_test, X_eval, y_eval = \
[v.persist() if dex.is_dask_object(v) else v for v in (X_train, y_train, X_test, X_eval, y_eval)]
return super().fit_transform(hyper_model, X_train, y_train, X_test, X_eval, y_eval, **kwargs)
class EstimatorBuilderStep(ExperimentStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.estimator_ = None
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def get_fitted_params(self):
return {**super().get_fitted_params(),
'estimator': self.estimator_,
}
class EnsembleStep(EstimatorBuilderStep):
def __init__(self, experiment, name, scorer=None, ensemble_size=7):
assert ensemble_size > 1
super().__init__(experiment, name)
self.scorer = scorer if scorer is not None else get_scorer('neg_log_loss')
self.ensemble_size = ensemble_size
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
best_trials = hyper_model.get_top_trials(self.ensemble_size)
estimators = [hyper_model.load_estimator(trial.model_file) for trial in best_trials]
ensemble = self.get_ensemble(estimators, X_train, y_train)
if all(['oof' in trial.memo.keys() for trial in best_trials]):
logger.info('ensemble with oofs')
oofs = self.get_ensemble_predictions(best_trials, ensemble)
assert oofs is not None
if hasattr(oofs, 'shape'):
y_, oofs_ = select_valid_oof(y_train, oofs)
ensemble.fit(None, y_, oofs_)
else:
ensemble.fit(None, y_train, oofs)
else:
ensemble.fit(X_eval, y_eval)
self.estimator_ = ensemble
logger.info(f'ensemble info: {ensemble}')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_ensemble(self, estimators, X_train, y_train):
return GreedyEnsemble(self.task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
def get_ensemble_predictions(self, trials, ensemble):
oofs = None
for i, trial in enumerate(trials):
if 'oof' in trial.memo.keys():
oof = trial.memo['oof']
if oofs is None:
if len(oof.shape) == 1:
oofs = np.zeros((oof.shape[0], len(trials)), dtype=np.float64)
else:
oofs = np.zeros((oof.shape[0], len(trials), oof.shape[-1]), dtype=np.float64)
oofs[:, i] = oof
return oofs
class DaskEnsembleStep(EnsembleStep):
def get_ensemble(self, estimators, X_train, y_train):
if dex.exist_dask_object(X_train, y_train):
predict_kwargs = {}
if all(['use_cache' in inspect.signature(est.predict).parameters.keys()
for est in estimators]):
predict_kwargs['use_cache'] = False
return DaskGreedyEnsemble(self.task, estimators, scoring=self.scorer,
ensemble_size=self.ensemble_size,
predict_kwargs=predict_kwargs)
return super().get_ensemble(estimators, X_train, y_train)
def get_ensemble_predictions(self, trials, ensemble):
if isinstance(ensemble, DaskGreedyEnsemble):
oofs = [trial.memo.get('oof') for trial in trials]
return oofs if any([oof is not None for oof in oofs]) else None
return super().get_ensemble_predictions(trials, ensemble)
class FinalTrainStep(EstimatorBuilderStep):
def __init__(self, experiment, name, retrain_on_wholedata=False):
super().__init__(experiment, name)
self.retrain_on_wholedata = retrain_on_wholedata
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if self.retrain_on_wholedata:
trial = hyper_model.get_best_trial()
X_all = dex.concat_df([X_train, X_eval], axis=0)
y_all = dex.concat_df([y_train, y_eval], axis=0)
estimator = hyper_model.final_train(trial.space_sample, X_all, y_all, **kwargs)
else:
estimator = hyper_model.load_estimator(hyper_model.get_best_trial().model_file)
self.estimator_ = estimator
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
class PseudoLabelStep(ExperimentStep):
def __init__(self, experiment, name, estimator_builder,
strategy=None, proba_threshold=None, proba_quantile=None, sample_number=None,
resplit=False, random_state=None):
super().__init__(experiment, name)
assert hasattr(estimator_builder, 'estimator_')
self.estimator_builder = estimator_builder
self.strategy = strategy
self.proba_threshold = proba_threshold
self.proba_quantile = proba_quantile
self.sample_number = sample_number
self.resplit = resplit
self.random_state = random_state
self.plot_sample_size = 3000
# fitted
self.test_proba_ = None
self.pseudo_label_stat_ = None
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
# build estimator
hyper_model, X_train, y_train, X_test, X_eval, y_eval = \
self.estimator_builder.fit_transform(hyper_model, X_train, y_train, X_test=X_test,
X_eval=X_eval, y_eval=y_eval, **kwargs)
estimator = self.estimator_builder.estimator_
# start here
X_pseudo = None
y_pseudo = None
test_proba = None
pseudo_label_stat = None
if self.task in [const.TASK_BINARY, const.TASK_MULTICLASS] and X_test is not None:
proba = estimator.predict_proba(X_test)
classes = estimator.classes_
X_pseudo, y_pseudo = sample_by_pseudo_labeling(X_test, classes, proba,
strategy=self.strategy,
threshold=self.proba_threshold,
quantile=self.proba_quantile,
number=self.sample_number,
)
pseudo_label_stat = self.stat_pseudo_label(y_pseudo, classes)
test_proba = dex.compute(proba)[0] if dex.is_dask_object(proba) else proba
if test_proba.shape[0] > self.plot_sample_size:
test_proba, _ = dex.train_test_split(test_proba,
train_size=self.plot_sample_size,
random_state=self.random_state)
if X_pseudo is not None:
X_train, y_train, X_eval, y_eval = \
self.merge_pseudo_label(X_train, y_train, X_eval, y_eval, X_pseudo, y_pseudo)
self.test_proba_ = test_proba
self.pseudo_label_stat_ = pseudo_label_stat
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
    @staticmethod
def stat_pseudo_label(y_pseudo, classes):
stat = OrderedDict()
if dex.is_dask_object(y_pseudo):
u = dex.da.unique(y_pseudo, return_counts=True)
u = dex.compute(u)[0]
else:
u = np.unique(y_pseudo, return_counts=True)
u = {c: n for c, n in zip(*u)}
for c in classes:
stat[c] = u[c] if c in u.keys() else 0
return stat
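    # Example (hypothetical): for classes [0, 1], stat_pseudo_label might return
    # OrderedDict([(0, 1250), (1, 87)]); classes that received no pseudo-labels still
    # appear with a count of 0.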
def merge_pseudo_label(self, X_train, y_train, X_eval, y_eval, X_pseudo, y_pseudo, **kwargs):
if self.resplit:
x_list = [X_train, X_pseudo]
y_list = [y_train, pd.Series(y_pseudo)]
if X_eval is not None and y_eval is not None:
x_list.append(X_eval)
y_list.append(y_eval)
            X_mix = pd.concat(x_list, axis=0, ignore_index=True)

# -*- coding: utf-8 -*-
# author: <NAME>
# Email: <EMAIL>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import generators
from __future__ import with_statement
import re
from bs4 import BeautifulSoup
from concurrent import futures
import os
import sys
import traceback
import time
import datetime
import pandas as pd
import requests
import json
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from fake_useragent import UserAgent
from openpyxl import load_workbook
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
############ Global variable initialization ##############
HEADERS = dict()
# Number of concurrent worker threads
NUM_THREADS = None
# City selection
city_dict = {
"成都": "cd",
"北京": "bj",
"上海": "sh",
"广州": "gz",
"深圳": "sz",
"南京": "nj",
"合肥": "hf",
"杭州": "hz",
}
# Whether to print HTTP errors
PRINT = True if ((len(sys.argv) > 1) and (sys.argv[1] == 'true')) else False
# Initialize the fake User-Agent library
ua = UserAgent()
# Do not use a proxy
proxies = None
WORKPATH="/home/frank/workspace/lianjia/data"
CITY = city_dict["北京"]
""" HTTP GET 操作封装 """
def get_bs_obj_from_url(http_url):
done = False
exception_time = 0
HEADERS["User-Agent"] = ua.random
while not done:
try:
if PRINT:
print("正在获取 {}".format(http_url))
r = requests.get(http_url, headers=HEADERS, proxies=proxies, timeout=3)
bs_obj = BeautifulSoup(r.text, "lxml")
done = True
except Exception as e:
if PRINT:
print(e)
exception_time += 1
time.sleep(1)
if exception_time > 10:
return None
return bs_obj
""" 判断一个字符串是否可以转成数字 """
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def esf_mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
print("{} create successfully.".format(path))
return True
else:
print("{} already exist.".format(path))
return False
def get_district_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
district_list = [a.attrs["href"].replace("/ershoufang/", "")[:-1]
for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(district_list)))
return district_list
def get_district_name_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
name_list = [a.get_text() for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(name_list)))
return name_list
def get_esf_from_district(city, district):
http_url = "http://{}.lianjia.com/ershoufang/{}".format(city, district)
bs_obj = get_bs_obj_from_url(http_url)
esf_list = []
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
#try again
try:
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
return esf_list
print("---district {} total ershoufang numbers: {}---".format(district, total_esf_num))
if total_esf_num == 0:
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
for price in range(1, 9):
esf_list_partial = get_esf_id_in_price(city, district, price)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
def get_esf_id_in_price(city, district, price):
http_url = "http://{}.lianjia.com/ershoufang/{}/p{}".format(city, district, price)
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = 0
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
print(" price {} get error.".format(price))
pass
#print("------price {} total : {}---".format(price, total_esf_num))
esf_list = []
if total_esf_num == 0:
print(" price {} finish---done.".format(price))
return esf_list
try:
page_box = bs_obj.find("div", {"class": "page-box house-lst-page-box"})
total_pages = int(json.loads(page_box.attrs["page-data"])["totalPage"])
except Exception as e:
print(" price {} page get error.".format(price))
return esf_list
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
future_list = []
for page_no in range(1, total_pages + 1):
future_list.append(executor.submit(get_esf_id_in_page, city, district, price, page_no))
fail_list = []
count = 0
for future in futures.as_completed(future_list):
page_no, esf_list_partial = future.result()
if esf_list_partial is None or len(esf_list_partial) == 0:
fail_list.append(page_no)
else:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
for page_no in fail_list:
_, esf_list_partial = get_esf_id_in_page(city, district, price, page_no)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
print("---done.")
return esf_list
def get_esf_id_in_page(city, district, price, page_no):
http_url = "http://{}.lianjia.com/ershoufang/{}/pg{}p{}".format(city, district, page_no, price)
bs_obj = get_bs_obj_from_url(http_url)
if bs_obj is None:
print("get ershoufang id, price {} page {} is none".format(price, page_no))
        return page_no, None  # keep the (page_no, result) tuple shape expected by callers
parent_list = bs_obj.find_all("li", {"class": "clear"})
esf_list = []
if not (len(parent_list) == 0):
for li in parent_list:
esf_url = str(li.find("div", {"class": "title"}).find("a").attrs["href"])
esf_id = "".join(list(filter(str.isdigit, esf_url)))
esf_list.append(esf_id)
return page_no, esf_list
def get_esf_of_city(city):
district_list = get_district_from_city(city)
esf_list = []
for district in district_list:
esf_of_district = get_esf_from_district(city, district)
esf_list += esf_of_district
esf_list = sorted(set(esf_list), key=esf_list.index)
return esf_list
def get_esf_info(city, esf_id):
http_url = "https://{}.lianjia.com/ershoufang/{}.html".format(city, esf_id)
bs_obj = get_bs_obj_from_url(http_url)
df = pd.DataFrame()
if bs_obj is not None:
try:
test = bs_obj.find("div", {"class": "icon-404 icon fl"})
if test is not None:
return esf_id, df
total_price = bs_obj.find("span", {"class": "total"}).get_text()
if not is_number(total_price):
return esf_id, df
unit_price = bs_obj.find("div", {"class": "unitPrice"}).get_text().replace("元/平米", "")
huxing = bs_obj.find("div", {"class": "room"}).find("div", {"class": "mainInfo"}).get_text()
xiaoqu = bs_obj.find("div", {"class": "communityName"}).find("a").get_text()
area_info = bs_obj.find("div", {"class": "areaName"}).find_all("a")
chengqu = area_info[0].get_text()
quyu = area_info[1].get_text()
base_info = bs_obj.find("div", {"class": "newwrap baseinform"})
            # Basic attributes
base = base_info.find("div", {"class": "base"}).get_text()
louceng = None if "所在楼层" not in base else base.split("所在楼层")[1].split("(")[0]
zonglouceng = None if "所在楼层" not in base else base.split("(共")[1].split("层")[0]
jianzhumianji = None if "建筑面积" not in base else base.split("建筑面积")[1].split("㎡")[0]
if not is_number(jianzhumianji):
return esf_id, df
huxingjiegou = None if "户型结构" not in base else base.split("户型结构")[1].split("\n")[0]
if "套内面积" not in base:
taoneimianji = None
elif "暂无数据" in base.split("套内面积")[1].split("\n")[0]:
taoneimianji = None
else:
taoneimianji = base.split("套内面积")[1].split("㎡")[0]
jianzhuleixing = None if "建筑类型" not in base else base.split("建筑类型")[1].split("\n")[0]
chaoxiang = None if "房屋朝向" not in base else base.split("房屋朝向")[1].split("\n")[0]
jianzhujiegou = None if "建筑结构" not in base else base.split("建筑结构")[1].split("\n")[0]
zhuangxiu = None if "装修情况" not in base else base.split("装修情况")[1].split("\n")[0]
tihubili = None if "梯户比例" not in base else base.split("梯户比例")[1].split("\n")[0]
gongnuan = None if "供暖方式" not in base else base.split("供暖方式")[1].split("\n")[0]
dianti = None if "配备电梯" not in base else base.split("配备电梯")[1].split("\n")[0]
chanquan = None if "产权年限" not in base else base.split("产权年限")[1].split("\n")[0]
yongshui = "商水" if base_info.find(text="商水") is not None else "民水"
yongdian = "商电" if base_info.find(text="商电") is not None else "民电"
            # Transaction attributes
trans = base_info.find("div", {"class": "transaction"}).get_text()
guapaishijian = None if "挂牌时间" not in trans else trans.split("挂牌时间")[1].strip().split("\n")[0]
jiaoyiquanshu = None if "交易权属" not in trans else trans.split("交易权属")[1].strip().split("\n")[0]
fangwuyongtu = None if "房屋用途" not in trans else trans.split("房屋用途")[1].strip().split("\n")[0]
fangwunianxian = None if "房屋年限" not in trans else trans.split("房屋年限")[1].strip().split("\n")[0]
chanquansuoshu = None if "产权所属" not in trans else trans.split("产权所属")[1].strip().split("\n")[0]
diyaxinxi = None if "抵押信息" not in trans else trans.split("抵押信息")[1].strip().split("\n")[0]
df = pd.DataFrame(index=[esf_id], data=[[http_url, chengqu, quyu, xiaoqu,
huxing, total_price, unit_price, jianzhumianji,
taoneimianji, chaoxiang, louceng, zonglouceng,
huxingjiegou, jianzhuleixing, jianzhujiegou,
fangwuyongtu, jiaoyiquanshu, fangwunianxian,
guapaishijian, zhuangxiu, tihubili, gongnuan,
dianti, chanquan, yongshui, yongdian,
chanquansuoshu, diyaxinxi]],
columns=["URL", "城区", "片区", "小区",
"户型", "总价", "单价", "建筑面积",
"套内面积", "朝向", "楼层", "总楼层",
"户型结构", "建筑类型", "建筑结构",
"房屋用途", "交易权属", "房屋年限",
"挂牌时间", "装修", "梯户比例", "供暖",
"配备电梯", "产权", "用水", "用电",
"产权所属", "抵押信息"])
except Exception as e:
print("[E]: get_esf_info, esf_id =", esf_id, e)
traceback.print_exc()
pass
return esf_id, df
def get_esf_info_from_esf_list(city, esf_list):
df_esf_info = pd.DataFrame()
count = 0
pct = 0
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
future_list = []
for esf in esf_list:
future_list.append(executor.submit(get_esf_info, city, esf))
fail_list = []
#print(" ")
for future in futures.as_completed(future_list):
esf, df_info_partial = future.result()
if len(df_info_partial) == 0:
fail_list.append(esf)
else:
df_esf_info = df_esf_info.append(df_info_partial)
count += 1
sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
for esf in fail_list:
_, df_info_partial = get_esf_info(city, esf)
if len(df_info_partial) > 0:
df_esf_info = df_esf_info.append(df_info_partial)
count += 1
sys.stdout.write("\rget ershoufang info: {}/{}".format(count, len(esf_list)))
print(" ")
return df_esf_info
def compare_two_list(new_esf_list, old_esf_list):
add_list = []
remove_list = []
same_list = []
for esf_id in new_esf_list:
if esf_id not in old_esf_list:
add_list.append(esf_id)
else:
same_list.append(esf_id)
for esf_id in old_esf_list:
if esf_id not in new_esf_list:
remove_list.append(esf_id)
return add_list, remove_list, same_list
def excel_add_sheet(dataframe, filename, sheetname, indexname):
excelwriter = pd.ExcelWriter(filename)
book = load_workbook(excelwriter.path)
excelwriter.book = book
dataframe.to_excel(excelwriter, sheetname, index_label=indexname)
excelwriter.close()
return
def get_price_changed_esf_info(same_list, new_esf_info, old_esf_info):
df_jiang = pd.DataFrame()
df_zhang = pd.DataFrame()
count = 0
for esf_id in same_list:
try:
new_price = new_esf_info.loc[[esf_id]]["总价"].values[0]
old_price = old_esf_info.loc[[esf_id]]["总价"].values[0]
old_unit_price = old_esf_info.loc[esf_id]["单价"]
new_info = new_esf_info.loc[[esf_id]]
if new_price > old_price:
new_info.insert(loc=6, column="原总价", value=old_price)
new_info.insert(loc=7, column="涨价", value=(new_price-old_price))
zhangfu=format(((new_price-old_price)/old_price), '.2%')
new_info.insert(loc=8, column="涨幅", value=zhangfu)
new_info.insert(loc=10, column="原单价", value=old_unit_price)
df_zhang = df_zhang.append(new_info)
elif new_price < old_price:
new_info.insert(loc=6, column="原总价", value=old_price)
new_info.insert(loc=7, column="降价", value=(old_price-new_price))
diefu=format(((old_price-new_price)/old_price), '.2%')
new_info.insert(loc=8, column="降幅", value=diefu)
new_info.insert(loc=10, column="原单价", value=old_unit_price)
df_jiang = df_jiang.append(new_info)
else:
pass
except Exception as e:
print("[E]: get_price_changed, esf_id =", esf_id, e)
pass
count += 1
sys.stdout.write("\rget price change info: {}/{}".format(count, len(same_list)))
print(" ")
return df_jiang, df_zhang
def get_chengjiao_yesterday(city):
district_list = get_district_from_city(city)
chengjiao = 0
for district in district_list:
http_url = 'https://{}.lianjia.com/fangjia/{}'.format(city, district)
bs_obj = get_bs_obj_from_url(http_url)
if bs_obj is None:
chengjiao += 0
continue
item = bs_obj.find("div", {"class": "item item-1-2"})
if item is None:
chengjiao += 0
continue
num = item.find("div", {"class": "num"}).find("span").get_text()
chengjiao += (0 if "暂无数据" in num else int(num))
return chengjiao
def get_lianjia_fangjia_info(city):
try:
http_url = 'https://{}.lianjia.com/fangjia'.format(city)
bs_obj = get_bs_obj_from_url(http_url)
tongji = bs_obj.find("div", {"class": "box-l-b"})
lj_all = tongji.find_all("div", {"class": "num"})
lj_new = lj_all[0].get_text()
lj_ren = lj_all[1].get_text()
lj_kan = lj_all[2].get_text()
except Exception as e:
lj_new, lj_ren, lj_kan = get_lianjia_fangjia_info(city)
return lj_new, lj_ren, lj_kan
def get_tongji_info(city, filename):
lj_new, lj_ren, lj_kan = get_lianjia_fangjia_info(city)
chengjiao = get_chengjiao_yesterday(city)
new_str = datetime.date.today().strftime('%Y-%m-%d')
total_info = pd.read_excel(filename, sheet_name="total", index_col=0)
total_list = total_info.index.values
new_info = pd.read_excel(filename, sheet_name="新上", index_col=0)
new_list = new_info.index.values
rm_info = pd.read_excel(filename, sheet_name="下架", index_col=0)
rm_list = rm_info.index.values
jiang_info = pd.read_excel(filename, sheet_name="降价", index_col=0)
jiang_list = jiang_info.index.values
zhang_info = pd.read_excel(filename, sheet_name="涨价", index_col=0)
zhang_list = zhang_info.index.values
junjia = format(sum(total_info['总价']) * 10000 / sum(total_info['建筑面积']), '.2f')
jiangfu = (jiang_info['降幅'].str.strip("%").astype(float)/100) if len(jiang_list) else 0
junjiang = (format(sum(jiangfu) / len(jiangfu), '.2%')) if len(jiang_list) else 0
zhangfu = (zhang_info['涨幅'].str.strip("%").astype(float)/100) if len(zhang_list) else 0
junzhang = (format(sum(zhangfu) / len(zhangfu), '.2%')) if len(zhang_list) else 0
data=[[len(total_list), junjia, chengjiao, len(new_list), len(rm_list),
len(jiang_list), junjiang, len(zhang_list), junzhang, lj_new,
lj_ren, lj_kan]]
columns=['总数', '均价', '成交', '上架', '下架', '降价', '降幅', '涨价',
'涨幅', '新上', '新客户', '带看']
name_list = get_district_name_from_city(city)
for name in name_list:
chengqu = total_info[total_info['城区']==name]
avg_price = format(sum(chengqu['总价']) * 10000 /
sum(chengqu['建筑面积']), '.2f') if len(chengqu) else 0
data[0].append(avg_price)
columns.append(name)
    info = pd.DataFrame(index=[new_str], data=data, columns=columns)

#Authors: <NAME>, <NAME>
import datetime as dt
import operator
import datarobot as dr
import numpy as np  # np.* is used throughout this module
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from statsmodels.tsa.stattools import pacf
from ts_metrics import *
from ts_modeling import create_dr_project
from ts_projects import get_preds_and_actuals
####################
# Series Clustering
####################
def _split_series(df, series_id, target, by='quantiles', cuts=5, split_col='Cluster'):
"""
Split series into clusters by rank or quantile of average target value
by: str
Rank or quantiles
cuts: int
Number of clusters
split_col: str
Name of new column
Returns:
--------
pandas df
"""
group = df.groupby([series_id]).mean()
if by == 'quantiles':
group[split_col] = pd.qcut(group[target], cuts, labels=np.arange(1, cuts + 1))
elif by == 'rank':
group[split_col] = pd.cut(group[target], cuts, labels=np.arange(1, cuts + 1))
else:
raise ValueError(f'{by} is not a supported value. Must be set to either quantiles or rank')
df = df.merge(
group[split_col], how='left', left_on=series_id, right_index=True, validate='many_to_one'
)
df[split_col] = df[split_col].astype('str')
n_clusters = len(df[split_col].unique())
mapper_clusters = {k: v for (k, v) in zip(df[split_col].unique(), range(1, n_clusters + 1))}
df[split_col] = df[split_col].map(mapper_clusters)
return df.reset_index(drop=True)
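# Example usage (hypothetical column names; sketch only):
#   df = _split_series(df, series_id='store_id', target='sales', by='quantiles', cuts=5)
#   df['Cluster'].unique()  # -> e.g. array([1, 2, 3, 4, 5])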
def _get_pacf_coefs(df, col, nlags, alpha, scale, scale_method):
"""
Helper function for add_cluster_labels()
df: pandas df
col: str
Series name
nlags: int
Number of AR coefficients to include in pacf
alpha: float
Cutoff value for p-values to determine statistical significance
scale: boolean
Whether to standardize input data
scale_method: str
Choose from 'min_max' or 'normalize'
Returns:
--------
List of AR(n) coefficients
"""
if scale:
if scale_method == 'min_max':
df = df.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)), axis=0)
elif scale_method == 'normalize':
df = df.apply(lambda x: (x - np.mean(x)) / np.std(x), axis=0)
else:
raise ValueError(
f'{scale_method} is not a supported value. scale_method must be set to either min_max or normalize'
)
# if df[col].dropna().shape[0] == 0:
# print(col, df[col].dropna())
# print('Running PAC...')
clf = pacf(df[col].dropna(), method='ols', nlags=nlags, alpha=alpha)
if alpha:
coefs = clf[0][1:]
zero_in_interval = [not i[0] < 0 < i[1] for i in clf[1][1:]]
adj_coefs = [c if z else 0.0 for c, z in zip(coefs, zero_in_interval)]
return adj_coefs
else:
coefs = clf[1:]
return coefs
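# Example (sketch): with nlags=3 and alpha=0.05, a series whose lag-2 confidence interval
# spans zero would yield something like [0.61, 0.0, 0.12] -- insignificant lags are zeroed.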
def _get_optimal_n_clusters(df, n_series, max_clusters, plot=True):
"""
Helper function for add_cluster_labels()
Get the number of clusters that results in the max silhouette score
Returns:
--------
int
"""
clusters = list(np.arange(min(max_clusters, n_series)) + 2)[:-1]
print(f'Testing {clusters[0]} to {clusters[-1]} clusters')
scores = {}
d = []
for c in clusters:
kmean = KMeans(n_clusters=c).fit(df)
d.append(sum(np.min(cdist(df, kmean.cluster_centers_, 'euclidean'), axis=1)) / df.shape[0])
preds = kmean.predict(df)
score = silhouette_score(df, preds, metric='euclidean')
scores[c] = score
print(f'For n_clusters = {c}, silhouette score is {score}')
n_clusters = max(scores.items(), key=operator.itemgetter(1))[0]
best_score = scores[n_clusters]
print(f'optimal n_clusters = {n_clusters}, max silhouette score is {best_score}')
if max_clusters > 2:
if plot:
fig = px.line(x=clusters, y=d)
fig.update_layout(height=500, width=750, title_text='Kmeans Optimal Number of Clusters')
fig.update_xaxes(title='Number of Clusters', range=[clusters[0], clusters[-1]])
fig.update_yaxes(title='Distortion')
fig.show()
return n_clusters
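# Example (sketch): with silhouette scores {2: 0.41, 3: 0.55, 4: 0.48}, the function
# returns 3, i.e. the cluster count with the highest score.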
def add_cluster_labels(
df,
ts_settings,
method,
nlags=None,
scale=True,
scale_method='min_max',
alpha=0.05,
split_method=None,
n_clusters=None,
max_clusters=None,
plot=True,
):
"""
Calculates series clusters and appends a column of cluster labels to the input df. This will only work on regularly spaced time series datasets.
df: pandas df
ts_settings: dictionary of parameters for time series project
method: type of clustering technique: must choose from either pacf, correlation, or target
nlags: int (Optional)
Number of AR(n) lags. Only applies to PACF method
scale: boolean (Optional)
Only applies to PACF method
    scale_method: str (Optional)
Choose between normalize (subtract the mean and divide by the std) or min_max (subtract the min and divide by the range)
split_method: str (Optional)
        Choose between rank and quantiles. Only applies to the target method
n_clusters: int
Number of clusters to create. If None, defaults to maximum silhouette score
max_clusters: int
        Maximum number of clusters to create. If None, defaults to the number of series - 1
Returns:
--------
Updated pandas df with a new column 'Cluster' of clusters labels
-silhouette score per cluster:
(The best value is 1 and the worst value is -1. Values near 0 indicate overlapping
clusters. Negative values generally indicate that a sample has been assigned to the
wrong cluster.)
-plot of distortion per cluster
"""
target = ts_settings['target']
date_col = ts_settings['date_col']
series_id = ts_settings['series_id']
df = df.copy()
df.sort_values(by=[series_id, date_col], ascending=True, inplace=True)
series = df[series_id].unique()
n_series = len(series)
if max_clusters is None:
max_clusters = n_series - 1
assert (
1 < max_clusters < n_series
), 'max_clusters must be greater than 1 and less than or equal to the number of unique series -1'
if n_clusters:
assert (
1 < n_clusters <= max_clusters
), f'n_clusters must be greater than 1 and less than {max_clusters}'
c = df.pivot(index=date_col, columns=series_id, values=target)
if method == 'pacf':
d = pd.DataFrame(
[_get_pacf_coefs(c, x, nlags, alpha, scale, scale_method) for x in c.columns]
) # ignore missing values
d.index = c.columns
distances = pdist(d, 'minkowski', p=2) # 1 for manhattan distance and 2 for euclidean
dist_matrix = squareform(distances)
dist_df = pd.DataFrame(dist_matrix)
dist_df.columns = series
dist_df.index = dist_df.columns
elif method == 'correlation':
dist_df = c.corr(method='pearson')
dist_df = dist_df.apply(lambda x: x.fillna(x.mean()), axis=1)
dist_df = dist_df.apply(lambda x: x.fillna(x.mean()), axis=0)
elif method == 'target':
if split_method is not None:
if n_clusters:
cuts = n_clusters
else:
cuts = max_clusters
new_df = _split_series(df, series_id, target, by=split_method, cuts=cuts)
return new_df # exit function
else:
dist_df = df.groupby(series_id).agg({target: 'mean'})
else:
raise ValueError(
f'{method} is not a supported value. Must be set to either pacf, correlation, or target'
)
    # Find the optimal number of clusters if n_clusters is not specified
if n_clusters is None:
n_clusters = _get_optimal_n_clusters(
df=dist_df, n_series=n_series, max_clusters=max_clusters, plot=plot
)
kmeans = KMeans(n_clusters).fit(dist_df)
labels = kmeans.predict(dist_df)
df_clusters = (
        # assumption: pair each series id with its predicted cluster label, column-wise
        pd.concat([pd.Series(series), pd.Series(labels)], axis=1)
    )

import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import BCELoss, Module
from torch.optim import Adam
import pandas as pd
from torch import Tensor, save
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
from Model import BreastCancerPrognosisModel
model = BreastCancerPrognosisModel()
criterion = BCELoss()
optimizer = Adam(model.parameters())
# Headers for the dataset
columns = ['ID', 'CLUMP_THICKNNESS', 'UNIFORMITY_OF_CELL_SIZE', 'UNIFORMITY_OF_CELL_SHAPE', 'MARGINAL_ADHESION',
'SINGLE_EPITHELIAL_CELL_SIZE', 'BARE_NUCLEI', 'BLAND_CHROMATIN', 'NORMAL_NUCLEI', 'MITOSIS', 'TARGET_CLASS']
raw_data = pd.read_csv('breast-cancer-wisconsin.data', header=None)

import pickle
from datetime import datetime
import re
import time
import getpass
import os
import sys
#requirements
import json
import pandas as pd
import helium as h
from selenium.common.exceptions import NoSuchElementException
import pathlib
pd.set_option("max_rows",100)
#pd.set_option("display.max_columns",100)
pd.set_option("max_colwidth",1000)
def get_info(df,cod_gr):
# nombre_lider missing
try:
nombre_lider = df['Nombre Líder'].dropna().iloc[0]
except IndexError:
nombre_lider = 'Sin dato Registrado'
info= {
'Nombre_Grupo' : df['Nombre Grupo'].dropna().iloc[0],
'Nombre_Lider' : nombre_lider,
'CCRG Grupo' : cod_gr
}
dfi = pd.DataFrame(info, index=[0])
return dfi
# extra headers by products
DBEH = {
'INFO_GROUP': 'TABLE',
'MEMBERS':['Identificación', 'Nacionalidad', 'Tiene afiliación con UdeA', 'Si no tiene afiliación UdeA diligencie el nombre de la Institución','Nro. Horas de dedicación semanales que avala el Coordinador de grupo'], # 2
'NC_P': {'ART_IMP_P': {'ART_P_TABLE':['URL','DOI','Si no tiene URL o DOI agregue una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ART_ELE_P': {'ART_E_P_TABLE':['URL','DOI','Si no tiene URL o DOI agregue una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'LIB_P': {'LIB_P_TABLE':['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro', 'Financiador(es) de la publicación','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CAP_LIB_P': {'CAP_LIB_P_TABLE':['Proyecto de investigación del cual se derivó el libro que contiene el capítulo (Código-Título)','Financiador del proyecto del cual se derivó el libro que contiene el capítulo','Financiador de la publicación','Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'NOT_CIE_P': {'NOT_CIE_P_TABLE':['URL','DOI','Si no tiene URL o DOI genere una evidencia en el repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PAT_P': {'PAT_P_TABLE':['Autores', 'Examen de fondo favorable','Examen preliminar internacional favorable','Adjunta opiniones escritas de la bUsqueda internacional','Contrato de explotación','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 2 3 -1
'PRD_INV_ART_P': {'PAAD_P_TABLE':['Autores','Tiene certificado institucional de la obra','Tiene certificado de la entidad que convoca al evento en el que participa','Tiene certificado de la entidad que convoca al premio en el que obtiene','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 2 3 -1
'VAR_VEG_P': {'VV_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'VAR_ANI_P': {'VA_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'RAZ_PEC_P': {'RAZ_PEC_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'TRA_FIL_P': {'TRA_FIL_P_TABLE':['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro','Financiador(es) de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}
},
'DTI_P': {'DIS_IND_P': {'DI_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CIR_INT_P': {'ECI_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SOFT_P': {'SF_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','TRL','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'NUTRA_P': {'NUTRA_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # add
'COL_CIENT_P': {'COL_CIENT_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo', '¿El producto cumple con los requisitos para ser avalado?']},
'REG_CIENT_P': {'REG_CIENT_P_TABLE':['Autores','Contrato licenciamiento (si aplica)','Agregue las evidencias verificadas al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PLT_PIL_P': {'PP_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PRT_IND_P': {'PI_P_TABLE':['Autores','Nombre comercial (si aplica)','TRL','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SEC_IND_P': {'SE_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PROT_VIG_EPID_P': {'PROT_VIG_EPID_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'EMP_BSE_TEC_P': {'EBT_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'EMP_CRE_CUL_P': {'ICC_P_TABLE':['Autores','Agregue la evidencia verificada al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'INN_GES_EMP_P': {'IG_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'INN_PROC_P': {'IPP_P_TABLE':['Autores','Contrato (si aplica)','Nombre comercial (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'REG_NORM_REGL_LEG_P': {'RNR_P_TABLE':['Autores','Contrato (si aplica)','Convenio (si aplica)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CONP_TEC_P': {'CONP_TEC_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'REG_AAD_P': {'AAAD_P_TABLE':['Autores','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'SIG_DIS_P': {'SD_P_TABLE':['Autores','Contrato licenciamiento (si aplica)','Agregue las evidencias verificadas al repositorio digital y copie el link del archivo en este campo','¿El producto cumple con los requisitos para ser avalado?']}
},
'ASC_P': {'GEN_CONT_IMP_P': {'GC_I_P_TABLE_5':['Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'PASC_P': {'PASC_FOR_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_TRA_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_GEN_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'PASC_CAD_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'DC_P': {'DC_CD_P_TABLE':['Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_CON_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_TRA_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?'],
'DC_DES_P_TABLE':['Medio de verificación','Proyecto/Código','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}},
'FRH_P': {'TES_DOC_P': {'TD_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'TES_MAST_P': {'TM_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'TES_PREG_P': {'TP_P_TABLE':['Número de cédula del graduado','¿La fecha fin coincide con la fecha de grado del estudiante?','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}, # 1 -1
'ASE_PRG_ACA_P': {'APGA_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ASE_CRE_CUR_P': {'ACC_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'ASE_PRG_ONDAS_P': {'APO_P_TABLE':['Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}},
'NC' : {'LIB' : {'LIB_T_AVAL_TABLE': ['Proyecto de investigación del cual se derivó el libro (Código-Título)','Financiador(es) del proyecto del cual se derivó el libro', 'Financiador(es) de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']},
'CAP_LIB':{'CAP_LIB_T_AVAL_TABLE':['Proyecto de investigación del cual se derivó el libro que contiene el capítulo (Código-Título)','Financiador del proyecto del cual se derivó el libro que contiene el capítulo','Financiador de la publicación','Autores','Citas recibidas (si tiene)','Agregue las evidencias verificadas al repositorio digital y genere un hipervínculo en este campo','¿El producto cumple con los requisitos para ser avalado?']}}
}
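# Maps 1-based column offsets (data starts at Excel column C) to column letters; used
# below to build merge-cell and data-validation ranges when writing worksheets.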
d = {
'1': 'C',
'2': 'D',
'3': 'E',
'4': 'F',
'5': 'G',
'6': 'H',
'7': 'I',
'8': 'J',
'9': 'K',
'10': 'L',
'11': 'M',
'12': 'N',
'13': 'O',
'14': 'P',
'15': 'Q',
'16': 'R',
'17': 'S',
'18': 'T',
'19': 'U',
'20': 'V'
}
def clean_df(df):
    'Remove unnecessary columns'
c=[x for x in df.columns if x.find('Unnamed:') == -1 and x.find('Revisar') == -1 and x.find('Avalar integrante') == -1]
dfc=df[c]
return dfc
def clean_tables(df):
#droplevel
try:
df = df.droplevel(0,axis=1)
except ValueError:
pass
    # ignore first (NaN) and last (string: resultados) rows
df=df[1:-1]
#remove unnamed columns and revisar
cols = [x for x in df.columns if x.find('Unnamed:') == -1 and x.find('Revisar') == -1 and x.find('Avalar integrante') == -1]
return df[cols]
def rename_col(df,colr,colf):
df.rename(columns = {colr: colf,}, inplace = True)
return df
# WORKSHEET 4 - 12.
def format_df(df, sheet_name, start_row, writer,eh, veh = None):
    'Write df into sheet_name with formatted headers, the extra validation headers (eh), and data-validation dropdowns'
df.to_excel(writer,sheet_name, startrow = start_row+1, startcol=2,index = False)
# Get the xlsxwriter workbook and worksheet objects.
worksheet = writer.sheets[sheet_name]
merge_format = workbook.add_format({
'bold': 1,
'border':1,
'text_wrap': True,
'align': 'center',
'valign': 'vcenter',
'font_color': 'blue'})
#form merge cells
if not df.empty:
start,end = 1,df.shape[1]
else:
start,end = 1,1
m_range = d.get(str(start)) + str(start_row + 1) + ':' + d.get(str(end)) + str(start_row +1)
worksheet.merge_range(m_range, 'Información suministrada por la Vicerrectoría de Investigación', merge_format)
# for merge headers cells
_m_range = d.get(str(end+1)) + str(start_row +1) + ':' + d.get(str(end+len(eh))) + str(start_row +1)
worksheet.merge_range(_m_range, 'Validación del Centro, Instituto o Corporación', merge_format)
worksheet.set_row_pixels(start_row+1, 120)
#worksheet.set_column('C:C',30,general)
# SET COLUMS FORMAT BY SHEET
if sheet_name=='3.Integrantes grupo':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('D:K',15,general)
if sheet_name=='4.ART y N':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('M:O',20, general)
if sheet_name=='5.LIB y LIB_FOR':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('I:P',20,general)
if sheet_name=='6.CAP':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:H',10,general)
worksheet.set_column('I:K',18,general)
worksheet.set_column('J:P',20,general)
if sheet_name=='7.Patente_Variedades':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:I',10,general)
worksheet.set_column('J:K',20,general)
worksheet.set_column('L:S',20,general)
if sheet_name=='8.AAD':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('F:K',10,general)
worksheet.set_column('L:P',25,general)
if sheet_name=='9.Tecnológico':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:I',10,general)
worksheet.set_column('J:S',18,general)
if sheet_name=='10.Empresarial':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',20,general)
worksheet.set_column('D:H',10,general)
worksheet.set_column('I:N',20,general)
if sheet_name=='11.ASC y Divulgación':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',28,general)
worksheet.set_column('I:I',15,general)
worksheet.set_column('J:N',20,general)
if sheet_name=='12.Formación y programas':
worksheet.set_column('A:A', 5)
worksheet.set_column('B:B', 2)
worksheet.set_column('C:C',25,general)
worksheet.set_column('D:G',10,general)
worksheet.set_column('L:O',15,general)
worksheet.set_column('N:N',20,general)
worksheet.write(start_row+1, 0, 'VoBo de VRI', merge_format)
# Add a header format.
fmt_header = workbook.add_format({
'bold': True,
'align': 'center',
'text_wrap': True,
'valign': 'vcenter',
'fg_color': '#33A584',
'font_color': '#FFFFFF',
'border': 1})
# Write the column headers with the defined format.
for col_num, value in enumerate(df.columns.values):
worksheet.write(start_row+1, col_num + 2, value, fmt_header)
# write extra headers
for col_num, value in enumerate(eh):
worksheet.write(start_row+1, col_num + df.shape[1] + 2, value, fmt_header)
v_range = 'A' + str(start_row +3) + ':' + 'A' + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if sheet_name !='3.Integrantes grupo':
v_range = d.get(str(end+len(eh))) + str(start_row +3) + ':' + d.get(str(end+len(eh))) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
# Integrantes
if veh == 0:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-2)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
# patentes
if veh == 1 :
v_range = d.get(str(end+len(eh)-3)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-3)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-4)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-4)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-5)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-5)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if veh ==2:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-2)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
if veh == 3:
v_range = d.get(str(end+len(eh)-2)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-3)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-3)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-4)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
v_range = d.get(str(end+len(eh)-4)) + str(start_row +3) + ':' + d.get(str(end+len(eh)-5)) + str(df.shape[0] + start_row +2)
worksheet.data_validation(v_range,{'validate': 'list',
'source': ['Sí', 'No']})
##### WORKSHEET 2
def format_info(df, writer, sheet_name):
'''format worksheet'''
workbook=writer.book
normal=workbook.add_format({'font_size':12,'text_wrap':True})
merge_format = workbook.add_format({
'bold': 1,
'border':1,
'text_wrap': True,
'align': 'center',
'valign': 'vcenter',
'font_color': 'black'})
fmt_header = workbook.add_format({
'align': 'center',
'text_wrap': True,
'valign': 'top',
'fg_color': '#33A584',
'font_color': '#FFFFFF',
'border': 1})
# write df
start_row = 6
start_col = 3
df.to_excel(writer, sheet_name, startrow =start_row, startcol=start_col,index = False)
# get worksheet object
worksheet = writer.sheets[sheet_name]
for col_num, value in enumerate(df.columns.values):
worksheet.write(start_row, col_num + 3, value, fmt_header)
#Prepare image insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('A:A', 15)
worksheet.set_column('B:B', 15)
logo_path = str(pathlib.Path(__file__).parent.absolute()) + '/templates/img/logo.jpeg'
worksheet.insert_image('A1', logo_path)
# title 1 UNIVERSIDAD DE ANTIOQUIA
title = workbook.add_format({'font_size':16,'center_across':True})
# title 2 Vicerrectoria de Investigación
title2 = workbook.add_format({'font_size':16,'center_across':True})
# sub title 2 datos identificacion contacto
title3 = workbook.add_format({'font_size':12,'center_across':True})
# merge d1:f1
worksheet.merge_range('D1:F1', 'UNIVERSIDAD DE ANTIOQUIA', title)
# merge d2:f2
worksheet.merge_range('D2:F2', ' Vicerrectoria de Investigación', title2)
# merge d3:f3
worksheet.merge_range('D3:F3', ' Datos de identificación y contacto', title3)
# D5: F5
worksheet.merge_range('D5:E5','Número inscripcion a la convocatoria:',merge_format)
worksheet.write('F5','#',merge_format)
# d6:f6
worksheet.merge_range('D6:F6','Identificación del Grupo',merge_format)
# d9:f9
worksheet.merge_range('D10:F10','Identificación del Centro de Investigación',merge_format)
# write
a='Nombre del Centro, Instituto o Corporación'
worksheet.write('D11',a, fmt_header)
worksheet.set_column('D11:D11',30, fmt_header)
b='Nombre completo del Jefe de Centro, Instituto o Corporación'
worksheet.write('E11',b, fmt_header)
worksheet.set_column('E11:E11',30, fmt_header)
c='Email'
worksheet.write('F11',c, fmt_header)
worksheet.set_column('F11:F11',30, fmt_header)
# d13:f13
worksheet.merge_range('D13:F13','Identificación de quien diligencia el formato',merge_format)
a='Nombre completo del encargado de diligenciar el formato'
worksheet.write('D14',a, fmt_header)
worksheet.set_column('D14:D14',30, normal)
b='Email'
worksheet.write('E14',b, fmt_header)
worksheet.set_column('E14:E14',30, normal)
c='Teléfono de contacto'
worksheet.write('F14',c, fmt_header)
worksheet.set_column('F14:F14',30, normal)
# WORKSHEET 1
def format_ptt(workbook):
#Global variables
abstract_text='VERIFICACIÓN DE INFORMACIÓN PARA OTORGAR AVAL A LOS GRUPOS DE INVESTIGACIÓN E INVESTIGADORES PARA SU PARTICIPACIÓN EN LA CONVOCATORIA 894 DE 2021 DE MINCIENCIAS'
instructions='''Los grupos de investigación e investigadores de la Universidad de Antioquia que deseen participar en la Convocatoria Nacional para el reconocimiento y medición de grupos de investigación, desarrollo tecnológico o de innovación y para el reconocimiento de investigadores del Sistema Nacional de Ciencia, Tecnología e Innovación - SNCTI, 894 de 2021, deben presentar la información actualizada en las plataformas CvLAC y GrupLAC validada por el Centro de Investigación en el presente formato, y respaldada en el repositorio digital de evidencias dispuesto para este fin, para la obtención del aval institucional por parte de la Vicerrectoría de Investigación.
La información a validar corresponde a los años 2019-2020 y aquella que entra en la ventana de observación y debe ser modificada según el Modelo de medición de grupos. La validación comprende:
1. Verificación de la vinculación de los integrantes a la Universidad de Antioquia y al grupo de investigación. Diligenciar los campos solicitados.
2. Verificación de la producción de GNC, DTeI, ASC y FRH, en los campos habilitados en cada hoja de este formato. Las evidencias requeridas para los productos deben ser anexadas al repositorio digital asignado al grupo y se deben enlazar a cada producto.
Este documento debe ser diligenciado en línea.
De antemano, la Vicerrectoría de Investigación agradece su participación en este ejercicio, que resulta de vital importancia para llevar a buen término la Convocatoria de Reconocimiento y Medición de Grupos de Investigación
'''
#Final part of the first sheet
datos=clean_df(pd.read_excel('https://github.com/restrepo/InstituLAC/raw/main/data/template_data.xlsx'))
#Capture xlsxwriter object
# IMPORTANT → workbook is the same object used in the official document at https://xlsxwriter.readthedocs.io
#workbook=writer.book
#***************
#Styles as explained in https://xlsxwriter.readthedocs.io
title=workbook.add_format({'font_size':28,'center_across':True})
subtitle=workbook.add_format({'font_size':24,'center_across':True})
abstract=workbook.add_format({'font_size':20,'center_across':True,'text_wrap':True})
normal=workbook.add_format({'font_size':12,'text_wrap':True})
#***************
#Creates the first work-sheet
#IMPORTANT → worksheet is the same object used in the official document at https://xlsxwriter.readthedocs.io
worksheet=workbook.add_worksheet("1.Presentación")
#Prepare image insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('A:A', 15)
worksheet.set_column('B:B', 15)
logo_path = str(pathlib.Path(__file__).parent.absolute()) + '/templates/img/logo.jpeg'
worksheet.insert_image('A1', logo_path)
#Prepare text insertion: See → https://xlsxwriter.readthedocs.io/example_images.html
worksheet.set_column('C:C', 140,general)
worksheet.set_row_pixels(0, 60)
#Texts
worksheet.write('C1', 'UNIVERSIDAD DE ANTIOQUIA',title)
worksheet.set_row_pixels(2, 60)
worksheet.write('C3', 'VICERRECTORÍA DE INVESTIGACIÓN',subtitle)
worksheet.set_row_pixels(5, 100)
worksheet.write('C6', abstract_text,abstract)
worksheet.set_row_pixels(8, 40)
worksheet.write('C9','PRESENTACIÓN DEL EJERCICIO',
workbook.add_format({'font_size':18,'center_across':True}) )
worksheet.set_row_pixels(10, 320)
worksheet.write('C11',instructions,normal)
#*** ADD PANDAS DATAFRAME IN SPECIFIC POSITION ****
#Add a data Frame in some specific position. See → https://stackoverflow.com/a/43510881/2268280
# See also → https://xlsxwriter.readthedocs.io/working_with_pandas.html
writer.sheets["1.Presentación"]=worksheet
datos.to_excel(writer,sheet_name="1.Presentación",startrow=12,startcol=2,index=False)
#**************************************************
#Fix columns heights for long text
worksheet.set_row_pixels(17, 40)
worksheet.set_row_pixels(18, 40)
worksheet.set_row_pixels(19, 40)
worksheet.set_row_pixels(20, 40)
worksheet.set_row_pixels(22, 40)
def login(user,password,institution='UNIVERSIDAD DE ANTIOQUIA',sleep=0.8,headless=True):
#def login(user,password): → browser, otro, otro
# MAIN CODE
# login =
# name_ins =
# usser =
# passw=
# login
browser = h.start_firefox('https://scienti.minciencias.gov.co/institulac2-war/',headless=headless)
#browser = h.start_firefox('https://scienti.minciencias.gov.co/institulac2-war/')
time.sleep(sleep)
h.click('Consulte Aquí')
time.sleep(sleep)
h.write(institution,into='Digite el nombre de la Institución') # name ins
time.sleep(sleep)
h.click('Buscar')
time.sleep(sleep)
h.click(browser.find_element_by_id('list_instituciones'))
time.sleep(sleep)
time.sleep(sleep)
h.select('seleccione una',institution) # name_ins
time.sleep(sleep)
h.write(user,into='Usuario') # user
time.sleep(sleep)
h.write(password, into='Contraseña') # passw
time.sleep(sleep)
h.click(h.Button('Ingresar'))
# cookie injection
time.sleep(sleep)
# implementation cookie injection
# get current cookie and store
new_cookie=browser.get_cookies()[0]
# create new_cookie with time_expire
time_expire = (datetime(2022,1,1) - datetime(1970,1,1)).total_seconds()
new_cookie['expiry'] = int(time_expire)
# delete cookie sites
browser.delete_all_cookies()
# add new cookie
browser.add_cookie(new_cookie)
try:
error=browser.find_element_by_class_name("error")
if error.text.lower().find('fallidos')>-1:
print("ERROR! Bad login or password")
return False
else:
pass
except NoSuchElementException:
pass
# navigation 1
time.sleep(sleep)
h.click('Aval')
time.sleep(sleep)
h.click('Avalar grupos')
time.sleep(sleep)
h.click('Grupos Avalados')
# -- end login --
# list of total groups
#select max results per page
h.wait_until(h.Text('Ver Reporte').exists)
h.click(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//select[@name="maxRows"]'))
time.sleep(sleep)
h.select(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//select[@name="maxRows"]'),'100')
return browser
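# Usage sketch (credentials and flags below are placeholders, not real values): a successful
# call returns the helium/selenium browser already positioned on the "Grupos Avalados" listing
# with 100 rows per page; a failed login returns False.
# browser = login('my_user', 'my_password', institution='UNIVERSIDAD DE ANTIOQUIA', headless=True)
# if browser is False:
#     raise SystemExit('login failed')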
def get_groups(browser,DIR='InstituLAC',sleep=0.8):
# catch 1: groups info [name, lider, cod, link to producs]
# schema
# empty df
# select max items per page
# while until end
# try:
# catch table
# preproces table
# catch urls
# add url colums
# add df
# click next page -> raise error
# except Nosuchelement:
# break
# catch 1: list of groups
dfg=pd.DataFrame()
cont=True
while cont:
try:
# catch source
time.sleep(sleep)
source_g=browser.page_source
# catch table
time.sleep(sleep)
df=pd.read_html(source_g, attrs={"id":"grupos_avalados"}, header=2)[0]
# and preprocces it
c=[x for x in df.columns if x.find('Unnamed:') == -1]
dfgp=df[c][1:-1]
print(dfgp.columns,dfgp.shape)
# catch urls
url=[a.get_attribute('href') for a in browser.find_elements_by_xpath('//table[@id="grupos_avalados"]//td[5]/a')]
dfgp['Revisar'] = url
dfg=dfg.append(dfgp)
# click next page. this instruction rise error of the end.
h.click(browser.find_element_by_xpath('//table[@id="grupos_avalados"]//tr/td[3]/a'))
except NoSuchElementException as e:
print(e)
            print('out of cycle')
break
time.sleep(sleep)
time.sleep(sleep)
dfg=dfg.reset_index(drop=True)
with open(f'{DIR}/dfg.pickle', 'wb') as f:
pickle.dump(dfg, f)
return browser,dfg
def get_DB(browser,target_data,DB=[],dfg=pd.DataFrame(),sleep=0.8,DIR='InstituLAC',
start=None,end=None,COL_Group='',start_time=0):
os.makedirs(DIR,exist_ok=True)
if dfg.empty:
browser,dfg=get_groups(browser,DIR=DIR,sleep=sleep)
dfg = dfg.reset_index(drop=True)
#find start and end if COL_Group
if COL_Group:
dfcg=dfg[dfg['COL Grupo']==COL_Group]
if not dfcg.empty:
start=dfcg.index[0]
end=start+1
#assert dfg.shape[0] == 324
time.sleep(sleep*2)
for idx in dfg.index[start:end]: # TEST
# create db for store things related to group
DBG = {} # HERE V1. DBG.keys = [cat1,cat2,...,catN]
# DBG['cat1'].keys = [prod1bycat,...prodnbycat]
# part info group
print(dfg.loc[idx,'Nombre del grupo'])
# specific group url
time.sleep(sleep)
url_group = dfg.loc[idx,'Revisar']
# go to url group
time.sleep(sleep)
browser.get(url_group)
# catch two tables: info grupo and members
source=browser.page_source
# Info group
l_info=pd.read_html(source, match='Nombre Grupo')
info_g=l_info[3].pivot(columns=0,values=1)
# Store info group
DBG['Info_group'] = info_g
# List members
l_int = pd.read_html(source,attrs={'id':'tblIntegrantes'},header=2)
mem_g=l_int[0]
# Store list of members
DBG['Members'] = mem_g
# Products
h.wait_until(lambda: browser.find_element_by_xpath('//td[@id="bodyPrincipal"]//a[text()="Ver productos"]') is not None)
h.click(browser.find_element_by_xpath('//td[@id="bodyPrincipal"]//a[text()="Ver productos"]'))
# Target products = ALL products: no aval, aval, aval pert (Categories)
_target_data = [('//*[@id="ProdsNoAval"]', '//div[@id="accordionCatgNoAval"]/h3', 'categoriaNoAval=%s&subcategoriaNoAval=%s&aval=F'),
('//*[@id="ProdsAval"]','//div[@id="accordionCatg"]/h3','categoria=%s&subcategoria=%s&aval=T'),
('//*[@id="ProdsPertenecia"]','//div[@id="accordionCatgP"]/h3','categoriaP=%s&subcategoriaP=%s&aval=P')
]
if target_data == 'NoAval':
            target_data = _target_data[0:1]
print('map NoAvalProds')
elif target_data == 'Aval':
target_data = _target_data[1:2]
            print('map Aval institulac prods')
elif target_data == 'Pert':
target_data = _target_data[2:]
print('map Pert institulac prods')
elif target_data == 'All':
target_data = _target_data
print('map all institulac prods')
lcp = [] # list of categories and prods by cat dif to cero e.g. [[NC_NO_AVAL,ART_IMP_NO_AVAL],[NC,ART_IMP]...]
for i in target_data:
print('#####')####
time.sleep(sleep)
h.wait_until(lambda: browser.find_element_by_xpath(i[0]) is not None)
h.click(browser.find_element_by_xpath(i[0]))
time.sleep(sleep)
url_base=browser.current_url
# MAP
# map products by macro-Cat (prod aval per) diff to cero
sleep = 0.8
for cat in browser.find_elements_by_xpath(i[1]):
# exist products
id_cat = cat.get_attribute('id')
#print(cat.text,'----',id_cat)
num_prods_cat = int(re.findall(r'\d+',cat.text)[0])
if num_prods_cat > 0:
time.sleep(sleep)
h.click(cat)
print(cat.text,'----',id_cat)
else:
continue
for prod in browser.find_elements_by_xpath('//div[@aria-labelledby="%s"]/h3' % cat.get_attribute('id')):
# items in products
#h.click(cat)
id_prod = prod.get_attribute('id')
#print(' ',prod.text,id_prod)
#print(prod)
num_items_prod = int(re.findall(r'\d+',prod.text)[0])
if num_items_prod > 0:
lcp.append([id_cat,id_prod])
print(' ',prod.text,id_prod)
else:
continue
time.sleep(sleep)
h.click(cat)
# DBG
# build database
for cat in lcp:
if cat[0] not in DBG.keys():
DBG[cat[0]] = {}
for prod in lcp:
# build query by case no aval, aval rev, pert
if 'NO_AV' in prod[0]:
query='categoriaNoAval=%s&subcategoriaNoAval=%s&aval=F' % (prod[0],prod[1])
elif '_P' in prod[0]:
query='categoriaP=%s&subcategoriaP=%s&aval=P' % (prod[0],prod[1])
else:
query='categoria=%s&subcategoria=%s&aval=T' % (prod[0],prod[1])
# HERE
url_query = url_base.split('?')[0] + '?' + query + '&' + url_base.split('?')[1]
# do query
browser.get(url_query)
# wait until complete load
h.wait_until(h.Button('Guardar').exists,timeout_secs=20)
# load
page_source = browser.page_source
# detect tables
try:
tables = pd.read_html(browser.page_source,attrs={'class':'table'})
# clean tables
except (ValueError, ImportError) as e:
tables = [None]
try:
tables = [clean_tables(t) for t in tables]
except AttributeError as e:
pass
# store table or tables
if len(tables)>1:
c=0
for t in tables:
if t.shape[0] >= 1:
DBG[prod[0]][prod[1]+'_%s' % c] = t
c+=1
else:
DBG[prod[0]][prod[1]] = tables[0]
time.sleep(sleep)
# store in general DB.
DB.append(DBG)
with open(f'{DIR}/DB.pickle', 'wb') as f:
pickle.dump(DB, f)
print(time.time()-start_time)
browser.close()
return DB,dfg
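# Example (sketch): scrape a single group by its COL code and keep the partial results under
# ./InstituLAC. The COL code below is hypothetical; start_time is only used for the elapsed-time print.
# import time as _time
# DB, dfg = get_DB(browser, target_data='All', COL_Group='COL0000001',
#                  DIR='InstituLAC', start_time=_time.time())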
def to_excel(DB,dfg,DIR='InstituLAC'):
os.makedirs(DIR,exist_ok=True)
global general
global writer
global workbook
# ONE GROUP IMPLEMENTATION
for idxx in range(len(DB)):
# DATA
DBG = DB[idxx]
### excel name
name = 'Plantilla_Formato de verificación de información_GrupLAC_894-2021_'
cod_gr = dfg.loc[idxx,'COL Grupo']
try:
col_gr = dfg[dfg['Nombre del grupo']==DBG['Info_group']['Nombre Grupo'].dropna().iloc[-1]
]['COL Grupo'].iloc[-1]
except:
col_gr=cod_gr #safe option valid in sequential mode
# initialize object= output excel file
os.makedirs(f'{DIR}/{col_gr}',exist_ok=True)
os.makedirs(f'{DIR}/{col_gr}/Repositorio_digital_{col_gr}',exist_ok=True)
writer = pd.ExcelWriter(f'{DIR}/{col_gr}/{name}{col_gr}.xlsx', engine='xlsxwriter')
workbook=writer.book
general=workbook.add_format({'text_wrap':True})
# PPT
format_ptt(workbook)
# INFO GROUP
df=get_info(DBG['Info_group'], col_gr)
format_info(df, writer, '2.Datos de contacto')
# WORKSHEET 1
df = clean_df(DBG['Members'])
eh = DBEH['MEMBERS']
format_df(df, '3.Integrantes grupo', 1, writer, eh, veh=0) #### veh = 0
### NC_P ###
#------- w4 -------
# 4.ART y N
var_w4 = 0
try:
df=clean_df(DBG['NC_P']['ART_IMP_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc+1)
eh=DBEH['NC_P']['ART_IMP_P']['ART_P_TABLE']
format_df(df, '4.ART y N', var_w4, writer,eh)
var_w4 += df.shape[0] + 3
except KeyError as e:
pass
try:
df=clean_df(DBG['NC_P']['ART_ELE_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['ART_ELE_P']['ART_E_P_TABLE']
format_df(df, '4.ART y N', var_w4, writer,eh)
var_w4 += df.shape[0] + 3
except KeyError as e:
pass
try:
df=clean_df(DBG['NC_P']['NOT_CIE_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['NOT_CIE_P']['NOT_CIE_P_TABLE']
format_df(df, '4.ART y N', var_w4, writer,eh)
var_w4 += df.shape[0] + 3
except KeyError as e:
pass
# -------------- w4 -------------------------
#------------ ---w5------------
# 5.LIB y LIB_FOR
var_w5 = 0
# libros por pertenencia
try:
df=rename_col(clean_df(DBG['NC_P']['LIB_P']),'Título del artículo','Título del libro')
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['LIB_P']['LIB_P_TABLE']
format_df(df, '5.LIB y LIB_FOR', var_w5, writer,eh)
var_w5 += df.shape[0] + 3
except KeyError as e:
pass
# libros avalados con revisión
try:
df=rename_col(clean_df(DBG['NC']['LIB']), 'Título del artículo' ,'Título del libro')
#df.to_excel(writer,sheet_name='FRH_P',startrow = var_rh)
eh=DBEH['NC']['LIB']['LIB_T_AVAL_TABLE']
format_df(df, '5.LIB y LIB_FOR', var_w5 , writer, eh)
var_w5 += df.shape[0] + 3
except KeyError as e:
pass
# libros formacion
try:
df=rename_col(clean_df(DBG['ASC_P']['GEN_CONT_IMP_P']),'Título del libro','Título del libro formación') # lib form
if df.shape[0] != 0:
eh=DBEH['ASC_P']['GEN_CONT_IMP_P']['GC_I_P_TABLE_5']
format_df(df, '5.LIB y LIB_FOR', var_w5 , writer,eh)
var_w5 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# --------------------w5--------------
#--------------------w6---------------
#6.CAP
# cap pertenencia
var_w6 = 0
try:
df=clean_df(DBG['NC_P']['CAP_LIB_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['CAP_LIB_P']['CAP_LIB_P_TABLE']
format_df(df, '6.CAP',var_w6, writer,eh)
var_w6 += df.shape[0] + 3
except KeyError as e:
pass
# caps avalados con revision
try:
df = clean_df(DBG['NC']['CAP_LIB']) ### ,veh = 2
#df.to_excel(writer,sheet_name='FRH_P',startrow = var_rh)
eh = DBEH['NC']['CAP_LIB']['CAP_LIB_T_AVAL_TABLE']
format_df(df, '6.CAP', var_w6, writer, eh)
var_w6 += df.shape[0] + 3
except KeyError as e:
pass
# traduccion filologica
try:
df=rename_col(clean_df(DBG['NC_P']['TRA_FIL_P']),'Título del libro', 'Título traducción filologica')
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['TRA_FIL_P']['TRA_FIL_P_TABLE']
format_df(df, '6.CAP', var_w6, writer,eh)
var_w6 += df.shape[0] + 3
except KeyError as e:
pass
#-------------------w6------------------
#------------w7-------------------------
#7.Patente_Variedades
var_w7 = 0
# patentes
try:
df=rename_col(clean_df(DBG['NC_P']['PAT_P']),'Título del artículo','Título de la patente') ###### veh=1
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['PAT_P']['PAT_P_TABLE']
format_df(df, '7.Patente_Variedades', var_w7, writer,eh, veh=1)
var_w7 += df.shape[0] + 3
except KeyError as e:
pass
# variedad vegetal
try:
df=clean_df(DBG['NC_P']['VAR_VEG_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['VAR_VEG_P']['VV_P_TABLE']
format_df(df, '7.Patente_Variedades', var_w7, writer,eh)
var_w7 += df.shape[0] + 3
except KeyError as e:
pass
# Variedad Animal
try:
df=clean_df(DBG['NC_P']['VAR_ANI_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['VAR_ANI_P']['VA_P_TABLE']
format_df(df, '7.Patente_Variedades', var_w7, writer,eh)
var_w7 += df.shape[0] + 3
except KeyError as e:
pass
# razas pecuarias mejoradas
try:
df=clean_df(DBG['NC_P']['RAZ_PEC_P'])
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['RAZ_PEC_P']['RAZ_PEC_P_TABLE']
format_df(df, '7.Patente_Variedades', var_w7, writer,eh)
var_w7 += df.shape[0] + 3
except KeyError as e:
pass
# ---------------w7---------------------
#---------------w8-------------------
var_w8 = 0
# productos investigacion creacion
try:
df=clean_df(DBG['NC_P']['PRD_INV_ART_P']) ###### veh = 1
#df.to_excel(writer,sheet_name='NC_P',startrow = var_nc)
eh=DBEH['NC_P']['PRD_INV_ART_P']['PAAD_P_TABLE']
format_df(df, '8.AAD', var_w8, writer,eh, veh=3)
var_w8 += df.shape[0] + 3
except KeyError as e:
pass
#-------------W8---------------------
#-------------W9----------------
# 9.Tecnológico
#### DTI_P
var_w9 = 0
# diseño industrial
try:
df=rename_col(clean_df(DBG['DTI_P']['DIS_IND_P']),'Nombre del diseño','Nombre del diseño industrial')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['DIS_IND_P']['DI_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer, eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
#circuitos integrados
try:
df=rename_col(clean_df(DBG['DTI_P']['CIR_INT_P']),'Nombre del diseño', 'Nombre del diseño circuito')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['CIR_INT_P']['ECI_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# colecciones
try:
df=clean_df(DBG['DTI_P']['COL_CIENT_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['COL_CIENT_P']['COL_CIENT_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# software
try:
df=rename_col(clean_df(DBG['DTI_P']['SOFT_P']),'Nombre del diseño', 'Nombre del diseño de software')
eh=DBEH['DTI_P']['SOFT_P']['SF_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# secreto industrial
try:
df=rename_col(clean_df(DBG['DTI_P']['SEC_IND_P']),'Producto','Nombre secreto industrial')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['SEC_IND_P']['SE_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# prototipo insdustrial
try:
df=rename_col(clean_df(DBG['DTI_P']['PRT_IND_P']), 'Nombre del diseño', 'Nombre del prototipo')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['PRT_IND_P']['PI_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# Registro distintivo
try:
df=clean_df(DBG['DTI_P']['SIG_DIS_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['SIG_DIS_P']['SD_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# registros de acuerdo licencias expl obras AAD
try:
df=clean_df(DBG['DTI_P']['REG_AAD_P'])
eh=DBEH['DTI_P']['REG_AAD_P']['AAAD_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# prod nutracetico
try:
df=rename_col(clean_df(DBG['DTI_P']['NUTRA_P']),'Nombre del producto','Nombre del producto nutracetico')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_nc)
eh=DBEH['DTI_P']['NUTRA_P']['NUTRA_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# registro cienti
try:
df=clean_df(DBG['DTI_P']['REG_CIENT_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['REG_CIENT_P']['REG_CIENT_P_TABLE']
format_df(df, '9.Tecnológico',var_w9 , writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# planta piloto
try:
df=clean_df(DBG['DTI_P']['PLT_PIL_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['PLT_PIL_P']['PP_P_TABLE']
format_df(df, '9.Tecnológico', var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
# protocolo vigilancia epidemologica
try:
df=clean_df(DBG['DTI_P']['PROT_VIG_EPID_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['PROT_VIG_EPID_P']['PROT_VIG_EPID_P_TABLE']
format_df(df, '9.Tecnológico',var_w9, writer,eh)
var_w9 += df.shape[0] + 3
except KeyError as e:
pass
#---------------------w9----------------
#---------------------w10----------------
# 10.Empresarial
var_w10 = 0
# innovación gestion empresarial
try:
df=rename_col(clean_df(DBG['DTI_P']['INN_GES_EMP_P']),'Nombre de la innovación', 'Nombre de la innovación empresarial')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['INN_GES_EMP_P']['IG_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# innovacion procesos y procedimiento
try:
df=rename_col(clean_df(DBG['DTI_P']['INN_PROC_P']),'Nombre de la innovación','Nombre de la innovación procesos y procedimientos')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['INN_PROC_P']['IPP_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# regulaciones normas reglamentos legislaciones
try:
df=rename_col(clean_df(DBG['DTI_P']['REG_NORM_REGL_LEG_P']),'Tipo producto','Nombre regulación')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['REG_NORM_REGL_LEG_P']['RNR_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# conceptos tecnicos
try:
df=clean_df(DBG['DTI_P']['CONP_TEC_P'])
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['CONP_TEC_P']['CONP_TEC_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# empresa base tecnologica
try:
df=rename_col(clean_df(DBG['DTI_P']['EMP_BSE_TEC_P']),'Tipo','Tipo de empresa base tecnologica')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['EMP_BSE_TEC_P']['EBT_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# empresa de base cultural
try:
df=rename_col(clean_df(DBG['DTI_P']['EMP_CRE_CUL_P']),'Empresa', 'Tipo de empresa base cultural')
#df.to_excel(writer,sheet_name='DTI_P',startrow = var_dt)
eh=DBEH['DTI_P']['EMP_CRE_CUL_P']['ICC_P_TABLE']
format_df(df, '10.Empresarial', var_w10, writer,eh)
var_w10 += df.shape[0] + 3
except KeyError as e:
pass
# -------------------------w10-------------
###### ASC
# -------- w11
# 11.ASC y Divulgación
var_w11 = 0
# productos de interes social
try:
df=rename_col(clean_df(DBG['ASC_P']['PASC_P']),'Nombre','Nombre producto interes social')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['PASC_P']['PASC_FOR_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# Proceso de apropiación social del conocimiento resultado del trabajo conjunto
try:
df=rename_col(clean_df(DBG['ASC_P']['PASC_P']), 'Nombre','Nombre del Proceso de apropiación social del conocimiento resultado del trabajo conjunto entre un Centro de Ciencia y un grupo de investigación')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['PASC_P']['PASC_TRA_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# Nombre del Proceso de apropiación social del conocimiento para la generación de insumos de política pública y normatividad
try:
df=rename_col(clean_df(DBG['ASC_P']['PASC_P']),'Nombre','Nombre del Proceso de apropiación social del conocimiento para la generación de insumos de política pública y normatividad')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['PASC_P']['PASC_GEN_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
#Nombre del Proceso de apropiación social del conocimiento para el fortalecimiento de cadenas productivas
try:
df=rename_col(clean_df(DBG['ASC_P']['PASC_P']),'Nombre', 'Nombre del Proceso de apropiación social del conocimiento para el fortalecimiento de cadenas productivas')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['PASC_P']['PASC_CAD_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# Divulgacion
# Piezas digitales
try:
df=rename_col(clean_df(DBG['ASC_P']['DC_P']),'Título del proyecto','Título del proyecto para la generación de piezas digitales')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['DC_P']['DC_CD_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# textuales
try:
df=rename_col(clean_df(DBG['ASC_P']['DC_P']),'Título del proyecto','Título del proyecto para la generación de piezas Textuales (incluyendo cartillas, periódicos, revistas, etc.), Producción de estrategias transmediáticas y Desarrollos web')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['DC_P']['DC_CON_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# produccion estrategia trasmediatica
try:
df=rename_col(clean_df(DBG['ASC_P']['DC_P']), 'Título del proyecto','Título del proyecto estrategia trasmediatica')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['DC_P']['DC_TRA_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# desarrollo web
try:
df=rename_col(clean_df(DBG['ASC_P']['DC_P']),'Título del proyecto','Título del proyecto desarrollo web')
if df.shape[0] != 0:
eh=DBEH['ASC_P']['DC_P']['DC_DES_P_TABLE']
format_df(df, '11.ASC y Divulgación', var_w11, writer,eh)
var_w11 += df.shape[0] + 3
else:
raise(KeyError)
except KeyError as e:
pass
# --- --- --- -- w11 -- -- -- -- -- -- --
# ---------------w12--------------------
# FRH
var_w12 = 0
# tesis doctorado
try:
df=rename_col(clean_df(DBG['FRH_P']['TES_DOC_P']), 'Título','Título de la tesis de doctorado') ### ,veh = 2
#df.to_excel(writer,sheet_name='FRH_P',startrow = var_rh)
eh=DBEH['FRH_P']['TES_DOC_P']['TD_P_TABLE']
format_df(df, '12.Formación y programas', var_w12, writer, eh,veh=2)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
# tesis maestria
try:
            df=rename_col(clean_df(DBG['FRH_P']['TES_MAST_P']),'Título','Título del trabajo de grado de maestría') ### veh = 2
#df.to_excel(writer,sheet_name='FRH_P',startrow = var_rh)
eh=DBEH['FRH_P']['TES_MAST_P']['TM_P_TABLE']
format_df(df, '12.Formación y programas',var_w12, writer,eh,veh=2)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
# tesis pregrado
try:
df=rename_col(clean_df(DBG['FRH_P']['TES_PREG_P']),'Título','Título del trabajo de grado de pregrado') ### veh = 2
#df.to_excel(writer,sheet_name='FRH_P',startrow = var_rh)
eh=DBEH['FRH_P']['TES_PREG_P']['TP_P_TABLE']
format_df(df, '12.Formación y programas',var_w12, writer,eh,veh = 2)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
# asesoria programa academico
try:
df=rename_col(clean_df(DBG['FRH_P']['ASE_PRG_ACA_P']),'Tipo','Nombre programa academico creado')
eh=DBEH['FRH_P']['ASE_PRG_ACA_P']['APGA_P_TABLE']
format_df(df, '12.Formación y programas', var_w12, writer,eh)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
# asesoria creacion de cursos
try:
df=rename_col(clean_df(DBG['FRH_P']['ASE_CRE_CUR_P']),'Tipo','Nombre curso creado')
eh=DBEH['FRH_P']['ASE_CRE_CUR_P']['ACC_P_TABLE']
format_df(df, '12.Formación y programas', var_w12, writer,eh)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
# programa ondas
try:
df=rename_col(clean_df(DBG['FRH_P']['ASE_PRG_ONDAS_P']),'Integrante','Integrante programa ondas')
eh=DBEH['FRH_P']['ASE_PRG_ONDAS_P']['APO_P_TABLE']
format_df(df, '12.Formación y programas', var_w12, writer,eh)
var_w12 += df.shape[0] + 3
except KeyError as e:
pass
#----------------w12---------------------------
writer.save()
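# End-to-end sketch of the intended pipeline (user, password and index range are placeholders):
# login() opens the session, get_DB() scrapes the products of the selected groups, and
# to_excel() writes one verification workbook per group under DIR/<COL code>/.
# browser = login('my_user', 'my_password')
# DB, dfg = get_DB(browser, target_data='All', start=0, end=2, DIR='InstituLAC')
# to_excel(DB, dfg, DIR='InstituLAC')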
# HERE
def dummy_fix_df(DB):
nones=False
for i in range(len(DB)):
for k in list(DB[i].keys())[2:]:
for kk in DB[i][k].keys():
#print(i,k,kk)
if DB[i][k][kk] is None:
nones=True
DB[i][k][kk]={kk:
|
pd.DataFrame()
|
pandas.DataFrame
|
import subprocess
from io import StringIO
import pandas as pd
import numpy as np
from shutil import which
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('a', help='The BED file containing predictions. MUST be BED6 format.')
parser.add_argument('b', help='The ground truth bed file. First 6 columns must be standard BED6, but can have additional columns appended.')
parser.add_argument('window_size', help='Number of bases to append to each side of the predicted site.', type=int)
parser.add_argument('-o', help='output file name, optional')
args = parser.parse_args()
f_GT = args.b
f_PD = args.a
window = args.window_size
# output file name
if not args.o:
f_PD_matched = f_PD[:-4] + '_matched_' + str(window) + '.bed'
else:
f_PD_matched = args.o
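# Example invocation (sketch; the script and file names are placeholders): compare predicted
# sites against the ground truth with a 50 nt window on each side and write matches to a
# custom output file. Without -o the output defaults to <predictions>_matched_50.bed.
#   python match_predictions.py predictions.bed ground_truth.bed 50 -o predictions_matched_50.bed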
def bedtools_window(bed1, bed2, window, reverse=False):
"""
Python wrapper for bedtools window.
reverse: return only sites that have no match in the ground truth.
"""
# make sure bedtools can be called in current env
assert which('bedtools') is not None, "bedtools not installed or not in PATH"
# run bedtools window, capture output
if not reverse:
out = subprocess.run(['bedtools', 'window', '-sm',
'-w', str(window),
'-a', bed1,
'-b', bed2],
capture_output=True, shell=False)
else:
out = subprocess.run(['bedtools', 'window', '-sm', '-v',
'-w', str(window),
'-a', bed1,
'-b', bed2],
capture_output=True, shell=False)
assert out.returncode==0, "bedtools window run failed, check input files"
# memory file-handle to pass output to pandas without writing to disk
out_handle = StringIO(out.stdout.decode())
    # in case there were no sites returned (no overlap / all overlap in case of reverse=True)
if not out.stdout.decode():
out = pd.DataFrame()
else:
out =
|
pd.read_csv(out_handle, delimiter='\t', header=None, dtype={0: str})
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
This module contains functionality to comfortably create plots.
"""
from math import floor, ceil, pi
from itertools import islice, chain, cycle, repeat
from collections.abc import Iterable, Mapping
from typing import Union
from warnings import warn
import pandas as pd
import pandas.api.types as pd_types
import numpy as np
from scipy import interpolate
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as plt_colors
import matplotlib.cm as plt_cm
import matplotlib.lines as mlines
import mpl_toolkits.axes_grid1 as axg1
from configparser import ConfigParser
from IPython.display import HTML, display
from tabulate import tabulate
_col_labels = {
'count': 'Anzahl'
}
def spec_col_labels(**kwargs):
"""
Specify labels for column names to be automatically used in plots.
:param kwargs: A map of column names and labels.
"""
_col_labels.update(kwargs)
def spec_col_file(filename):
"""
Specify an INI file with column names to be automatically used in plots.
The column-label-pairs must be placed under the INI section `[Columns]`.
:param filename: A path to the INI file.
"""
cfg = ConfigParser()
cfg.read(filename, encoding='utf8')
_col_labels.update(cfg['Columns'])
def _col_label(label, column):
if label is not None:
return label
if column in _col_labels:
return _col_labels[column]
return column
def table(data: pd.DataFrame, columns=None, labels=None,
with_index=True, index_label=None, limit=None):
"""
Displays an HTML table with the given data.
A subset of columns can be selected with `columns`.
The labels in the header can be explicitly specified with `labels`.
Does not support multi-indexes.
Calls `IPython.display.display()` to present the HTML table.
:param data: A Pandas DataFrame
:param columns: An iterable with column names. (optional)
:param labels: An iterable with column labels. (optional)
Must be the same size as the columns.
:param with_index: A switch to include or exclude the index. (optional)
:param index_label: A string or an iterable with labels for the index.
(optional)
:param limit: A maximum number of rows to display. (optional)
"""
    if data.empty:
        display(HTML('<p>No Entries</p>'))
        return
columns = columns or data.columns
if labels:
headers = labels
else:
headers = [_col_labels[c] if c in _col_labels else c for c in columns]
if with_index:
headers.insert(0, index_label or 'index')
def cells(r):
return chain((r[0],), (getattr(r, c) for c in columns))
else:
def cells(r):
return (getattr(r, c) for c in columns)
rows = map(cells, data.itertuples())
if limit:
rows = islice(rows, limit)
display(HTML(tabulate(rows, tablefmt='html', headers=headers)))
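# Minimal usage sketch (the DataFrame and its column names are made up for illustration):
# import pandas as pd
# df = pd.DataFrame({'city': ['Berlin', 'Leipzig'], 'count': [12, 7]})
# table(df, columns=['city', 'count'], index_label='Nr.', limit=10)
# Because 'count' is registered in _col_labels, its header is rendered as 'Anzahl'.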
def _default_figure_handler(subplot, fig, ax=None,
title=None, pad=None,
file_name=None, file_dpi=None):
if not fig:
return
if not subplot:
if pad is not None:
fig.tight_layout(pad=pad)
if file_name:
fig.savefig(file_name, dpi=file_dpi)
if title:
ax = ax or fig.gca()
if ax:
ax.set_title(title)
if not subplot:
plt.show()
current_figure = None
current_grid = (1, 1)
_figure_handler = _default_figure_handler
def _finish_figure(fig=None, **kwargs):
if fig is None:
return
_figure_handler(subplot=_in_multiplot(), fig=fig, **kwargs)
def set_figure_handler(handler):
"""
Set a handler, which is called after rendering every plot.
The specified handler must accept the following keyword arguments:
- ``subplot`` A boolean flag indicating that the figure is a subplot
- ``fig`` The figure object of the plot
- ``ax`` The main axis or `None`
- ``title`` A title for the main axis or `None`
- ``pad`` A padding value for calling `tight_layout()` or `None`
- ``file_name`` The filename for the target image file or `None`
- ``file_dpi`` The dpi value for the target image file or `None`
:param handler: The figure handler to use for future plots
"""
global _figure_handler
_figure_handler = handler
def reset_figure_handler():
"""
Reset the handler, which is called after rendering every plot,
to the default.
"""
global _figure_handler
_figure_handler = _default_figure_handler
def begin(figsize=(10, 5), grid=(1, 1)):
"""
Begins a figure with multiple subplots.
:param figsize: A tuple with the figure size in inches (width, height).
(optional)
:param grid: The grid size to place the subplots in (rows, columns).
(optional)
"""
global current_figure, current_grid
if current_figure is not None:
warn("There is already an open figure. Did you use end()?")
current_figure = plt.figure(figsize=figsize)
current_grid = grid
def end(pad=1.5, w_pad=None, h_pad=None,
file_name=None, file_dpi=300):
"""
Finalizes a figure with multiple subplots.
:param pad: Padding around the figure. (optional)
:param w_pad: Horizontal space between subplots. (optional)
See `matplotlib.pyplot.tight_layout()`.
:param h_pad: Vertical space between subplots. (optional)
See `matplotlib.pyplot.tight_layout()`.
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
    global current_figure
if current_figure is None:
raise Exception("No current figure. Did you use begin()?")
if pad is not None:
plt.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
elif h_pad is not None or w_pad is not None:
plt.tight_layout(h_pad=h_pad, w_pad=w_pad)
fig = current_figure
current_figure = None
_finish_figure(
fig=fig, pad=None,
file_name=file_name, file_dpi=file_dpi)
def _in_multiplot():
global current_figure
return current_figure is not None
def _plt(figsize=(10, 4), pos=(0, 0), rowspan=1, colspan=1):
global current_figure, current_grid
if current_figure:
ax = plt.subplot2grid(current_grid, pos,
rowspan=rowspan, colspan=colspan)
return (current_figure, ax)
else:
fig = plt.figure(figsize=figsize)
return (fig, plt.gca())
def subplot(pos=(0, 0), rowspan=1, colspan=1):
"""
Prepares a sub-plot inside the current figure between calls
of `begin()` and `end()`.
This method is useful, if a custom plot must be integrated
into a multiplot created with `mastersign.datasience.plot`.
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:return: A tuple with Matplotlib figure and axes: ``(fig, ax)``.
"""
if not _in_multiplot():
raise Exception("No current figure. Did you use begin()?")
return _plt(pos=pos, rowspan=rowspan, colspan=colspan)
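# Multiplot workflow sketch (the DataFrame and column names are illustrative only):
# begin(figsize=(12, 5), grid=(1, 2))
# pie_groups(df, column='category', pos=(0, 0))                    # left cell of the 1x2 grid
# bar(df, value_column='count', label_column='city', pos=(0, 1))   # right cell
# end(pad=1.5, file_name='overview.png')
# Custom plots can be placed in a cell via fig, ax = subplot(pos=(0, 1)) between begin() and end().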
def _build_key_colors(keys, color):
if isinstance(color, str):
return repeat(color, len(keys))
elif isinstance(color, Mapping):
return [color.get(k, None) or next(plt.gca()._get_lines.prop_cycler)['color']
for k in keys]
elif isinstance(color, Iterable):
return cycle(color)
else:
return [next(plt.gca()._get_lines.prop_cycler)['color'] for k in keys]
def pie(data: Union[pd.DataFrame, pd.Series],
column=None, label_column=None,
color_column=None, color=None,
startangle=180, counterclock=False,
sort_by=None, title=None, pct=True,
figsize=(4, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a pie chart with values from a column in a DataFrame
or a Series.
:param data: A Pandas DataFrame or Series.
:param column: The column to use if `data` is a DataFrame.
:param label_column: A column to use for the labels. (optional)
By default the index is used.
:param color_column: A column with color names or RGB hex values.
(optional)
:param color: A list or dict for the colors in the pie.
(optional)
If it is a dict the keys are the labels.
Gets overridden by `color_column`.
:param sort_by: The sort mode `None`, `"label"`, or `"value"`
(optional)
:param startangle: The start angle in degrees. (optional)
:param counterclock: A switch to control the angular order. (optional)
:param title: The title of the plot. (optional)
:param pct: A switch to display percentages. (optional)
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
# data is a DataFrame
if column is None:
raise TypeError("If data is a DataFrame, column must be specified.")
if sort_by:
data = data.sort_values(by=label_column) \
if label_column else data.sort_index()
if sort_by == 'value':
data.sort_values(by=column, ascending=False, inplace=True)
x = data[column]
labels = data[label_column] if label_column else data.index
else:
# data is assumed to be a Series
if sort_by:
data = data.sort_index()
if sort_by == 'value':
data.sort_values(ascending=False, inplace=True)
x = data
labels = data.index
color_column = None # ignore color_column for Series
(fig, ax) = _plt(figsize=figsize, pos=pos,
rowspan=rowspan, colspan=colspan)
if color_column:
colors = data[color_column]
elif isinstance(color, Mapping):
colors = [color.get(l) or next(plt.gca()._get_lines.prop_cycler)['color']
for l in labels]
elif color:
colors = color
else:
colors = None
if pct:
ax.pie(x, labels=labels, colors=colors,
startangle=startangle, counterclock=counterclock,
autopct='%1.1f%%')
else:
ax.pie(x, labels=labels, colors=colors,
startangle=startangle, counterclock=counterclock)
ax.axis('equal')
_finish_figure(
fig=fig, ax=ax, title=title, pad=pad,
file_name=file_name, file_dpi=file_dpi)
def pie_groups(data: Union[pd.DataFrame, pd.Series],
column=None, sort_by=None,
startangle=180, counterclock=False,
title=None, pct=True, color=None,
figsize=(4, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a pie chart by counting rows according to a column value
from a DataFrame or values from a Series.
:param data: A Pandas DataFrame or Series.
:param column: The column to use for grouping.
:param sort_by: The sort mode `None`, `"label"`, or `"value"`
:param startangle: The start angle in degrees. (optional)
:param counterclock: A switch to control the angular order. (optional)
:param title: The title of the plot.
:param pct: A switch to display percentages.
:param color: A list or dict for the colors in the pie.
(optional)
If it is a dict the groups are the labels.
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
groups = data.groupby(column, sort=False).size()
else:
groups = data.groupby(by=data, sort=False).size()
group_data = pd.DataFrame({'value': groups}, index=groups.index)
pie(group_data, 'value', sort_by=sort_by,
startangle=startangle, counterclock=counterclock,
title=title, pct=pct, color=color,
figsize=figsize, pad=pad, pos=pos, rowspan=rowspan, colspan=colspan,
file_name=file_name, file_dpi=file_dpi)
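# Sketch: pie_groups counts the rows per value of a column; a dict maps group labels to fixed
# colors and any label not in the dict falls back to the Matplotlib color cycle. Names are illustrative.
# pie_groups(df, column='status', sort_by='value',
#            color={'open': '#10b9ce', 'closed': '#ef4631'}, title='Tickets by status')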
def bar(data: Union[pd.DataFrame, pd.Series],
value_column=None, label_column=None,
color_column=None, cmap=None, color=None,
xlabel=None, ylabel=None, title=None,
figsize=(10, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a bar chart from columns in a DataFrame or a Series.
:param data: A Pandas DataFrame or Series.
:param value_column: The column with the values for the bars height.
:param label_column: The column with the labels for the bars. (optional)
:param color_column: The column with a numeric value for choosing
a color from a color map or strings
for explicit colors. (optional)
:param cmap: The name of a color map to use with `color_column`.
(optional)
:param color: A color for all bars or a list with colors. (optional)
`color_column` superseeds `color`.
:param xlabel: The label for the X axis. (optional)
:param ylabel: The label for the Y axis. (optional)
:param title: The title of the plot. (optional)
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
all_columns = [value_column, label_column, color_column]
        columns = list(dict.fromkeys(c for c in all_columns if c))  # unique columns; .loc expects a list, not a set
data = data.loc[:, columns].dropna()
values = data[value_column]
if label_column:
labels = data[label_column]
else:
labels = values.index
else:
values = data
labels = data.index
color_column = None # ignore color_column for Series
(fig, ax) = _plt(figsize=figsize, pos=pos,
rowspan=rowspan, colspan=colspan)
bars = ax.bar(labels, values)
if color_column:
colors = data[color_column]
if pd_types.is_numeric_dtype(colors.dtype):
color_map = plt_cm.get_cmap(cmap)
norm = plt_colors.Normalize(vmin=colors.min(), vmax=colors.max())
for b, cv in zip(bars, colors):
b.set_color(color_map(norm(cv)))
else:
for b, c in zip(bars, colors):
b.set_color(c)
elif color:
if type(color) is str:
for b in bars:
b.set_color(color)
else:
for b, c in zip(bars, cycle(color)):
b.set_color(c)
ax.set_xlabel(_col_label(xlabel, label_column))
ax.set_ylabel(_col_label(ylabel, value_column))
_finish_figure(
fig=fig, ax=ax, title=title, pad=pad,
file_name=file_name, file_dpi=file_dpi)
def bar_groups(data: pd.DataFrame,
value_column, key_column, keys=None, label_column=None,
color_column=None, cmap=None, color=None,
stacked=False, relative=False,
xlabel=None, ylabel=None, title=None, legend=True,
figsize=(10, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a bar chart with grouped bars from columns in a DataFrame.
:param data: A Pandas DataFrame.
:param value_column: The column with the values for the bars height.
:param key_column: The column with the key to group by.
:param keys: A list with group keys to select. (optional)
By default the group keys are taken from the key
column and sorted alphabetically.
:param label_column: The column with the labels for the bars. (optional)
:param color_column: The column with a numeric value for choosing
a color from a color map or strings
for explicit colors. (optional)
:param cmap: The name of a color map to use with `color_column`.
(optional)
:param color: A list or dict with colors for the groups. (optional)
`color_column` superseeds `color`.
:param stacked: A switch to stack the bars. (optional)
:param relative: A switch to show relative portions with stacked bars.
(optional)
:param legend: A switch to control the visibility of the legend.
(optional)
:param xlabel: The label for the X axis. (optional)
:param ylabel: The label for the Y axis. (optional)
:param title: The title of the plot. (optional)
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
all_columns = [value_column, key_column, label_column, color_column]
    columns = list(dict.fromkeys(c for c in all_columns if c))  # unique columns; .loc expects a list, not a set
data = data.loc[:, columns].dropna()
if keys is None:
keys = data[key_column].drop_duplicates().sort_values().values
groups = {k: data.loc[data[key_column] == k, :] for k in keys}
first_group = groups[keys[0]]
first_labels = first_group[label_column] if label_column else first_group.index
gs = len(keys)
gd = gs + 0.5
    # keep the grid position argument `pos` intact; the bar x-positions get their own name
    if stacked:
        bar_pos = list(np.arange(0, len(first_group)))
        if relative:
            label_scale = 100.0 / sum(g[value_column].values for g in groups.values())
        else:
            label_scale = [1.0] * len(first_labels)
    else:
        bar_pos = {k: list(np.arange(i, i + len(first_group) * gd, gd))
                   for i, k in enumerate(keys)}
if color_column:
color_values = data[color_column]
if pd_types.is_numeric_dtype(color_values.dtype):
color_map = plt_cm.get_cmap(cmap)
norm = plt_colors.Normalize(vmin=color_values.min(), vmax=color_values.max())
(fig, ax) = _plt(figsize=figsize, pos=pos,
rowspan=rowspan, colspan=colspan)
legend_handles = []
last_key = None
for k, c in zip(keys, _build_key_colors(keys, color)):
g = groups[k]
if stacked:
            p = bar_pos
if last_key:
bars = ax.bar(p, g[value_column].values * label_scale, color=c,
bottom=groups[last_key][value_column].values * label_scale)
else:
bars = ax.bar(p, g[value_column].values * label_scale, color=c)
last_key = k
else:
            bars = ax.bar(bar_pos[k], g[value_column].values, color=c, width=1)
if color_column:
colors = g[color_column]
if
|
pd_types.is_numeric_dtype(colors.dtype)
|
pandas.api.types.is_numeric_dtype
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 14:24:25 2017
@author: ajaver
"""
from tierpsy.features.tierpsy_features.helper import get_n_worms_estimate, \
get_delta_in_frames, add_derivatives
from tierpsy.features.tierpsy_features.events import get_event_stats, event_region_labels, event_columns
from tierpsy.features.tierpsy_features.path import get_path_extent_stats
from tierpsy.features.tierpsy_features.features import timeseries_feats_columns, \
ventral_signed_columns, path_curvature_columns, curvature_columns
import pandas as pd
import numpy as np
index_colums = ['worm_index', 'timestamp']
blob_feats_columns = ['blob_area',
'blob_perimeter',
'blob_box_length',
'blob_box_width',
'blob_quirkiness',
'blob_compactness',
'blob_solidity',
'blob_hu0',
'blob_hu1',
'blob_hu2',
'blob_hu3',
'blob_hu4',
'blob_hu5',
'blob_hu6'
]
#get the ratios to be normalized
feats2normalize = {
'L' : [
'head_tail_distance',
'major_axis',
'minor_axis',
'dist_from_food_edge',
'length',
'width_head_base',
'width_midbody',
'width_tail_base'
],
'1/L' : path_curvature_columns + curvature_columns,
'L^2' : ['area']
}
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'radial_velocity' in x]
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'speed' in x]
#add derivatives and make sure there are not duplicates
for k,dat in feats2normalize.items():
dfeats = ['d_' + x for x in dat if not x.startswith('d_')]
feats2normalize[k] = list(set(dat) ^ set(dfeats))
def _normalize_by_w_length(timeseries_data, feats2norm):
'''
    Normalize features by body length. This is far from being the most efficient solution, but it is the easiest to implement.
'''
def _get_conversion_vec(units_t, median_length_vec):
'''helper function to find how to make the conversion'''
if units_t == 'L':
conversion_vec = 1/median_length_vec
elif units_t == '1/L':
conversion_vec = median_length_vec
elif units_t == 'L^2':
conversion_vec = 1/median_length_vec**2
return conversion_vec
timeseries_data = timeseries_data.copy()
median_length = timeseries_data.groupby('worm_index').agg({'length':'median'})
median_length_vec = timeseries_data['worm_index'].map(median_length['length'])
changed_feats_l = []
for units_t, feats in feats2norm.items():
feats_f = [x for x in timeseries_data if any(x.startswith(f) for f in feats)]
conversion_vec = _get_conversion_vec(units_t, median_length_vec)
for f in feats_f:
timeseries_data[f] *= conversion_vec
changed_feats_l += feats_f
changed_feats = {x: x + '_norm' for x in changed_feats_l}
timeseries_data = timeseries_data.rename(columns = changed_feats)
return timeseries_data, changed_feats
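# Worked example of the unit handling above (numbers are illustrative): for a worm of median
# length L = 1000 microns, a feature tagged 'L' (e.g. 'head_tail_distance') is divided by 1000,
# a feature tagged '1/L' (e.g. a curvature) is multiplied by 1000, and a feature tagged 'L^2'
# (e.g. 'area') is divided by 1000**2. The converted columns are renamed with the '_norm' suffix.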
def get_df_quantiles(df,
feats2check = timeseries_feats_columns,
subdivision_dict = {'food_region':['orientation_food_edge']},
feats2norm = feats2normalize,
feats2abs = ventral_signed_columns,
is_remove_subdivided = True,
is_abs_ventral = True,
is_normalize = False
):
'''
Get quantile statistics for all the features given by `feats2check`.
    For the features in `feats2abs` only the absolute values are used. This is to
    deal with worms whose dorsal/ventral orientation is unknown.
'''
if not feats2check:
return None
q_vals = (0.1, 0.5, 0.9) #percentiles to calculate
iqr_limits = (0.25, 0.75) # range of percentiles used for the interquantile distance
valid_q = q_vals + iqr_limits
    df = df.copy()  # like this I can modify the df directly without long-lasting consequences
#filter features to be abs
def _filter_ventral_features(feats2check):#%%
valid_f = [x for x in feats2check if any(x.startswith(f) for f in feats2abs)]
return valid_f
#filter default columns in case they are not present
feats2check = [x for x in feats2check if x in df]
#filter default columns in case they are not present. Same for the subdivision dictionary.
subdivision_dict_r = {}
for e_subdivide, feats2subdivide in subdivision_dict.items():
ff = [x for x in feats2check if x in feats2subdivide]
if e_subdivide in df and ff:
subdivision_dict_r[e_subdivide] = ff
subdivision_dict = subdivision_dict_r
#subdivide a feature using the event features
subdivided_df = _get_subdivided_features(df, subdivision_dict = subdivision_dict)
df = df.join(subdivided_df)
feats2check += subdivided_df.columns.tolist()
    if is_remove_subdivided:
        # drop the original features that were subdivided; collect them from the dict itself
        # so this also works when the subdivision dictionary ended up empty
        subdivided_src = [f for ff in subdivision_dict.values() for f in ff]
        df = df[[x for x in df if x not in subdivided_src]]
        feats2check = [x for x in feats2check if x not in subdivided_src]
#add normalized features
if is_normalize:
df, changed_feats = _normalize_by_w_length(df, feats2norm = feats2norm)
feats2check = [x if not x in changed_feats else changed_feats[x] for x in feats2check]
#abs features that are ventral/dorsal side
if is_abs_ventral:
feats2abs = _filter_ventral_features(feats2check)
#find features that match ventral_signed_columns
if feats2abs:
#normalize
if df.size > 0:
df[feats2abs] = df[feats2abs].abs()
#change name
df.columns = [x + '_abs' if x in feats2abs else x for x in df.columns]
feats2check = [x + '_abs' if x in feats2abs else x for x in feats2check]
#calculate quantiles
feat_mean = None
Q = df[feats2check].quantile(valid_q)
feat_mean = pd.concat((feat_mean, Q), axis=1)
#name correctly
dat = []
for q in q_vals:
q_dat = feat_mean.loc[q]
q_str = '_{}th'.format(int(round(q*100)))
for feat, val in q_dat.iteritems():
dat.append((val, feat+q_str))
IQR = feat_mean.loc[0.75] - feat_mean.loc[0.25]
dat += [(val, feat + '_IQR') for feat, val in IQR.iteritems()]
feat_mean_s = pd.Series(*list(zip(*dat)))
return feat_mean_s
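# Usage sketch (the timeseries DataFrame is assumed to come from the tierpsy pipeline and to
# contain the feature columns plus 'worm_index'/'timestamp'):
# feat_stats = get_df_quantiles(timeseries_data, is_abs_ventral=True, is_normalize=False)
# The result is a pandas Series with one entry per feature and statistic,
# e.g. 'speed_10th', 'speed_50th', 'speed_90th' and 'speed_IQR'.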
def _get_subdivided_features(timeseries_data, subdivision_dict):
'''
subdivision_dict = {event_v1: [feature_v1, feature_v2, ...], event_v2: [feature_vn ...], ...}
event_vector = [-1, -1, 0, 0, 1, 1]
feature_vector = [1, 3, 4, 5, 6, 6]
new_vectors ->
[1, 3, nan, nan, nan, nan]
[nan, nan, 4, 5, nan, nan]
[nan, nan, nan, nan, 6, 6]
'''
#assert all the subdivision keys are known events
assert all(x in event_region_labels.keys() for x in subdivision_dict)
event_type_link = {#%%
'food_region' : '_in_',
'motion_mode' : '_w_'
}
subdivided_data = []
for e_col, timeseries_cols in subdivision_dict.items():
e_data = timeseries_data[e_col].values
if e_col in event_type_link:
str_l = event_type_link[e_col]
else:
str_l = '_'
for flag, label in event_region_labels[e_col].items():
_flag = e_data != flag
for f_col in timeseries_cols:
f_data = timeseries_data[f_col].values.copy()
try:
f_data[_flag] = np.nan
except:
import pdb
pdb.set_trace()
new_name = f_col + str_l + label
subdivided_data.append((new_name, f_data))
if not subdivided_data:
#return empty df if nothing was subdivided
return
|
pd.DataFrame([])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Read streamed and locally saved twitter data."""
import pandas as pd
if __name__ == "__main__":
local_csv_fpath = "CSV_FILE_TWEETS_LOCAL.csv"
cols_to_show = [
# "id",
# "user",
"screen_name",
"location",
"created_at"
# "geo",
# "text",
]
dtypes_dict = {
"id": str,
"user": str,
"screen_name": str,
"location": str,
"text": str,
"followers": int,
}
# Read data
df = pd.read_csv(
local_csv_fpath,
dtype=dtypes_dict,
lineterminator="\n",
parse_dates=["created_at"],
)
# Convert datetime col to EST
df["created_at"] = pd.to_datetime(df["created_at"]).dt.tz_convert(
"US/Eastern"
)
# Show subset of columns
with pd.option_context("display.max_columns", 100):
with
|
pd.option_context("display.max_rows", 500)
|
pandas.option_context
|
# -*- coding: utf-8 -*-
"""Utility methods for Exploratory Data Analysis and Pre-processing"""
import os
import shutil
import operator
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from tqdm import tqdm
#from google.cloud import storage
from sklearn import preprocessing
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy.stats import percentileofscore
from sklearn.mixture import GaussianMixture
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
TM_pal_categorical_3 = ("#ef4631", "#10b9ce", "#ff9138")
sns.set(
style="white",
font_scale=1,
palette=TM_pal_categorical_3,
)
SEED = 42
np.random.seed(SEED)
#### Scoring Helper Functions ####
def pearsonr2(estimator, X, y_true):
"""Calculates r-squared score using pearsonr
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
R2 using pearsonr
"""
y_pred = estimator.predict(X)
return pearsonr(y_true, y_pred)[0]**2
def mae(estimator, X, y_true):
"""Calculates mean absolute error
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
Mean absolute error
"""
y_pred = estimator.predict(X)
return mean_absolute_error(y_true, y_pred)
def rmse(estimator, X, y_true):
"""Calculates root mean squared error
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
Root mean squared error
"""
y_pred = estimator.predict(X)
return np.sqrt(mean_squared_error(y_true, y_pred))
def r2(estimator, X, y_true):
"""Calculates r-squared score using python's r2_score function
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
R-squared score using python's r2_score function
"""
y_pred = estimator.predict(X)
return r2_score(y_true, y_pred)
def mape(estimator, X, y_true):
"""Calculates mean average percentage error
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
Mean average percentage error
"""
y_pred = estimator.predict(X)
return np.mean(np.abs(y_true - y_pred) / np.abs(y_true)) * 100
def adj_r2(estimator, X, y_true):
"""Calculates adjusted r-squared score
Parameters
----------
estimator
The model or regressor to be evaluated
X : pandas dataframe or a 2-D matrix
The feature matrix
y : list of pandas series
The target vector
Returns
----------
float
Adjusted r-squared score
"""
y_pred = estimator.predict(X)
r2 = r2_score(y_true, y_pred)
n = X.shape[0]
k = X.shape[1]
adj_r2 = 1 - (((1-r2)*(n-1))/(n - k - 1))
return adj_r2
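# These helpers follow the sklearn scorer signature (estimator, X, y), so they can be passed
# directly to cross-validation. Sketch with placeholder data X, y:
# from sklearn.model_selection import cross_val_score
# from sklearn.linear_model import Ridge
# r2_scores = cross_val_score(Ridge(), X, y, cv=5, scoring=r2)
# rmse_scores = cross_val_score(Ridge(), X, y, cv=5, scoring=rmse)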
def percentile_ranking(series):
"""Converts list of numbers to percentile and ranking
Parameters
----------
series : pandas Series
A series of numbers to be converted to percentile ranking
Returns
----------
list (of floats)
A list of converted percentile values using scipy.stats percentileofscore()
list (of ints)
A list containing the ranks
"""
percentiles = []
for index, value in series.iteritems():
curr_index = series.index.isin([index])
percentile = percentileofscore(series[~curr_index], value)
percentiles.append(percentile)
ranks = series.rank(axis=0, ascending=False)
return percentiles, ranks
#### Plotting Helper Functions ####
def plot_hist(data, title, x_label, y_label, bins=30):
"""Plots histogram for the given data
Parameters
----------
data : pandas Series
The data to plot histogram
title : str
The title of the figure
x_label : str
Label of the x axis
y_label : str
Label of the y-axis
bins : int
Number of bins for histogram
"""
plt.hist(data, bins=bins)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def plot_regplot(
data,
x_label='Wealth Index',
y_label='Average Nightlight Intensity',
y_var='ntl_mean'
):
"""Produces the regression plot for the given data
Parameters
----------
data : pandas Series
The data to plot regression plot
x_var : str
The variable name of the x-axis
y_var : str
The variable name of the y-axis
x_label : str
Label of the x axis
y_label : str
Label of the y-axis
"""
ax = sns.regplot(
x=x_label,
y=y_var,
data=data,
lowess=True,
line_kws={"color": "black", "lw": 2},
scatter_kws={"alpha": 0.3},
)
plt.ticklabel_format(style='sci', axis='x', scilimits=(1,5))
plt.title(
"Relationship between {} \nand {}".format(
x_label, y_label
)
+ r" ($\rho$ = %.2f, $r$ =%.2f)"
% (
spearmanr(
data[x_label].tolist(), data[y_var].tolist()
)[0],
pearsonr(
data[x_label].tolist(), data[y_var].tolist()
)[0],
)
)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def plot_corr(
data,
features_cols,
name,
pathway,
indicator="Wealth Index",
figsize=(5, 6),
max_n=30,
):
"""Produces a barplot of the Spearman rank correlation and Pearson's correlation
for a group of values in descending order
Parameters
----------
data : pandas DataFrame
The dataframe containing the feature columns
feature_cols : str
The list of feature column names in the data
name : str
The title of the plot's file
pathway : str
The desired pathway to the plot
indicator : str (default is "Wealth Index")
The socioeconomic indicator to correlate each variable with
figsize : tuple (default is (5,6))
Size of the figure
max_n : int
Maximum number of variables to plot
"""
n = len(features_cols)
spearman = []
pearsons = []
for feature in features_cols:
spearman.append(
( feature, spearmanr(data[feature], data[indicator])[0] )
)
pearsons.append(
( feature, pearsonr(data[feature], data[indicator])[0] )
)
spearman = sorted(spearman, key=lambda x: abs(x[1]))
pearsons = sorted(pearsons, key=lambda x: abs(x[1]))
#
plt.figure(figsize=figsize)
plt.title( "Spearman Correlation Coefficient for {}".format(indicator) )
plt.barh(
[x[0] for x in spearman[n - max_n :]],
[x[1] for x in spearman[n - max_n :]],
)
plt.grid()
#
plt.figure(figsize=figsize)
plt.title( "Pearsons Correlation Coefficient for {}".format(indicator) )
plt.barh(
[x[0] for x in pearsons[n - max_n :]],
[x[1] for x in pearsons[n - max_n :]],
)
plt.grid()
file_name = pathway + name
plt.savefig(fname = file_name)
plt.show(block=False)
#### Nighttime Lights Pre-processing Helper Functions ####
def ntl_agg_fnc(data):
agg = {}
agg['mean'] = data['ntl2016'].mean()
agg['max'] = data['ntl2016'].max()
agg['min'] = data['ntl2016'].min()
agg['median'] = data['ntl2016'].median()
    agg['cov'] = data['ntl2016'].cov(data['ntl2016'])  # covariance of the series with itself, i.e. its variance
agg['std'] = data['ntl2016'].std()
agg['skewness'] = data['ntl2016'].skew()
agg['kurtosis'] = data['ntl2016'].kurtosis()
return pd.Series(agg, index=[
'mean',
'max',
'min',
'median',
'cov',
'std',
'skewness',
'kurtosis'
])
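# Hedged usage sketch (illustrative, not part of the original module): the aggregation
# above is intended to be applied per cluster via groupby().apply(); the column names
# ('DHSCLUST', 'ntl2016') follow the nightlights dataframe used elsewhere in this module.
def _example_ntl_aggregation(ntl_df):
    # ntl_df: one row per pixel, with 'DHSCLUST' and 'ntl2016' columns
    return ntl_df.groupby("DHSCLUST").apply(ntl_agg_fnc)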
def unstack_clusters(
data,
id_col='ID',
dhs_col='DHSCLUST',
lat_col='ntllat',
lon_col='ntllon',
ntl_col='ntl2016',
pop_col='pop_sum',
file_col='filename',
ph_prefix=True
):
""" Unstacks nightlights data where certain pixels can belong to two or more clusters.
Makes it so that each row is a unique (cluster, id) pair.
Parameters
----------
data : pandas DataFrame
The nightlights dataset to be unstacked
Returns
----------
pandas DataFrame
A dataframe of unstacked rows
"""
first_row = data.iloc[0, :]
temp = {x: [] for x in [id_col, dhs_col, lat_col, lon_col, ntl_col, file_col, pop_col] if x in first_row}
for index, row in tqdm(
data.iterrows(), total=len(data)
):
clusters = [
x.strip() for x in row[dhs_col].split(",")
]
for cluster in clusters:
if ph_prefix:
cluster = cluster.replace("PH2017", "").lstrip("0")
temp[dhs_col].append(int(cluster))
if id_col in row:
temp[id_col].append(row[id_col])
if lon_col in row:
temp[lon_col].append(row[lon_col])
if lat_col in row:
temp[lat_col].append(row[lat_col])
if ntl_col in row:
temp[ntl_col].append(row[ntl_col])
if pop_col in row:
temp[pop_col].append(row[pop_col])
if file_col in row:
temp[file_col].append(row[file_col])
    data = pd.DataFrame(temp)
    return data
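# Hedged usage sketch (illustrative, not part of the original module); assumes the
# module's own imports (pandas as pd, tqdm). A pixel assigned to the comma-separated
# clusters "PH201700001, PH201700002" is duplicated into one row per cluster, with the
# cluster id parsed to an int.
def _example_unstack_clusters():
    demo = pd.DataFrame({
        "ID": [1],
        "DHSCLUST": ["PH201700001, PH201700002"],
        "ntl2016": [3.5],
    })
    return unstack_clusters(demo)  # -> two rows, with DHSCLUST values 1 and 2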
# https://github.com/CSAILVision/places365
# https://github.com/surya501/reverse-image-search
from annoy import AnnoyIndex
from geopy.geocoders import Nominatim
import os
import pickle
from PIL import Image
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision import transforms as trn
import NextPick.config as cfg
# define image transformer
transform = trn.Compose([trn.Resize((256, 256)),
trn.CenterCrop(224),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def load_pretrained_model(arch='resnet18'):
'''
Loads the pretrained PyTorch CNN models pretrained on places365 dataset.
Model options are 'alexnet','densenet161','resnet18', and 'resnet50'.
    By default the 'resnet18' architecture is chosen for its low memory footprint.
Class labels follow 'categories_places365.txt'.
:return: model for generating feature embeddings and full model for class label prediction
'''
# make sure os.getcwd() returns the project home directory.
model_file = '%s/NextPick/NextPick/%s_places365.pth.tar' %(cfg.APP_PATH, arch)
# load pre-trained weights
model = models.__dict__[arch](num_classes=cfg.NUMCLASS)
model_full = models.__dict__[arch](num_classes=cfg.NUMCLASS)
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
state_dict = {str.replace(k, 'module.', ''): v for k, v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
model_full.load_state_dict(state_dict)
model.fc_backup = model.fc
model.fc = nn.Sequential()
model_full.eval()
return model, model_full
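# Hedged usage sketch (illustrative, not part of the original module): the truncated
# model returns a feature embedding (its fc layer is replaced by an empty Sequential),
# while model_full still predicts the places365 class scores. 'example.jpg' is a
# placeholder path.
def _example_embed_image(img_path="example.jpg"):
    model, model_full = load_pretrained_model("resnet18")
    model.eval()
    img = Image.open(img_path).convert("RGB")
    batch = transform(img).unsqueeze(0)  # 1 x 3 x 224 x 224
    with torch.no_grad():
        embedding = model(batch)          # 1 x 512 feature embedding
        class_scores = model_full(batch)  # 1 x NUMCLASS scene-category logits
    return embedding, class_scores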
def load_pkl_paths(folder):
'''
:param folder: the path of the 'data' folder
:return: a list of paths to pickle files that store the geo data
'''
# pass in the 'data' folder as 'folder'
class_names = [fold for fold in os.listdir(folder)] # this should get folder names at the 'abbey' level
paths_list = []
for cl in class_names:
img_files = [f for f in os.listdir(os.path.join(folder, cl)) if '.pkl' in f]
for img in img_files:
full_path = os.path.join(folder, cl, img)
paths_list.append(full_path)
    df = pd.DataFrame(paths_list, columns=['path'])
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 10:46:02 2018
@author: nce3xin
"""
import torch
import pandas as pd
def extract_min_max_forward_times(df):
min_value=df.iloc[:,13:-1].min().min()
max_value=df.iloc[:,13:-1].max().max()
return min_value,max_value
# ts is a vector containing only the time series (not the instance number, usernames, etc.)
def to2D(ts,min_forward_times,max_forward_times):
tensor=torch.zeros((max_forward_times-min_forward_times+1,len(ts)),dtype=torch.long)
for i,val in enumerate(ts):
val=int(val)
tensor[val,i]=1
return tensor
def convertTo2D(df, min_forward_times, max_forward_times):
    n_row = len(df)
    # length of the time-series slice used for every row (columns 13 up to, but not including, the last column)
    ts_len = df.shape[1] - 13 - 1
    data = torch.zeros(n_row, max_forward_times - min_forward_times + 1, ts_len, dtype=torch.long)
    for i in range(n_row):
        ts = df.iloc[i, 13:-1]
        tensor2D = to2D(ts, min_forward_times, max_forward_times)
        data[i] = tensor2D
    return data
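# Hedged usage sketch (illustrative, not part of the original module): to2D one-hot
# encodes a forward-count time series along the value axis, producing a
# (value range) x (time steps) grid of 0/1 indicators.
def _example_to2D():
    ts = [0, 2, 1]            # forward counts at three time steps
    grid = to2D(ts, 0, 2)     # shape: (2 - 0 + 1) x 3
    # grid[0, 0] == grid[2, 1] == grid[1, 2] == 1, every other entry is 0
    return grid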
def extract_2D_features():
# load merged_train_df and merged_test_df
    merged_train_df = pd.read_csv('data/gen/train.csv')
import Functions
import pandas as pd
import matplotlib.pyplot as plt
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum()
return dfSentiment
def cleaning(df):
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
# Removing bots from the data
df = df[~df.author.isin(bots.bot_names)]
# Removing any NA's
    df = df.dropna()
    # Cleaning the text data (earlier per-pattern attempts have been merged into the single regex below)
keeplist = "?.!,'_-"
import re
Adj_comment = pd.DataFrame(
[re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
r'|[^\s\w' + keeplist + ']', "", elem) for elem in df['body']], columns=['body'])
df['body'] = Adj_comment['body']
return df
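# Hedged illustration (not part of the original script): the combined regex above strips
# URLs, /u/ and /r/ mentions, control characters, digits and HTML entities, keeping only
# word characters and the punctuation listed in keeplist.
def _example_clean_comment():
    import re
    keeplist = "?.!,'_-"
    raw = "Check https://example.com and ask /u/someone about 42 bitcoins!"
    cleaned = re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
                     r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
                     r'|[^\s\w' + keeplist + ']', "", raw)
    return cleaned  # the URL, the /u/ mention and the digits are removed; keeplist punctuation survives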
period = ['2014', '2015_01', '2015_02', '2015_03', '2015_04', '2015_05', '2015_06', '2015_07', '2015_08', '2015_09',
'2015_10', '2015_11', '2015_12', '2016_01', '2016_02', '2016_03', '2016_04', '2016_05', '2016_06', '2016_07',
'2016_08', '2016_09', '2016_10',
'2016_11', '2016_12', '2017_01', '2017_02', '2017_03', '2017_04', '2017_05', '2017_06', '2017_07', '2017_08',
'2017_09',
'2017_10', '2017_11', '2017_12', '2018_01', '2018_02', '2018_03', '2018_04', '2018_05', '2018_06', '2018_07',
'2018_08',
'2018_09', '2018_10', '2018_11', '2018_12', '2019_01', '2019_02', '2019_03', '2019_04', '2019_05', '2019_06',
'2019_07',
'2019_08', '2019_09']
dfAllData = pd.DataFrame()
for sPeriod in period:
query = r"""
#standardSQL
SELECT author, subreddit, created_utc, score, controversiality, body
FROM `fh-bigquery.reddit_comments.{}`
WHERE REGEXP_CONTAINS(body, r'(?i)\b Dash\b')
""".format(sPeriod)
dfData = Functions.collect_big_query(sQuery=query)
print(sPeriod + ' Collected')
print(sPeriod + ' cleaned')
    dfAllData = pd.concat([dfAllData, dfData])
del dfData
dfAllData.to_csv('Dash_sentiment.csv')
coin_list = ['BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS']
dfSubRed = pd.DataFrame()
for scoin in coin_list:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfSubRed = pd.concat([dfSubRed, pd.DataFrame(dfTemp.subreddit.value_counts()[:10].index),
pd.DataFrame(dfTemp.subreddit.value_counts()[:10].values)], axis=1)
# Removing disturbing subreddits:
# EOS:
EOS_list = ['ffxiv', 'photography', 'masseffect', 'whowouldwin', 'astrophotography', 'elementaryos']
dfTemp = pd.read_csv('EOS_sentiment.csv', index_col=0)
dfTemp = dfTemp[~dfTemp['subreddit'].isin(EOS_list)]
dfTemp.to_csv('EOS_R_Sentiment.csv')
# Ripple: indianapolis
XRP_list = ['indianapolis']
dfTemp = pd.read_csv('XRP_sentiment.csv', index_col=0) # 510558
dfTemp = dfTemp[~dfTemp['subreddit'].isin(XRP_list)]
dfTemp.to_csv('XRP_R_Sentiment.csv')
# BNB: SquaredCircle, dragonballfighterz, StreetFighter, step1, AirBnB
BNB_list = ['SquaredCircle', 'dragonballfighterz', 'StreetFighter', 'step1', 'AirBnB']
dfTemp = pd.read_csv('BNB_sentiment.csv', index_col=0)  # 109630
dfTemp = dfTemp[~dfTemp['subreddit'].isin(BNB_list)]
dfTemp.to_csv('BNB_R_Sentiment.csv')
# New coin list
coin_list_R = ['BCH', 'Cardona', 'dogecoin', 'EOS_R', 'ETH', 'LTC', 'XRP_R', 'Monero', 'BNB_R', 'IOTA', 'TEZOS']
# Removing NA's
for scoin in coin_list_R:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfTemp.to_csv(scoin + 'NA_Sentiment.csv')
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
for scoin in coin_list_NA:
dfTemp = pd.read_csv(scoin + '_Sentiment.csv', index_col=0)
dfTemp = cleaning(dfTemp)
# dfAllData = Functions.language_filter(dfAllData, series='body', language_select='en')
dfTemp = dfTemp.reset_index(drop=True)
dfTemp = Functions.get_sentiment(dfTemp, series='body')
dfTemp = group_sentiment(dfTemp)
dfTemp.to_csv(scoin + '_Actual_Sentiment.csv')
# Run from here at the start to load the required functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
dfSentiment.index = pd.DatetimeIndex(dfSentiment['Date']).date
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['positive_comment']
dfPositive = dfPositive.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['negative_comment']
dfNegative = dfNegative.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['neutral_comment']
dfNeutral = dfNeutral.merge(dfTemp, how='left', left_index=True, right_index=True)
dfMarket['Coin'] = coin_list[i]
del dfSentiment['Date']
dfData = dfMarket.merge(dfSentiment, how='inner', left_index=True, right_index=True)
dfData = dfData.reset_index()
del dfData['index']
    dfAllCoins = pd.concat([dfAllCoins, dfData])
dfAllCoins = dfAllCoins.drop(['created_utc'], axis=1)
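# Hedged illustration (not part of the original script): the MomN columns above are
# N-day rolling sums of the daily returns.
def _example_momentum():
    toy_returns = pd.Series([0.01, -0.02, 0.03, 0.01, 0.00])
    mom3 = toy_returns.rolling(3).sum()  # NaN, NaN, 0.02, 0.02, 0.04
    return mom3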
dfWMR = pd.DataFrame()
dfReturnsLag = dfReturns.iloc[1:,:]
dfMarketCapLag = dfMarketCap.iloc[:-1,:]
dfMarketCapLag.index = dfReturnsLag.index
dfWMR['WMR'] = dfReturnsLag.multiply(dfMarketCapLag).sum(axis=1) / dfMarketCapLag.sum(axis=1)
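# Hedged illustration (not part of the original script): the weighted market return is
# the lagged-market-cap weighted average of the daily coin returns,
# WMR_t = sum_i(r_i,t * mcap_i,t-1) / sum_i(mcap_i,t-1).
def _example_wmr():
    returns = pd.DataFrame({"BTC": [0.02, -0.01], "ETH": [0.05, 0.03]})
    lagged_caps = pd.DataFrame({"BTC": [300.0, 310.0], "ETH": [100.0, 105.0]})
    wmr = returns.multiply(lagged_caps).sum(axis=1) / lagged_caps.sum(axis=1)
    # day 0: (0.02 * 300 + 0.05 * 100) / 400 = 0.0275
    return wmr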
dfPositiveSentimentSignal = pd.DataFrame()
dfNegativeSentimentSignal = pd.DataFrame()
dfAveragePositiveSentimentSignal = pd.DataFrame()
"""Utilities for testing ixmp.
These include:
- pytest hooks, `fixtures <https://docs.pytest.org/en/latest/fixture.html>`_:
.. autosummary::
:nosignatures:
ixmp_cli
tmp_env
test_mp
…and assertions:
.. autosummary::
assert_logs
- Methods for setting up and populating test ixmp databases:
.. autosummary::
add_test_data
create_test_platform
make_dantzig
populate_test_platform
- Methods to run and retrieve values from Jupyter notebooks:
.. autosummary::
run_notebook
get_cell_output
"""
import contextlib
import logging
import os
from collections import namedtuple
from contextlib import contextmanager
from copy import deepcopy
from itertools import chain, product
from math import ceil
try:
import resource
has_resource_module = True
except ImportError:
# Windows
has_resource_module = False
import shutil
import sys
import numpy as np
import pandas as pd
import pint
import pytest
import xarray as xr
from click.testing import CliRunner
from . import cli
from . import config as ixmp_config
from .core import IAMC_IDX, Platform, Scenario, TimeSeries
from .reporting import Quantity
log = logging.getLogger(__name__)
models = {
"dantzig": {
"model": "canning problem",
"scenario": "standard",
},
}
# pytest hooks and fixtures
def pytest_sessionstart(session):
"""Unset any configuration read from the user's directory."""
ixmp_config.clear()
# Further clear an automatic reference to the user's home directory.
# See fixture tmp_env below
ixmp_config.values["platform"]["local"].pop("path")
def pytest_report_header(config, startdir):
"""Add the ixmp configuration to the pytest report header."""
return f"ixmp config: {repr(ixmp_config.values)}"
@pytest.fixture(scope="session")
def ixmp_cli(tmp_env):
"""A CliRunner object that invokes the ixmp command-line interface."""
class Runner(CliRunner):
def invoke(self, *args, **kwargs):
return super().invoke(cli.main, *args, env=tmp_env, **kwargs)
yield Runner()
@pytest.fixture
def protect_pint_app_registry():
"""Protect pint's application registry.
Use this fixture on tests which invoke code that calls
:meth:`pint.set_application_registry`. It ensures that the environment for
other tests is not altered.
"""
import pint
# Use deepcopy() in case the wrapped code modifies the application
# registry without swapping out the UnitRegistry instance for a different
# one
saved = deepcopy(pint.get_application_registry())
yield
pint.set_application_registry(saved)
@pytest.fixture(scope="session")
def tmp_env(tmp_path_factory):
"""Return the os.environ dict with the IXMP_DATA variable set.
IXMP_DATA will point to a temporary directory that is unique to the
test session. ixmp configuration (i.e. the 'config.json' file) can be
written and read in this directory without modifying the current user's
configuration.
"""
base_temp = tmp_path_factory.getbasetemp()
os.environ["IXMP_DATA"] = str(base_temp)
# Set the path for the default/local platform in the test directory
localdb = base_temp / "localdb" / "default"
ixmp_config.values["platform"]["local"]["path"] = localdb
# Save for other processes
ixmp_config.save()
yield os.environ
@pytest.fixture(scope="class")
def test_mp(request, tmp_env, test_data_path):
"""An empty ixmp.Platform connected to a temporary, in-memory database."""
# Long, unique name for the platform.
# Remove '/' so that the name can be used in URL tests.
platform_name = request.node.nodeid.replace("/", " ")
# Add a platform
ixmp_config.add_platform(
platform_name, "jdbc", "hsqldb", url=f"jdbc:hsqldb:mem:{platform_name}"
)
# Launch Platform
mp = Platform(name=platform_name)
yield mp
# Teardown: don't show log messages when destroying the platform, even if
# the test using the fixture modified the log level
mp._backend.set_log_level(logging.CRITICAL)
del mp
# Remove from config
ixmp_config.remove_platform(platform_name)
def bool_param_id(name):
"""Parameter ID callback for :meth:`pytest.mark.parametrize`.
This formats a boolean value as 'name0' (False) or 'name1' (True) for
easier selection with e.g. ``pytest -k 'name0'``.
"""
return lambda value: "{}{}".format(name, int(value))
# Create and populate ixmp databases
def add_test_data(scen: Scenario):
# New sets
t_foo = ["foo{}".format(i) for i in (1, 2, 3)]
t_bar = ["bar{}".format(i) for i in (4, 5, 6)]
t = t_foo + t_bar
y = list(map(str, range(2000, 2051, 10)))
# Add to scenario
scen.init_set("t")
scen.add_set("t", t)
scen.init_set("y")
scen.add_set("y", y)
# Data
ureg = pint.get_application_registry()
x = Quantity(
xr.DataArray(np.random.rand(len(t), len(y)), coords=[("t", t), ("y", y)]),
units=ureg.kg,
)
# As a pd.DataFrame with units
x_df = x.to_series().rename("value").reset_index()
x_df["unit"] = "kg"
scen.init_par("x", ["t", "y"])
scen.add_par("x", x_df)
return t, t_foo, t_bar, x
MODEL = "canning problem"
SCENARIO = "standard"
HIST_DF = pd.DataFrame(
[[MODEL, SCENARIO, "DantzigLand", "GDP", "USD", 850.0, 900.0, 950.0]],
columns=IAMC_IDX + [2000, 2005, 2010],
)
INP_DF = pd.DataFrame(
[[MODEL, SCENARIO, "DantzigLand", "Demand", "cases", 850.0, 900.0]],
columns=IAMC_IDX + [2000, 2005],
)
TS_DF = pd.concat([HIST_DF, INP_DF], sort=False)
import argparse
import os
import pdb
import shutil
from timeit import default_timer as timer
import numpy as np
import pandas as pd
from tqdm import tqdm
from evaluation import write_submission
def iters_ensemble(args):
'''
Ensemble on different iterations and generate ensembled files in fusioned folder
'''
## directories
if args.task_type == 'sed_only':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_sed_epoch_{}'
iterator = range(38, 42, 2)
elif args.task_type == 'two_staged_eval':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_doa_epoch_{}'
iterator = range(78, 82, 2)
## average ensemble
print('\n===> Average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for epoch_num in iterator:
fusion_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), fusion_fn.format(epoch_num))
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this folder here is only used for supplying fn
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to sed_mask_fusioned folder
fn_path = os.path.join(fusioned_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + fusioned_dir)
print('\n===> Iterations ensemble finished!')
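# Hedged illustration (not part of the original script): the average ensemble above pairs
# up the prediction matrices for the same file from the selected epochs and averages them
# frame-wise, truncating to the shorter length when the frame counts differ.
def _example_average_two_epochs():
    epoch_a = np.random.rand(120, 11)  # frames x classes from one checkpoint (toy shapes)
    epoch_b = np.random.rand(118, 11)  # slightly fewer frames from another checkpoint
    min_len = min(epoch_a.shape[0], epoch_b.shape[0])
    return (epoch_a[:min_len, :] + epoch_b[:min_len, :]) / 2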
def threshold_iters_ensemble(args):
'''
Threshold the ensembled iterations and write to submissions
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# read doa predictions from doa_fusioned directory
fn_path = os.path.join(doa_fusioned_dir, fn)
doa_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to all_test_fusioned
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': doa_fusioned
}
write_submission(submit_dict, test_fusioned_dir)
iterator.close()
print('\n' + test_fusioned_dir)
print('\n===> Threshold iterations ensemble finished!')
def models_ensemble(args):
'''
Ensemble on different iterations and generate ensembled files in fusioned folder
'''
# directories
if args.task_type == 'sed_only':
fusion_folder = 'sed_mask_fusioned'
fusioned_folder = 'sed_mask_models_fusioned'
elif args.task_type == 'two_staged_eval':
fusion_folder = 'doa_fusioned'
fusioned_folder = 'doa_models_fusioned'
print('\n===> Model average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for model_folder in sorted(os.listdir(submissions_dir)):
if not model_folder.startswith('.') and model_folder != 'models_ensemble':
print('\n' + model_folder)
fusion_dir = os.path.join(submissions_dir, model_folder, fusion_folder)
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this folder here is only used for supplying fn
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
models_ensemble_dir = os.path.join(submissions_dir, 'models_ensemble', fusioned_folder)
os.makedirs(models_ensemble_dir, exist_ok=True)
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to sed_mask_fusioned folder
fn_path = os.path.join(models_ensemble_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + models_ensemble_dir)
print('\n===> Models ensemble finished!')
def threshold_models_ensemble(args):
'''
Threshold the ensembled models and write to submissions
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'sed_mask_models_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'doa_models_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
                prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# First import the necessary modules
import scipy as sp
import numpy as np
# Visualization libraries
import matplotlib.pyplot as plt
import seaborn as sb
# Data analysis library
import pandas as pd
# WOrking with dates
import datetime
import sys
import os
datadir='sample_data'
os.chdir(datadir)
# Import data files downloaded manually from Grafana and placed in the same folder as this notebook
HUM1 = pd.read_csv("s_2.csv", sep='\t', header=None, decimal=",")
# RAiSERHD module
# <NAME>, 23 Feb 2022
# import packages
import h5py
import numpy as np
import pandas as pd
import time as ti
import os, warnings
from astropy import constants as const
from astropy import units as u
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.cosmology import FlatLambdaCDM
from astropy.io import fits
from astropy import wcs
from copy import copy
from matplotlib import pyplot as plt
from matplotlib import cm, rc
from matplotlib.colors import LogNorm
from matplotlib.ticker import FormatStrFormatter, NullFormatter, LogLocator
from numba import jit
from scipy.optimize import least_squares
from scipy.special import gamma, zeta
## Define global variables that can be adjusted to customise model output
# basic constants
year = 365.2422*24*3600 # average year in seconds
maverage = (0.6*const.m_p.value) # kg average particle mass
hubble = 0.7 # dimensionless Hubble parameter
OmegaM = 0.27 # fraction of matter in the flat universe
OmegaD = 0.73 # fraction of dark energy in the flat universe
freq_cmb = 5.879e10 # frequency of cosmic microwave background at z = 0
temp_cmb = 2.725 # temperature of cosmic microwave background at z = 0
c_speed = const.c.value # speed of light
e_charge = const.e.value # electron charge
k_B = const.k_B.value # Boltzmann constant
m_e = const.m_e.value # electron mass
mu0 = const.mu0.value # vacuum permeability
sigma_T = const.sigma_T.value # electron scattering cross-section
# model parameters that can be optimised for efficiency
nangles = 16 # number of angles to calculate expansion rate along (must be greater than 1)
betaRegions = 64 # set maximum number of beta regions
limTime = (year) # the FR-II limit must be used before this time
stepRatio = 1.01 # ratio to increase time/radius
crit_age = 0.95 # fraction of source age for lower end of power law approximations
lambda_min = 1e-256 # minimum value of Lambda for computational efficiency
# shocked gas and lobe parameters
chi = 2*np.pi/3.0 # lobe geometry parameter
shockAxisRatio = 0.5875 # exponent relating the cocoon axis ratio to the shocked gas axis ratio
shockRadius = 1.072 # fraction of the radius the shocked gas is greater than the lobe
gammaX = (5./3) # lorentz factor of external gas
gammaJ = (4./3) # lorentz factor of jet plasma
# set electron energy distribution constants
Lorentzmin = 780. # minimum Lorentz factor of injected electrons AT HOTSPOT for Cygnus A
Lorentzmax = 1e6 # effectively infinity
# density and temperature profiles
rCutoff = 0.01 # minimum radius to match profiles as a fraction of r200
betaMax = 2 # set critical value above which the cocoon expands balistically
# average and standard deviation of Vikhlinin model parameters
alphaAvg = 1.64 # corrected for removal of second core term
alphaStdev = 0.30
betaPrimeAvg = 0.56
betaPrimeStdev = 0.10
gammaPrimeAvg = 3
gammaPrimeStdev = 0
epsilonAvg = 3.23
epsilonStdev = 0 # 1.93; this parameter has little effect on profile
rCoreAvg = 0.087 # this is ratio of rc to r200
rCoreStdev = 0.028
rSlopeAvg = 0.73 # this is ratio of rs to r200
rSlopeStdev = 0 # 0.39; this parameter has little effect on profile
# temperature parameters
TmgConst = (-2.099)
TmgSlope = 0.6678
TmgError = 0.0727
# new temperature parameters assuming heating from AGN during expansion
TmgAvg = 7.00
TmgStdev = 0.28
# approximate halo to gas fraction conversion
# for halo masses between 10^12 and 10^15 and redshifts 0 < z < 5
halogasfracCONST1z0 = (-0.881768418)
halogasfracCONST1z1 = (-0.02832004)
halogasfracCONST2z0 = (-0.921393448)
halogasfracCONST2z1 = 0.00064515
halogasfracSLOPE = 0.053302276
# uncertainties, in dex
dhalogasfracz0 = 0.05172769
dhalogasfracz1 = (-0.00177947)
# correction to SAGE densities
SAGEdensitycorr = (-0.1)
## Define functions for run-time user output
def __join(*values):
return ";".join(str(v) for v in values)
def __color_text(s, c, base=30):
template = '\x1b[{0}m{1}\x1b[0m'
t = __join(base+8, 2, __join(*c))
return template.format(t, s)
class Colors:
DogderBlue = (30, 144, 255)
Green = (0,200,0)
Orange = (255, 165, 0)
## Define main function to run RAiSE HD
def RAiSE_run(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5, equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, brightness=True, angle=0., resolution='standard', seed=None, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# record start time of code
start_time = ti.time()
# function to test type of inputs and convert type where appropriate
if nangles <= 1:
raise Exception('Private variable nangles must be greater than 1.')
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# download and pre-process particles from hydrodynamical simulation
if not resolution == None:
print(__color_text('Reading particle data from file.', Colors.Green))
time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio = __PLUTO_particles('RAiSE_particles.hdf5')
# set seed for quasi-random profiles
if not seed == None:
__set_seed(seed)
# create folder for output files if not present
if not os.path.exists('LDtracks'):
os.mkdir('LDtracks')
if not resolution == None:
print(__color_text('Running RAiSE dynamics and emissivity.', Colors.Green))
else:
print(__color_text('Running RAiSE dynamics.', Colors.Green))
for i in range(0, len(redshift)):
for j in range(0, len(axis_ratio)):
for k in range(0, len(jet_power)):
for l in range(0, nenvirons):
for m in range(0, len(active_age)):
for n in range(0, len(equipartition)):
for o in range(0, len(jet_lorentz)):
# set correct data types for halo mass and core density
if isinstance(halo_mass, (list, np.ndarray)):
new_halo_mass = halo_mass[l]
else:
new_halo_mass = halo_mass
if isinstance(rho0Value, (list, np.ndarray)):
new_rho0Value = rho0Value[l]
new_temperature = temperature[l]
new_betas = betas[l]
new_regions = regions[l]
else:
new_rho0Value = rho0Value
new_temperature = temperature
new_betas = betas
new_regions = regions
# calculate dynamical evolution of lobe and shocked shell using RAiSE dynamics
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda = __RAiSE_environment(redshift[i], axis_ratio[j], jet_power[k], source_age, halo_mass=new_halo_mass, rand_profile=rand_profile, rho0Value=new_rho0Value, regions=new_regions, betas=new_betas, temperature=new_temperature, active_age=active_age[m], jet_lorentz=jet_lorentz[o], gammaCValue=gammaCValue, aj_star=aj_star, jet_angle=jet_angle, axis_exponent=axis_exponent, fill_factor=fill_factor)
# calculate synchrotron emission from lobe using particles and RAiSE model
if not resolution == None:
location, luminosity, magnetic_field = __RAiSE_emissivity(frequency, redshift[i], time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age[m], equipartition[n], spectral_index, gammaCValue=gammaCValue, lorentz_min=lorentz_min, resolution=resolution)
# create pandas dataframe for integrated emission
df = pd.DataFrame()
df['Time (yrs)'] = 10**np.asarray(source_age).astype(np.float_)
df['Size (kpc)'] = 2*lobe_lengths[0,:]/const.kpc.value
df['Pressure (Pa)'] = shock_pressures[0,:]
df['Axis Ratio'] = lobe_lengths[0,:]/lobe_lengths[-1,:]
if not resolution == None:
for q in range(0, len(frequency)):
if frequency[q] > 0:
df['B{:.2f} (T)'.format(frequency[q])] = magnetic_field[:,q]
df['L{:.2f} (W/Hz)'.format(frequency[q])] = np.nansum(luminosity[:,:,q], axis=1)
# write data to file
if isinstance(rho0Value, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
elif isinstance(halo_mass, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate brightness per pixel across the source
if brightness == True and not resolution == None:
x_values, y_values, brightness_list = __RAiSE_brightness_map(frequency, redshift[i], source_age, lobe_lengths, location, luminosity, angle, resolution=resolution)
for p in range(0, len(source_age)):
for q in range(0, len(frequency)):
# create pandas dataframe for spatially resolved emission
if isinstance(x_values[p][q], (list, np.ndarray)):
df = pd.DataFrame(index=x_values[p][q]/const.kpc.value, columns=y_values[p][q]/const.kpc.value, data=brightness_list[p][q])
# write surface brightness map to file
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
else:
if isinstance(rho0Value, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
elif isinstance(halo_mass, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# print total run time to screen
print(__color_text('RAiSE completed running after {:.2f} seconds.'.format(ti.time() - start_time), Colors.Green))
# Define function to test type of inputs and convert type where appropriate
def __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz):
# convert redshift, axis ratio and jet power to correct data types
if not isinstance(frequency, (list, np.ndarray)):
frequency = [frequency]
for i in range(0, len(frequency)):
if not isinstance(frequency[i], (int, float)):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
else:
if frequency[i] <= 0:
frequency[i] = -1.
warnings.warn('Pressure map will be produced instead of surface brightness image.', category=UserWarning)
elif not (5 < frequency[i] and frequency[i] < 20):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
if not isinstance(redshift, (list, np.ndarray)):
redshift = [redshift]
for i in range(0, len(redshift)):
if not isinstance(redshift[i], (int, float)) or not (0 < redshift[i] and redshift[i] < 20):
raise Exception('Redshift must be provided as a float or list/array of floats.')
if not isinstance(axis_ratio, (list, np.ndarray)):
axis_ratio = [axis_ratio]
for i in range(0, len(axis_ratio)):
if not isinstance(axis_ratio[i], (int, float)) or not (1 <= axis_ratio[i] and axis_ratio[i] < 20):
raise Exception('Axis ratio must be provided as a float or list/array of floats and be greater than 1.')
if not isinstance(jet_power, (list, np.ndarray)):
jet_power = [jet_power]
for i in range(0, len(jet_power)):
if not isinstance(jet_power[i], (int, float)) or not (33 < jet_power[i] and jet_power[i] < 46):
raise Exception('Jet power must be provided as a float or list/array of floats in units of log10 Watts.')
if not isinstance(source_age, (list, np.ndarray)):
source_age = [source_age]
for i in range(0, len(source_age)):
if not isinstance(source_age[i], (int, float)) or not (0 <= source_age[i] and source_age[i] <= 10.14):
raise Exception('Source age must be provided as a float or list/array of floats in units of log10 years.')
else:
source_age[i] = float(source_age[i])
if not isinstance(active_age, (list, np.ndarray)):
active_age = [active_age]
for i in range(0, len(active_age)):
if not isinstance(active_age[i], (int, float)) or not (0 <= active_age[i] and active_age[i] <= 10.14):
raise Exception('Active age must be provided as a float or list/array of floats in units of log10 years.')
if not isinstance(equipartition, (list, np.ndarray)):
equipartition = [equipartition]
for i in range(0, len(equipartition)):
if not isinstance(equipartition[i], (int, float)) or not (-6 < equipartition[i] and equipartition[i] < 6):
raise Exception('Equipartition factor must be provided as a float or list/array of floats in units of log10.')
if not isinstance(jet_lorentz, (list, np.ndarray)):
jet_lorentz = [jet_lorentz]
for i in range(0, len(jet_lorentz)):
if not isinstance(jet_lorentz[i], (int, float)) or not (-100 <= jet_lorentz[i] and jet_lorentz[i] < 20):
raise Exception('Jet bulk lorentz factor factor must be provided as a float or list/array of floats.')
elif (-100 <= jet_lorentz[i] and jet_lorentz[i] <= 1):
jet_lorentz[i] = 0
warnings.warn('Jet phase will not be included in this simulation.', category=UserWarning)
# convert environment to correct data types
if not isinstance(halo_mass, (list, np.ndarray)) and not halo_mass == None:
halo_mass = [halo_mass]
nenvirons_halo = len(halo_mass)
elif not halo_mass == None:
nenvirons_halo = len(halo_mass)
if isinstance(halo_mass, (list, np.ndarray)):
for i in range(0, len(halo_mass)):
if not isinstance(halo_mass[i], (int, float)) or not (9 < halo_mass[i] and halo_mass[i] < 17):
raise Exception('Dark matter halo mass must be provided as a float or list/array of floats in units of log10 stellar mass.')
if not isinstance(rho0Value, (list, np.ndarray)) and not rho0Value == None:
rho0Value = [rho0Value]
nenvirons_rho = len(rho0Value)
elif not rho0Value == None:
nenvirons_rho = len(rho0Value)
if isinstance(rho0Value, (list, np.ndarray)):
if not isinstance(temperature, (list, np.ndarray)) and not temperature == None:
temperature = [temperature]*nenvirons_rho
elif temperature == None or not len(temperature) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(betas, (list, np.ndarray)) and not isinstance(betas[0], (list, np.ndarray)):
betas = [betas]*nenvirons_rho
elif not isinstance(betas, (list, np.ndarray)) and not betas == None:
betas = [[betas]]*nenvirons_rho
elif betas == None or not len(betas) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(regions, (list, np.ndarray)) and not isinstance(regions[0], (list, np.ndarray)):
regions = [regions]*nenvirons_rho
elif not isinstance(regions, (list, np.ndarray)) and not betas == None:
regions = [[regions]]*nenvirons_rho
elif regions == None or not len(regions) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(rho0Value, (list, np.ndarray)):
nenvirons = nenvirons_rho
for i in range(0, len(rho0Value)):
if not isinstance(rho0Value[i], (int, float)) or not (1e-30 < rho0Value[i] and rho0Value[i] < 1e-15):
raise Exception('Core gas density must be provided as a float or list/array of floats in units of kg/m^3.')
for i in range(0, len(temperature)):
if not isinstance(temperature[i], (int, float)) or not (0 < temperature[i] and temperature[i] < 1e12):
raise Exception('Gas temperature must be provided as a float or list/array of floats in units of Kelvin.')
else:
nenvirons = nenvirons_halo
return frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons
# Define random seed function
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __set_seed(value):
np.random.seed(value)
## Define functions for analytic modelling of the environment
# function to calculate properties of the environment and call RAiSE_evolution
def __RAiSE_environment(redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., gammaCValue=5./3, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# check minimal inputs
if halo_mass == None and (not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray))):
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate gas mass and virial radius of halo unless density and temperature profile fully specified
gasfraction = 0
if not halo_mass == None:
rVir = (10**halo_mass*const.M_sun.value/(100./const.G.value*(100.*hubble*np.sqrt(OmegaM*(1 + redshift)**3 + OmegaD)/const.kpc.value)**2))**(1./3)
if rand_profile == False:
gasfraction = __HalogasfracFunction(halo_mass, redshift)
else:
gasfraction = __rand_norm(__HalogasfracFunction(halo_mass, redshift), __dHalogasfracFunction(halo_mass, redshift))
gasMass = 10**(halo_mass + gasfraction)*const.M_sun.value
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
if not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray)):
# set maximum number of regions
nregions = betaRegions
nregions, new_betas, new_regions = __DensityProfiler(rVir, nregions, rand_profile)
elif len(betas) == len(regions):
# set maximum number of regions
nregions = len(betas)
new_betas = np.asarray(betas.copy())
new_regions = np.asarray(regions.copy())
else:
raise Exception('Variables betas and regions must be arrays of the same length.')
# calculate the average temperature of the external medium
if temperature == None:
if not halo_mass == None:
if rand_profile == False:
tempFlat = 10**TmgAvg
tempCluster = 10**(TmgConst + TmgSlope*halo_mass)
else:
tempFlat = 10**(__rand_norm(TmgAvg, TmgStdev))
tempCluster = 10**(__rand_norm(TmgConst + TmgSlope*halo_mass, TmgError))
temperature = max(tempFlat, tempCluster) # take the highest temperature out of the flat profile and cluster model
else:
raise Exception('Either the halo mass or temperature must be provided as model inputs.')
# determine initial value of density parameter given gas mass and density profile
if not rho0Value == None:
# determine density parameter in the core
k0Value = rho0Value*new_regions[0]**new_betas[0]
# extend first beta region to a radius of zero
new_regions[0] = 0
elif not halo_mass == None:
# extend first beta region to a radius of zero
new_regions[0] = 0
# find relative values (i.e. to 1) of density parameter in each beta region
kValues = __DensityParameter(nregions, 1.0, new_betas, new_regions)
# determine density parameter in the core
k0Value = __k0ValueFinder(rVir, gasMass, nregions, new_betas, new_regions, kValues)
else:
raise Exception('Either the halo mass or core density must be provided as model inputs.')
# find values of density parameter in each beta region
kValues = __DensityParameter(nregions, k0Value, new_betas, new_regions)
# call RadioSourceEvolution function to calculate Dt tracks
return __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, new_betas, new_regions, kValues, temperature, jet_lorentz, aj_star, jet_angle, axis_exponent, fill_factor)
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityProfiler(rVir, nregions, rand_profile):
# instantiate variables
betas, regions = np.zeros(nregions), np.zeros(nregions)
# set values of Vikhlinin model parameters
if rand_profile == False:
alpha = alphaAvg
betaPrime = betaPrimeAvg
gammaPrime = gammaPrimeAvg # this value has no uncertainty
epsilon = epsilonAvg
rCore = rCoreAvg
rSlope = rSlopeAvg
else:
alpha = __rand_norm(alphaAvg, alphaStdev)
betaPrime = __rand_norm(betaPrimeAvg, betaPrimeStdev)
gammaPrime = __rand_norm(gammaPrimeAvg, gammaPrimeStdev) # this value has no uncertainty
epsilon = __rand_norm(epsilonAvg, epsilonStdev)
rCore = __rand_norm(rCoreAvg, rCoreStdev)
rSlope = __rand_norm(rSlopeAvg, rSlopeStdev)
# set minimum and maximum radius for density profile to be matched
rmin = rCutoff*rVir
rmax = rVir
# use logarithmic radius scale
r = rmin
ratio = (rmax/rmin)**(1./(nregions)) - 1
for count in range(0, nregions):
# set radius at low end of region
rlow = r
# calculate relative density at rlow, i.e. ignoring rho_0 factor
rhoLow = np.sqrt((rlow/(rCore*rVir))**(-alpha)/((1 + rlow**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rlow**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# increment radius
dr = r*ratio
r = r + dr
# set radius at high end of region
rhigh = r
        # calculate relative density at rhigh, i.e. ignoring rho_0 factor
rhoHigh = np.sqrt((rhigh/(rCore*rVir))**(-alpha)/((1 + rhigh**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rhigh**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# set value of innermost radius of each beta region
if count == 0:
# extend first beta region to a radius of zero
regions[count] = 0
else:
regions[count] = rlow
# calculate exponent beta for each region to match density profile, ensuring beta is less than 2
if (-np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh) < betaMax):
betas[count] = -np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh)
else:
# ensure beta is less than (or equal to) 2
betas[count] = betaMax
# set this count to be the number of distinct regions
nregions = count + 1
break
return nregions, betas, regions
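# Hedged illustration (not part of the original module): each beta region approximates the
# Vikhlinin profile by a power law rho ~ r^-beta, with the exponent chosen to match the
# relative densities at the region boundaries and capped at betaMax.
def _example_region_beta(rlow=50e3, rhigh=60e3, rhoLow=1.0, rhoHigh=0.8):
    beta = -np.log(rhoLow / rhoHigh) / np.log(rlow / rhigh)
    return min(beta, betaMax)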
# find values of density parameter in each beta region
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityParameter(nregions, k0Value, betas, regions):
# instantiate variables
kValues = np.zeros(nregions)
# calculate density parameters in each region
for count in range(0, nregions):
# match tracks between regions `a' and `b'
if count > 0:
# find replicating core density in region `b' required to match pressures and times
kValues[count] = kValues[count - 1]*regions[count]**(betas[count] - betas[count - 1])
# if first region, set initial value of replicating core density as actual core density
else:
kValues[count] = k0Value
return kValues
# determine value of the density parameter at the core given gas mass and density profile
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __k0ValueFinder(rVir, gasMass, nregions, betas, regions, kValues):
# set volume to zero initially
volume = 0
    # calculate the weighted volume integral by analytically integrating the volume in each beta region
for count in range(0, nregions):
# set lower bound of analytic integral
rlow = regions[count]
# set upper bound of analytic integral
if (count + 1 == nregions):
rhigh = rVir
else:
rhigh = regions[count + 1]
        # increment the total weighted volume by the weighted volume of this region
volume = volume + 4*np.pi*(kValues[count]/kValues[0])/(3 - betas[count])*(rhigh**(3 - betas[count]) - rlow**(3 - betas[count]))
    # calculate the density parameter at the core from the gas mass and weighted volume
k0Value = gasMass/volume
return k0Value
# random normal with values truncated to avoid sign changes
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rand_norm(mean, stdev):
rand_number = np.random.normal(mean, stdev)
while (mean*rand_number < 0 or np.abs(rand_number - mean) > 2*stdev):
rand_number = np.random.normal(mean, stdev)
return rand_number
# gas fraction-halo mass relationship
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __HalogasfracFunction(halo_mass, redshift):
return max(halogasfracCONST1z0 + halogasfracCONST1z1*redshift, halogasfracCONST2z0 + halogasfracCONST2z1*redshift) + halogasfracSLOPE*(halo_mass - 14) + SAGEdensitycorr # in log space
# gas fraction-halo mass relationship error
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __dHalogasfracFunction(halo_mass, redshift):
return dhalogasfracz0 + dhalogasfracz1*redshift # in log space
## Define functions required for RAiSE dynamical evolution
# function to calculate dynamical evolution of lobe and shocked shell
def __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, betas, regions, kValues, temperature, jet_lorentz, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# convert jet power and source age to correct units
QavgValue = 10**jet_power/2. # set the power of *each* jet; convert from log space
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = np.array([10**source_age*year])
tActive = 10**active_age*year
# calculate angle of current radial line
angles = np.arange(0, nangles, 1).astype(np.int_)
dtheta = (np.pi/2)/nangles
theta = dtheta*(angles + 0.5)
# calculate opening angle of jet
open_angle = (jet_angle*np.pi/180)/(axis_ratio/2.83)
# evaluate the translation coefficients eta_c and eta_s
eta_c = 1./np.sqrt(axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_s = 1./np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
# evaluate the translation coefficient zeta_s/eta_s at t -> infinity
zetaeta = np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)/np.sqrt(axis_ratio**(4*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
    eta_c[0], eta_s[0], zetaeta[0] = 1., 1., 1.
# calculate the differential volume element coefficient chi
dchi = 4*np.pi/3.*np.sin(theta)*np.sin(dtheta/2.)
# solve RAiSE dynamics iteratively to find thermal component of lobe pressure
if jet_lorentz > 1:
# run code in strong-shock limit to calibrate initial velocity
x_time = 10**10.14*year
_, _, _, _, _, _, _, critical_point_1 = __RAiSE_runge_kutta(QavgValue, np.array([x_time]), x_time, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed, strong_shock=True)
# run code for full RAiSE HD dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point_3 = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed*critical_point_1[2]/critical_point_1[3])
else:
# run code for RAiSE X dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, _ = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue)
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda
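# Descriptive note (added): as instantiated in __RAiSE_runge_kutta below, lobe_lengths,
# shock_lengths and shock_pressures have shape (nangles, len(source_age)), while
# lobe_minor, lambda_crit, alphaP_denv and alpha_lambda are 1D arrays over source_age.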
# function to apply Runge-Kutta method and extract values at requested time steps
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_runge_kutta(QavgValue, source_age, active_age, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=0., strong_shock=False):
# instantiate variables
X, P = np.zeros((nangles, 5)), np.zeros((nangles, 4))
critical_point = np.zeros(4)
regionPointer = np.zeros(nangles).astype(np.int_)
lobe_minor, lambda_crit, alphaP_denv, alpha_lambda = np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age))
lobe_lengths, shock_lengths, shock_pressures = np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age)))
# calculate injection ages to derive time-average power-law indices for external pressure and filling factor
inject_age = np.zeros(2*len(source_age))
inject_axis_ratios, inject_pressures, inject_lambdas = np.zeros(2*len(source_age)), np.zeros(2*len(source_age)), np.zeros(2*len(source_age))
for timePointer in range(0, len(source_age)):
inject_age[2*timePointer:2*(timePointer + 1)] = np.asarray([crit_age*source_age[timePointer], source_age[timePointer]])
inject_index = np.argsort(inject_age) # sort ages in ascending order
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
i = 0
for timePointer in range(0, len(source_age)):
# set initial conditions for each volume element
if timePointer == 0:
# calculate initial time and radius for ODE
FR2time = limTime
if jet_lorentz > 1:
FR2radius = bulk_velocity*limTime
FR2velocity = bulk_velocity # eta_R is very large
else:
FR2radius = np.sqrt(1 - 1./100**2)*c_speed*limTime
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# test if this radius is above start of second region boundary
if (regions[1] < FR2radius):
FR2radius = regions[1]
if jet_lorentz > 1:
FR2time = regions[1]/bulk_velocity
FR2velocity = bulk_velocity
else:
FR2time = regions[1]/(np.sqrt(1 - 1./100**2)*c_speed)
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# calculate the initial jet/shock shell radius and velocity for each angle theta
X[angles,0] = FR2time
X[angles,1] = FR2radius*eta_s
X[angles,2] = FR2velocity*eta_s
if jet_lorentz > 1:
X[0,3], X[angles[1:],3] = bulk_lorentz, 1./np.sqrt(1 - (FR2velocity*eta_s[angles[1:]]/c_speed)**2)
else:
X[0,3], X[angles[1:],3] = 100, 100*eta_s[angles[1:]]
X[angles,4] = -1 # null value
# set region pointer to first (non-zero) region if smaller than FR2 radius
index = regions[1] < X[angles,1]
regionPointer[index] = 1
regionPointer[np.logical_not(index)] = 0
# calculate fraction of jet power injected into each volume element
injectFrac = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac = injectFrac/np.sum(injectFrac) # sum should be equal to unity
# solve ODE to find radius and pressure at each time step
while (X[0,0] < source_age[timePointer]):
while (X[0,0] < inject_age[inject_index[i]]):
# calculate the appropriate density profile for each angle theta
for anglePointer in range(0, nangles):
while (regionPointer[anglePointer] + 1 < nregions and X[anglePointer,1] > regions[regionPointer[anglePointer] + 1]):
regionPointer[anglePointer] = regionPointer[anglePointer] + 1
# check if next step passes time point of interest
if (X[0,0]*stepRatio > inject_age[inject_index[i]]):
step = inject_age[inject_index[i]] - X[0,0]
else:
step = X[0,0]*(stepRatio - 1)
# update estimates of time, radius and velocity
__rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,3] = np.maximum(1, X[:,3])
# find location of jet--lobe transition
critical_point[0], critical_point[1], critical_point[2], critical_point[3] = X[0,0], X[0,1], X[0,2]*X[0,3], X[0,4]
# record axis ratio, external pressure, filling factor and injection times
if P[-1,0] > 0:
inject_axis_ratios[inject_index[i]] = 1./(P[0,0]/P[-1,0])**2 # inverted to match alpha_lambda definition
else:
inject_axis_ratios[inject_index[i]] = 1
inject_pressures[inject_index[i]] = P[0,2]
inject_lambdas[inject_index[i]] = P[0,3]
# update injection age if not a requested source age
if inject_age[inject_index[i]] < source_age[timePointer]:
i = i + 1
# calculate the lobe and shocked shell length, shock pressure and total pressure as a function of angle
lobe_lengths[angles,timePointer] = P[angles,0]
shock_lengths[angles,timePointer] = X[angles,1]
shock_pressures[angles,timePointer] = P[angles,1]
lambda_crit[timePointer] = P[0,3]
# calculate lobe minor axis (associated with dimensions of shocked shell) at this time step
lobe_minor[timePointer] = X[-1,1]*eta_c[-1]/(shockRadius*eta_s[-1])
# calculate the slope of external pressure profile at this time step
if inject_pressures[inject_index[2*timePointer]] <= 0:
alphaP_denv[timePointer] = 0
else:
alphaP_denv[timePointer] = np.log(inject_pressures[2*timePointer + 1]/inject_pressures[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer])
if inject_lambdas[2*timePointer] <= 0:
alpha_lambda[timePointer] = 1e9 # no emission from this injection time
else:
alpha_lambda[timePointer] = np.log(inject_lambdas[2*timePointer + 1]/inject_lambdas[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) + np.log(inject_axis_ratios[2*timePointer + 1]/inject_axis_ratios[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) # filling factor and changing volume/axis ratio
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point
# Runge-Kutta method to solve ODE in dynamical model
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# instantiate variables
Y, K1, K2, K3, K4 = np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5))
# fourth-order Runge-Kutta method
__xpsys(X, K1, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K1[:,:]
__xpsys(Y, K2, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K2[:,:]
__xpsys(Y, K3, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K3[:,:]
__xpsys(Y, K4, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,:] = X[:,:] + (step/6.)*(K1[:,:] + 2*K2[:,:] + 2*K3[:,:] + K4[:,:])
# coupled second order differential equations for lobe evolution
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __xpsys(X, f, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# Differential equations for X[0,1,2,3,4] = (time, radius, velocity, lorentz_factor, thermal_velocity)
# Additional variable for P[0,1,2,3] = (lobe_length, lobe_pressure, external_pressure, lambda_crit)
f[angles,0] = 1.
f[angles,1] = X[angles,2]
# test if the AGN is active at this time-step
if (X[0,0] <= active_age):
active_jet = 1
else:
active_jet = 0
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
# TWO-PHASE FLUID
if jet_lorentz > 1:
# calculate the lobe formation scale
eta_R = QavgValue*bulk_lorentz**2/(2*np.pi*kValues[regionPointer[0]]*(bulk_lorentz*bulk_velocity)*(bulk_lorentz - 1)*c_speed**2*(1 - np.cos(open_angle))*X[0,1]**(2 - betas[regionPointer[0]]))
# calculate lambda_crit
#if (eta_R/bulk_lorentz**2) > 1:
# lambda_crit = 0
#else:
# lambda_crit = 1
lambda_crit = np.exp(-(eta_R/bulk_lorentz**2)/(2*np.log(2)))
P[0,3] = lambda_crit
else:
P[0,3] = 1
# ACCELERATION
# update fraction of jet power injected into each volume element
injectFrac_new = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac_new = injectFrac_new/np.sum(injectFrac_new) # sum should be equal to unity
if jet_lorentz > 1:
injectFrac[angles] = (1 - lambda_crit)*injectFrac_new + lambda_crit*injectFrac # keep static at late times
else:
injectFrac[angles] = injectFrac_new[angles]
# acceleration of jet-head
if jet_lorentz > 1:
jet_acceleration = (betas[regionPointer[0]] - 2)*bulk_velocity*X[0,2]/(2*X[0,1]*(1 + eta_R**(-1./2))**2*eta_R**(1./2))
# acceleration of lobe (supersonic/subsonic)
if jet_lorentz > 1 and strong_shock == True:
f[angles,2] = np.minimum((gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)), (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,2]*X[angles,3]/(X[0,0] + year)) # ensure model doesn't run slower than limit due to numerics
elif jet_lorentz > 1:
f[angles,2] = (gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)) - (3*gammaCValue - betas[regionPointer[angles]])*(k_B*temperature/maverage)/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*(X[angles,3]*zetaeta[angles])**2)
else:
sub_angles = (X[angles,2]*X[angles,3]*zetaeta)**2/(gammaX*(k_B*temperature/maverage)) <= 1
super_angles = np.logical_not(sub_angles)
f[super_angles,2] = (gammaX + 1)*(gammaCValue - 1)*injectFrac[super_angles]*(QavgValue*active_jet)*X[super_angles,1]**(betas[regionPointer[super_angles]] - 3)/(2*X[super_angles,2]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*dchi[super_angles]*(X[super_angles,3]*zetaeta[super_angles])**2*kValues[regionPointer[super_angles]]) + (betas[regionPointer[super_angles]] - 3*gammaCValue)*(X[super_angles,2])**2/(2*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)) + (gammaX - 1)*(3*gammaCValue - betas[regionPointer[super_angles]])*(k_B*temperature/maverage)/(4*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*(X[super_angles,3]*zetaeta[super_angles])**2)
f[sub_angles,2] = (betas[regionPointer[sub_angles]] - 2)*(X[sub_angles,2])**2/X[sub_angles,1]
# combine acceleration from jet-head and lobe as two-phase fluid
if jet_lorentz > 1:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,2], f[angles[1:],2] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],2] = X[0,2]*eta_s[angles[1:]]
else:
f[0,2], f[angles[1:],2] = (1 - lambda_crit)*jet_acceleration + lambda_crit*f[0,2], (1 - lambda_crit)*jet_acceleration*eta_s[angles[1:]] + lambda_crit*f[angles[1:],2]
# calculate Lorentz factor of two-phase fluid
f[angles,3] = X[angles,3]**3*X[angles,2]*f[angles,2]/c_speed**2
# PRESSURES
# external pressure at each volume element
P[angles,2] = kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# set velocity associated with thermal component of lobe pressure
if jet_lorentz > 1 and critical_velocity > 0:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,4], f[angles[1:],4] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],4] = X[0,4]*eta_s[angles[1:]]
else:
f[angles,4] = (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,4]/(X[0,0] + year)
else:
X[angles,4], f[angles,4] = X[angles,2]*X[angles,3], f[angles,2]
# jet/lobe pressure at each volume element
volume = X[angles,1]**3*dchi[angles]
if jet_lorentz > 1:
# calculate lobe pressure
P[angles,1] = zetaeta[angles]**2*kValues[regionPointer[angles]]*X[angles,1]**(-betas[regionPointer[angles]])*(np.minimum(X[angles,2], X[angles,4]))**2 + kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
else:
# calculate lobe pressure
P[super_angles,1] = 2./(gammaX + 1)*zetaeta[super_angles]**2*kValues[regionPointer[super_angles]]*X[super_angles,1]**(-betas[regionPointer[super_angles]])*(X[super_angles,2]*X[super_angles,3])**2 - (gammaX - 1)/(gammaX + 1)*kValues[regionPointer[super_angles]]*(k_B*temperature/maverage)*X[super_angles,1]**(-betas[regionPointer[super_angles]])
P[sub_angles,1] = P[sub_angles,2]
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
# AXIS RATIO
if jet_lorentz > 1:
# calculate total mass of particles from the jet
particle_mass = QavgValue*np.minimum(active_age, X[0,0])/((bulk_lorentz - 1)*c_speed**2)
# calculate volume occupied by particles expanding at sound speed and maximum fillable volume within shocked shell
jet_sound = c_speed*np.sqrt(gammaJ - 1)
particle_volume = particle_mass/(gammaJ*pressure/jet_sound**2) # mass / density
shell_volume = np.sum(volume*eta_c/(shockRadius*eta_s))
# calculate (optimal) lobe volume as weighted sum of particle volume and maximum fillable volume (i.e. enable sound speed to reduce as lobe approaches size of shocked shell)
lobe_volume = 1./(1./(particle_volume/fill_factor)**axis_exponent + 1./(shell_volume)**axis_exponent)**(1./axis_exponent)
# find axis ratio for an ellipsoidal lobe
if lobe_volume > 0 and lambda_crit >= lambda_min:
lobe_axis_ratio = np.minimum(np.sqrt(2*np.pi*(X[0,1]/shockRadius)**3/(3*lobe_volume)), 1/np.tan(open_angle))
else:
lobe_axis_ratio = 1/np.tan(open_angle)
# update lobe length along the jet axis and axis ratio of shocked shell
P[0,0] = X[0,1]/shockRadius
# calculate geometry of each angular volume element
dtheta = (np.pi/2)/len(angles)
theta = dtheta*(angles + 0.5)
lobe_eta_c = 1./np.sqrt(lobe_axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
# set length of lobe along each angular volume element
P[angles[1:],0] = np.minimum(lobe_eta_c[angles[1:]]*P[0,0], X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])) # second condition should rarely be met
else:
# set length of lobe along each angular volume element
P[0,0], P[angles[1:],0] = X[0,1]/shockRadius, X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])
## Define functions to download and preprocess particles from hydrodynamical simulations
def __PLUTO_particles(particle_data_path):
# unpack particle data from hydrodynamical simulations
particle_dict = h5py.File(os.path.join(os.path.dirname(os.path.realpath(__file__)), particle_data_path), 'r')
# store variables at desired resolution
time = particle_dict['time'][:].astype(np.float32)
shock_time = particle_dict['tinject'][:,:].astype(np.float32)
major = particle_dict['major'][:].astype(np.float32)
minor = particle_dict['minor'][:].astype(np.float32)
x1 = particle_dict['x1'][:,:].astype(np.float32)
x2 = particle_dict['x2'][:,:].astype(np.float32)
x3 = particle_dict['x3'][:,:].astype(np.float32)
tracer = particle_dict['tracer'][:,:].astype(np.float32)
vx3 = particle_dict['vx3'][:,:].astype(np.float32)
volume = particle_dict['volume'][:,:].astype(np.float32)
pressure = particle_dict['pressure'][:,:].astype(np.float32)
press_minor = particle_dict['pressminor'][:].astype(np.float32)
alphaP_hyd = particle_dict['alphaP'][:,:].astype(np.float32)
alphaP_henv = particle_dict['alphaPenv'][:,:].astype(np.float32)
hotspot_ratio = particle_dict['hotspotratio'][:].astype(np.float32)
return time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio
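# Illustrative sketch (assumption, not part of the original module): a quick way to
# inspect the layout of a particle file before passing it to __PLUTO_particles;
# 'particles.hdf5' is a made-up file name used only for this example.
def _inspect_particle_file_sketch(path='particles.hdf5'):
    with h5py.File(path, 'r') as particle_dict:
        for key in particle_dict.keys():
            print(key, particle_dict[key].shape, particle_dict[key].dtype)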
## Define functions to add emissivity from particles in hydrodynamical simulations on top of dynamics
# function to manage orientation and distribution of particles from simulation output
def __RAiSE_emissivity(frequency, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age, equipartition, spectral_index, gammaCValue=5./3, lorentz_min=Lorentzmin, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
nsamples = 2048
elif resolution == 'high':
nsamples = 512
elif resolution == 'standard':
nsamples = 128
elif resolution == 'poor':
nsamples = 32
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# randomly generate viewing time in the simulated source age
timePointer = np.arange(0, nsamples).astype(np.int_)%len(time)
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
inverse_compton = np.zeros_like(frequency).astype(np.int_)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
if rest_frequency[freqPointer] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton[freqPointer] = 1
else:
rest_frequency = [10**frequency*(1 + redshift)]
inverse_compton = [0]
if rest_frequency[0] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton = [1]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
tActive = 10**active_age*year
equi_factor = 10**float(-np.abs(equipartition)) # ensure sign is correct
s_index = 2*float(np.abs(spectral_index)) + 1 # ensure sign is correct
# derive redshift dependent ancillary variables used by every analytic model
Ks = __RAiSE_Ks(s_index, gammaCValue, lorentz_min)
blackbody = __RAiSE_blackbody(s_index)
return __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody)
# function to calculate emissivity from each particle using RAiSE model
@jit(nopython=True, parallel=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody):
# instantiate variables
luminosity = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), len(rest_frequency)))
magnetic_field = np.zeros((len(tFinal), len(rest_frequency)))
magnetic_particle, magnetic_weighting = np.zeros((len(tFinal), len(timePointer), len(rest_frequency))), np.zeros((len(tFinal), len(timePointer), len(rest_frequency)))
location = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), 3))
# derive emissivity at each time step
for i in range(0, len(tFinal)):
# derive emissivity for random variations in particle distribution
for j in range(0, len(timePointer)):
# SHOCK ACCELERATION TIMES
new_shock_time = shock_time[:,timePointer[j]]*(tFinal[i]/time[timePointer[j]])*np.minimum(1., (tActive/tFinal[i])) # scale the last acceleration time to active age if source is a remnant
# PRESSURES
new_pressure = pressure[:,timePointer[j]]*(shock_pressures[-1,i]/press_minor[timePointer[j]]) # correction factor to match Model A
# correct the hotspot/lobe pressure ratio based on the dynamical model
new_pressure = new_pressure*((shock_pressures[0,i]/shock_pressures[-1,i])/hotspot_ratio[timePointer[j]] - 1)*(np.abs(x3[:,timePointer[j]])/major[timePointer[j]]) + new_pressure # increase log-space pressure linearly along lobe
# correct the evolutionary histories of the particles based on the dynamical model
alphaP_dyn = np.maximum(-2, np.minimum(0, alphaP_denv[i] + alphaP_hyd[:,timePointer[j]] - alphaP_henv[:,timePointer[j]]))
# VOLUMES
volume_fraction = volume[:,timePointer[j]]/(4*np.pi/3.*major[timePointer[j]]*minor[timePointer[j]]**2)
#volume_sum = np.nansum(volume_fraction[~np.isinf(volume_fraction)])
# cap the largest volumes at the 95th percentile to avoid outliers in the surface brightness map; minimal effect on total luminosity
volume_fraction[volume_fraction > np.nanpercentile(volume_fraction, 95)] = np.nanpercentile(volume_fraction, 95)
new_volume = volume_fraction*(4*np.pi/3.*lobe_lengths[0,i]*lobe_minor[i]**2)*tracer[:,timePointer[j]] #/volume_sum
# RELATIVISTIC BEAMING
doppler_factor = np.sqrt(np.maximum(1e-6, 1 - vx3[:,timePointer[j]]**2))**(3 - (s_index - 1)/2.) # Doppler boosting of particles in jet; 1e-6 ensures some very low level emission
doppler_factor[np.logical_and(np.abs(x3[:,timePointer[j]])/major[timePointer[j]] < 0.1, np.logical_and(np.abs(x1[:,timePointer[j]])/major[timePointer[j]] < 0.01, np.abs(x2[:,timePointer[j]])/major[timePointer[j]] < 0.01))] = 0 # completely remove very bright particles clumped at start of jet
# LOBE PARTICLES
# find angle and radius of each particle from core
new_angles = np.arctan((np.sqrt(x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*lobe_minor[i]/minor[timePointer[j]])/(x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])) # rescale axes to correct axis ratio
new_radii = np.sqrt((x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*(lobe_minor[i]/minor[timePointer[j]])**2 + (x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])**2)/lobe_lengths[0,i]
# find particles within lobe region; particles outside this region will not emit. Particle map is set to axis ratio based on shocked shell to maintain geometry of jet
new_eta_c = 1./np.sqrt((lobe_lengths[0,i]/lobe_lengths[-1,i])**2*(np.sin(new_angles))**2 + (np.cos(new_angles))**2)
lobe_particles = np.zeros_like(x1[:,timePointer[j]])
lobe_particles[np.abs(vx3[:,timePointer[j]]) > 1./np.sqrt(3)] = 1 # assume sound speed is critical value for relativistic particles
lobe_particles[new_radii < new_eta_c] = 1.
# TWO PHASE FLUID
# fraction of jet particles that have reached location in lobe
two_phase_weighting = np.maximum(0, np.minimum(1, lambda_crit[i]*(new_shock_time/np.minimum(tActive, tFinal[i]))**np.maximum(0, alpha_lambda[i])))
if tActive/tFinal[i] >= 1:
# keep jet particles visible at all times
two_phase_weighting = np.maximum(two_phase_weighting, np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3)))) # assume sound speed is critical value for relativistic particles
else:
# suppress emission from jet particles
two_phase_weighting = np.minimum(two_phase_weighting, 1 - np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3))))
# PARTICLE EMISSIVITY
for k in range(0, len(rest_frequency)):
if rest_frequency[k] > 100:
# calculate losses due to adiabatic expansion, and synchrotron/iC radiation
lorentz_ratio, pressure_ratio = __RAiSE_loss_mechanisms(rest_frequency[k], inverse_compton[k], redshift, tFinal[i], new_shock_time, new_pressure, alphaP_dyn, equi_factor, gammaCValue)
# calculate luminosity associated with each particle
temp_luminosity = None
if inverse_compton[k] == 1:
# inverse-Compton
sync_frequency = (3*e_charge*rest_frequency[k]*np.sqrt(2*mu0*( equi_factor*new_pressure/((gammaCValue - 1)*(equi_factor + 1)) ))/(2*np.pi*m_e*(freq_cmb*temp_cmb*(1 + redshift)))) # assuming emission at CMB frequency only
temp_luminosity = Ks/blackbody*sync_frequency**((1 - s_index)/2.)*(sync_frequency/rest_frequency[k])*(gammaCValue - 1)*__RAiSE_uC(redshift) * (equi_factor**((s_index + 1)/4. - 1 )/(equi_factor + 1)**((s_index + 5)/4. - 1 ))*new_volume*new_pressure**((s_index + 1 )/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
else:
# synchrotron
temp_luminosity = Ks*rest_frequency[k]**((1 - s_index)/2.)*(equi_factor**((s_index + 1)/4.)/(equi_factor + 1)**((s_index + 5)/4.))*new_volume*new_pressure**((s_index + 5)/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
# remove any infs
index = np.isinf(temp_luminosity)
temp_luminosity[index] = np.nan
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = temp_luminosity
# calculate luminosity weighted magnetic field strength
magnetic_particle[i,j,k] = np.nansum(np.sqrt(2*mu0*new_pressure*equi_factor/(gammaCValue - 1)*(equi_factor + 1))*luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
magnetic_weighting[i,j,k] = np.nansum(luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
# PARTICLE PRESSURE
else:
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = new_pressure*lobe_particles
# CARTESIAN LOCATIONS
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),0] = x1[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%8 - 3.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),1] = x2[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%4 - 1.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),2] = x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]] *np.sign(timePointer[j]%2 - 0.5)
# calculate luminosity weighted magnetic field strength for time step
for k in range(0, len(rest_frequency)):
if np.nansum(magnetic_weighting[i,:,k]) == 0:
magnetic_field[i,k] = 0
else:
magnetic_field[i,k] = np.nansum(magnetic_particle[i,:,k])/np.nansum(magnetic_weighting[i,:,k])
return location, luminosity, magnetic_field
# find ratio of the lorentz factor and the pressure at the time of acceleration to that at the time of emission
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_loss_mechanisms(rest_frequency, inverse_compton, redshift, time, shock_time, pressure, alphaP, equipartition, gammaCValue=5./3):
# calculate lorentz factor at time of emission
if inverse_compton == 1:
# inverse-Compton
lorentz_factor = np.sqrt(rest_frequency/(freq_cmb*temp_cmb*(1 + redshift)))*np.ones(len(pressure)) # assuming emission at CMB frequency only
else:
# synchrotron
lorentz_factor = np.sqrt(2*np.pi*m_e*rest_frequency/(3*e_charge*np.sqrt(2*mu0*pressure/(gammaCValue - 1)*(equipartition/(equipartition + 1))))) # assuming emission at Larmor frequency only
# calculate pressure and volume at time of acceleration
pressure_inject = pressure*(shock_time/time)**alphaP
# calculate RAiSE constant a2
a2 = __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue)
# calculate lorentz factor at time of acceleration, and remove invalid points
lorentz_inject = lorentz_factor*shock_time**(alphaP/(3*gammaCValue))/(time**(alphaP/(3*gammaCValue)) - a2*lorentz_factor) # time appears here (rather than shock_time) because it is the upper limit, time_high
lorentz_inject[lorentz_inject < 1] = np.nan
return lorentz_inject/lorentz_factor, pressure_inject/pressure
# find RAiSE constant a2 for synchrotron and iC radiative losses
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue=5./3):
return 4*sigma_T/(3*m_e*c_speed)*(pressure_inject/(gammaCValue - 1)*(equipartition/(equipartition + 1))/(1 + alphaP*(1 + 1./(3*gammaCValue)))*shock_time**(-alphaP)*(time**(1 + alphaP*(1 + 1./(3*gammaCValue))) - shock_time**(1 + alphaP*(1 + 1./(3*gammaCValue)))) + __RAiSE_uC(redshift)/(1 + alphaP/(3*gammaCValue))*(time**(1 + alphaP/(3*gammaCValue)) - shock_time**(1 + alphaP/(3*gammaCValue)))) # array is shorter by one element
# find CMB radiation energy density
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_uC(redshift):
uC0 = 0.25*1e6*e_charge # J m-3 CMB energy density at z = 0 (Longair, 1981)
return uC0*(redshift + 1)**4 # assuming uC prop to (z + 1)^4 as in KDA97
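# Worked example (added): uC0 = 0.25e6*e_charge is approximately 4.0e-14 J m^-3, so the
# (z + 1)^4 scaling gives, e.g., uC(z=1) = 16*uC0 ≈ 6.4e-13 J m^-3; inverse-Compton
# losses against the CMB therefore grow rapidly with redshift.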
# find RAiSE constant K(s) for the absolute scaling of the emissivity
def __RAiSE_Ks(s_index, gammaCValue=5./3, lorentz_min=Lorentzmin):
kappa = (gamma(s_index/4. + 19./12)*gamma(s_index/4. - 1./12)*gamma(s_index/4. + 5./4)/gamma(s_index/4. + 7./4))
return kappa/(m_e**((s_index + 3)/2.)*c_speed*(s_index + 1))*(e_charge**2*mu0/(2*(gammaCValue - 1)))**((s_index + 5)/4.)*(3./np.pi)**(s_index/2.)/((lorentz_min**(2 - s_index) - Lorentzmax**(2 - s_index))/(s_index - 2) - (lorentz_min**(1 - s_index) - Lorentzmax**(1 - s_index))/(s_index - 1))
# find RAiSE blackbody constant to convert cosmic microwave background emission from single frequency to blackbody spectrum
def __RAiSE_blackbody(s_index):
return np.pi**4/(15*gamma((s_index + 5)/2.)*zeta((s_index + 5)/2.))
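# Illustrative sketch (not part of the original module): evaluating the blackbody
# correction factor directly with scipy.special for a representative electron energy
# index; s_index = 2*0.7 + 1 = 2.4 corresponds to an injection spectral index of 0.7.
def _blackbody_constant_sketch(s_index=2.4):
    from scipy.special import gamma, zeta
    return np.pi**4/(15*gamma((s_index + 5)/2.)*zeta((s_index + 5)/2.))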
## Define functions to produce surface brightness maps of radio lobes
# define function to manage the discretisation of particles down to pixels
def __RAiSE_brightness_map(frequency, redshift, source_age, lobe_lengths, location, luminosity, angle, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
npixels = 2048/4
elif resolution == 'high':
npixels = 512/2
elif resolution == 'standard':
npixels = 128/1
elif resolution == 'poor':
npixels = 32*2
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
else:
rest_frequency = [10**frequency*(1 + redshift)]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
return __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels)
# define function to discretise particles down to pixels
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels):
# instantiate variables to store brightness map variables
x_list = []
y_list = []
brightness_list = []
for i in range(0, len(tFinal)):
x_col = []
y_col = []
brightness_col = []
sim_x, sim_y, sim_z = location[i,:,0], location[i,:,1], location[i,:,2] # x, y, z (i.e. 0, 1, 2) in simulations
for j in range(0, len(rest_frequency)):
# separate location array into components
index = np.logical_and(np.logical_and(np.logical_not(np.isnan(luminosity[i,:,j])), np.logical_not(np.isinf(luminosity[i,:,j]))), np.logical_not(np.isnan(sim_x)))
location_x = np.sin(angle*np.pi/180.)*sim_y[index] + np.cos(angle*np.pi/180.)*sim_z[index]
location_y = sim_x[index]
new_luminosity = luminosity[i,:,j]
new_luminosity = new_luminosity[index]
if len(location_x) > 0:
# discretise particles
location_x = np.floor(location_x/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
location_y = np.floor(location_y/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
min_x, min_y = np.min(location_x), np.min(location_y)
location_x = location_x - min_x
location_y = location_y - min_y
# instantiate variables to store discrete particles
x_values = np.arange(np.min(location_x), np.max(location_x) + 0.1, 1).astype(np.int_)
y_values = np.arange(np.min(location_y), np.max(location_y) + 0.1, 1).astype(np.int_)
brightness = np.zeros((len(x_values), len(y_values)))
# add luminosity from each particle to correct pixel
for k in range(0, len(new_luminosity)):
if rest_frequency[j] > 100:
brightness[location_x[k],location_y[k]] = brightness[location_x[k],location_y[k]] + new_luminosity[k]
else:
brightness[location_x[k],location_y[k]] = max(brightness[location_x[k],location_y[k]], new_luminosity[k])
# add x and y pixel values, and brightnesses to arrays
x_col.append((x_values + min_x + 0.5)*lobe_lengths[0,i]/(npixels//2)) # add 0.5 to get pixel centres and scale back to physical dimensions
y_col.append((y_values + min_y + 0.5)*lobe_lengths[0,i]/(npixels//2))
brightness_col.append(brightness)
else:
x_col.append(None)
y_col.append(None)
brightness_col.append(None)
x_list.append(x_col)
y_list.append(y_col)
brightness_list.append(brightness_col)
return x_list, y_list, brightness_list
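# Illustrative sketch (not part of the original module): for the summed (radio) branch,
# the per-particle loop above is equivalent to accumulating luminosities onto a 2D grid;
# np.add.at performs the same unbuffered accumulation when pixel indices repeat.
def _bin_particles_sketch(location_x, location_y, luminosities, nx, ny):
    brightness = np.zeros((nx, ny))
    np.add.at(brightness, (location_x, location_y), luminosities)
    return brightness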
# Define functions to plot emissivity maps throughout source evolutionary history
def RAiSE_evolution_maps(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, seed=None, rerun=False, cmap='RdPu'):
# function to test type of inputs and convert type where appropriate
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# set up plot
fig, axs = plt.subplots(len(source_age), 1, figsize=(12, 1 + (10/axis_ratio[0] + 0.8)*len(source_age)))
if len(source_age) <= 1: # handle case of single image
axs = [axs]
fig.subplots_adjust(hspace=0)
#cmap = cm.get_cmap('binary')
colour_scheme = cm.get_cmap(cmap)
rc('text', usetex=True)
rc('font', size=14)
rc('legend', fontsize=14)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
for i in range(0, len(source_age)):
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}__t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
# read-in data from file (must be RAiSE output of correct format)
if rerun == False:
try:
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
except:
# run RAiSE HD for set of parameters at requested resolution
RAiSE_run(frequency[0], redshift[0], axis_ratio[0], jet_power[0], source_age[i], halo_mass=halo_mass, rand_profile=rand_profile, betas=betas, regions=regions, rho0Value=rho0Value, temperature=temperature, active_age=active_age[0], jet_lorentz=jet_lorentz[0], equipartition=equipartition[0], spectral_index=spectral_index, gammaCValue=gammaCValue, lorentz_min=Lorentzmin, brightness=True, resolution='best', seed=seed)
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
import contextlib
from pathlib import Path
import re
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.excel import (
ExcelWriter,
_OpenpyxlWriter,
)
openpyxl = pytest.importorskip("openpyxl")
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
def test_to_excel_styleconverter(ext):
from openpyxl import styles
hstyle = {
"font": {"color": "00FF0000", "bold": True},
"borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"},
"fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}},
"number_format": {"format_code": "0.00"},
"protection": {"locked": True, "hidden": False},
}
font_color = styles.Color("00FF0000")
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal="center", vertical="top")
fill_color = styles.Color(rgb="006666FF", tint=0.3)
fill = styles.PatternFill(patternType="solid", fgColor=fill_color)
number_format = "0.00"
protection = styles.Protection(locked=True, hidden=False)
kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
assert kw["font"] == font
assert kw["border"] == border
assert kw["alignment"] == alignment
assert kw["fill"] == fill
assert kw["number_format"] == number_format
assert kw["protection"] == protection
def test_write_cells_merge_styled(ext):
from pandas.io.formats.excel import ExcelCell
sheet_name = "merge_styled"
sty_b1 = {"font": {"color": "00FF0000"}}
sty_a2 = {"font": {"color": "0000FF00"}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {"font": {"color": "000000FF", "bold": True}}
sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = sty_kwargs["font"]
merge_cells = [
ExcelCell(
col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged
)
]
with tm.ensure_clean(ext) as path:
with _OpenpyxlWriter(path) as writer:
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks["B1"]
xcell_a2 = wks["A2"]
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize("iso_dates", [True, False])
def test_kwargs(ext, iso_dates):
# GH 42286 GH 43445
kwargs = {"iso_dates": iso_dates}
with tm.ensure_clean(ext) as f:
msg = re.escape("Use of **kwargs is deprecated")
with tm.assert_produces_warning(FutureWarning, match=msg):
with ExcelWriter(f, engine="openpyxl", **kwargs) as writer:
assert writer.book.iso_dates == iso_dates
# ExcelWriter won't allow us to close without writing something
DataFrame().to_excel(writer)
@pytest.mark.parametrize("iso_dates", [True, False])
def test_engine_kwargs_write(ext, iso_dates):
# GH 42286 GH 43445
engine_kwargs = {"iso_dates": iso_dates}
with tm.ensure_clean(ext) as f:
with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
assert writer.book.iso_dates == iso_dates
# ExcelWriter won't allow us to close without writing something
DataFrame().to_excel(writer)
def test_engine_kwargs_append_invalid(ext):
# GH 43445
# test whether an invalid engine kwargs actually raises
with tm.ensure_clean(ext) as f:
DataFrame(["hello", "world"]).to_excel(f)
with pytest.raises(
TypeError,
match=re.escape(
"load_workbook() got an unexpected keyword argument 'apple_banana'"
),
):
with ExcelWriter(
f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"}
) as writer:
# ExcelWriter needs us to write something to close properly
DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
def test_engine_kwargs_append_data_only(ext, data_only, expected):
# GH 43445
# tests whether the data_only engine_kwarg actually works well for
# openpyxl's load_workbook
with tm.ensure_clean(ext) as f:
import sys
sys.path.append('../mss')
import matplotlib.pyplot as plt
import visreader as mvis
import mssmain as mss
import pandas as pd
import numpy as np
from tqdm import tqdm
def mz_selector(scans, mz_list, export_name):
sample_list = []
for mz in tqdm(mz_list):
rt, i = ms_chromatogram_list(scans, mz, 20)
count = 0
for ints in i:
if ints >= 5000:
count += 1
if count == 7:
sample_list.append([mz, i])
break
else:
count = 0
continue
d_sample = pd.DataFrame(sample_list)
d_rt = pd.DataFrame(rt)
writer = pd.ExcelWriter(export_name, engine='xlsxwriter')
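# The fragment ends here; a minimal sketch of how such a writer is typically used
# (assumption, not the original code; sheet names are made up):
# d_sample.to_excel(writer, sheet_name='selected_mz', index=False)
# d_rt.to_excel(writer, sheet_name='retention_time', index=False)
# writer.close()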
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
############ TAGS TO BE USED WITH EXTENSION "BETTER COMMENTS"
# $ TITLE / DONE
# & Subtitle
# ! Warning
# * Demo
# ? Question/clarification
# % To do
# x Deleted
############
import sys
import os
import random
import ctypes
import pandas
import numpy
import requests
import json
import time
import threading
from datetime import datetime, timedelta
from PIL import Image
import glob
from PyQt5 import QtGui
from PyQt5.QtCore import QDateTime, Qt, QTimer, QEventLoop, QDir
from PyQt5.QtWidgets import (QApplication, QComboBox,QDialog, QGridLayout, QGroupBox, QLabel, QLineEdit,
QPushButton, QTextEdit, QVBoxLayout, QHBoxLayout, QWidget, QMessageBox, QSpinBox, QDesktopWidget,
QCheckBox, QFileDialog, QTabWidget, QSizePolicy)
#? README
#? Interval between 0 and 99 seconds
#? Max no. of trials = 99
#? To export, add the ".csv" extension
#? For Neulog, you need to open the Neulog API
#$ TODO:
#! Sharing code
#! Dynamic "path_REG_dll"
#! Diseño
#! Agregar imagen de diseño experimental
#! Add Z0 a Phys Data, a lado de Trial e Instances
#! Add info
#! Add manual
#! Adaptar exports
#! Cerrar puertas de errores
#! Don't close if RNG is not selected
#! Don't close if error at exporting
#! Don't close if statistical analysis can't be realized
#! Don't close if no library is selected
#! Warn if no image bank has been selected
#! Don't exit if there is no Neulog port
#! Review the other "#!" items
#! Prevent different sample rates across Neulog sensors (not supported and it breaks)
#! Fix the on-demand procedure; it would break the sample count calculated for the Neulog
#! Compile with https://pypi.org/project/auto-py-to-exe/
#! LOW PRIORITY:
#! Still need to add "Opened", "GetBits", "GetBytes" and "APIVersion" to the PsyREG class
#! Include FOR loop for each psyleron connected in "click_refresh_sources"
#! Add analysis dimensions (e.g. Fear[Death-Danger, Animals-Injuries, etc.])
#$ WISHLIST:
#! Separate analysis for men and women
#! Select visual stimuli
#$ Images (DONE)
#! Light flashes
#! Select auditory stimuli
#! Loud bangs
#! Silence
#! White noise
#! Physiological sensors
#! Neulog
#! Emotiv
#! Vernier Go-Direct
#! BIOPAC
################################3
class Pseudo_RNG():
def __init__(self):
self.name = "Pseudo-RNG"
def get_bits(self, maxbts):
str_list = []
for x in range(maxbts):
str_list.append(random.randrange(0,2))
str_bits = ''.join(str(x) for x in str_list)
# print(str_bits)
return str_bits
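# A minimal hedged usage sketch (illustrative only, never called by the GUI): it shows that
# Pseudo_RNG.get_bits returns a string of '0'/'1' characters and how one might count the ones.
def demo_pseudo_rng(maxbts=6):
    prng = Pseudo_RNG()
    bits = prng.get_bits(maxbts)  # e.g. "010011"
    ones = bits.count("1")        # number of 1-bits in the sample
    return bits, ones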
class PsyREG():
def __init__(self):
#? Define path of DLL
        self.path_REG_dll = os.path.join(os.getcwd(), r'.\Presentimiento\PsyREG.dll') # DLL file path (raw string avoids invalid escape warnings)
# self.path_REG_dll = r'C:\Users\ramse\Python_ENVS\Presentimiento\PsyREG.dll' # DLL file path
self.REG_dll = ctypes.CDLL(self.path_REG_dll) # load DLL
#? Define variables
PSYREG_API_VERSION = 1 # Version of the API that this header is indended for. Should be compared to PsyREGAPIVersion() */
INVALID_DATASOURCE = -1 # Constant representing an invalid Datasource */
BSS_GOOD = 0x0000 # no flags set. the device is ok and there are no problems */
BSS_CONNECTING = 0x0001 # device connection is being established (in the process of opening) */
BSS_WAITING = 0x0002 # waiting for device data (buffer empty) */
BSS_BUSY = 0x0004 # device is in use by another application */
BSS_NODEVICE = 0x0008 # there is no device by this name connected anymore */
BSS_READERROR = 0x0010 # was there a read error during the last read */
BSS_BADCFG = 0x0020 # was there a bad configuration for the device (e.g. conflicting values or unset values) */
BSS_CANTPROCESS = 0x0040 # was there a processing error? [set at bitsource level] */
BSS_INITERROR = 0x0080 # was there an initialization error / problem with the data structure [set at bitsource level] */
BSS_TIMEOUT = 0x0100 # did the reader time out since the last device read [set at bitsource level] */
BSS_GENERALERROR = 0x8000 # was there any error at all. set if any other error (busy, nodevice, readerror, cantprocess) [set at bitsource level] */
BSS_INVALID = 0x0200 # is the DataSource invalid. This occurs when a DataSource was not created or has already been destroyed. */
def get_name(self):
#? Obtain the Type and ID of the Psyleron, and return in a formatted string
self.invoke_RNG()
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetDeviceTypeBSTR.restype = ctypes.c_char_p
self.REG_dll.PsyREGGetDeviceTypeBSTR.argtypes = [ctypes.c_int32]
self.REG_dll.PsyREGGetDeviceIdBSTR.restype = ctypes.c_char_p
self.REG_dll.PsyREGGetDeviceIdBSTR.argtypes = [ctypes.c_int32]
PsyREG_ID = self.REG_dll.PsyREGGetDeviceIdBSTR(source)
PsyREG_ID = PsyREG_ID.decode("utf-8") #Decode from byte to string
PsyREG_Type = self.REG_dll.PsyREGGetDeviceTypeBSTR(source)
PsyREG_Type = PsyREG_Type.decode("utf-8") #Decode from byte to string
name_PsyREG = ("Psyleron %s: %s" % (PsyREG_Type, PsyREG_ID)) # Format string of the name
# print(name_PsyREG)
return name_PsyREG
def get_bits(self, maxbts):
#? Obtain 1 bit of random data; 1 or 0
self.invoke_RNG()
source = self.get_source()
self.open_RNG()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetBit.restype = ctypes.c_int32
self.REG_dll.PsyREGGetBit.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte)]
# For loop for x number of MAXBTS stated
str_list = []
for bit_psyreg in range(maxbts):
bit_psyreg = ctypes.c_ubyte()
self.REG_dll.PsyREGGetBit(source, ctypes.byref(bit_psyreg))
str_list.append(bit_psyreg.value)
str_bits = ''.join(str(x) for x in str_list)
# print(str_bits)
return str_bits
def get_bytes(self, maxbts):
#? Obtain 1 byte (between 0 and 255) of random data
self.invoke_RNG()
source = self.get_source()
self.open_RNG()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetByte.restype = ctypes.c_int32
self.REG_dll.PsyREGGetByte.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte)]
# For loop for x number of MAXBTS stated
str_list = []
for byte_psyreg in range(maxbts):
byte_psyreg = ctypes.c_ubyte()
self.REG_dll.PsyREGGetByte(source, ctypes.byref(byte_psyreg))
str_list.append(byte_psyreg.value)
str_bytes = ''.join(str(x) for x in str_list)
# print(str_bytes)
return str_bytes
# def get_bits(self,max_bits): ######! NOT WORKING YET
# #? Obtain chunks of bits
# self.invoke_RNG()
# source = self.get_source()
# self.open_RNG()
# # Define all the types of results and arguments in the PsyREG dll functions
# REG_dll.PsyREGGetBits.restype = ctypes.c_int32
# REG_dll.PsyREGGetBits.argtypes = [ctypes.c_int32,ctypes.POINTER(ctypes.c_ubyte),ctypes.c_int32,ctypes.c_int32]
# bits_psyreg = ctypes.c_ubyte()
# REG_dll.PsyREGGetBits(source, ctypes.byref(bits_psyreg), max_bits)
# return bits_psyreg.value
def invoke_RNG(self):
        #? Call the Psyleron; if it's not called it won't know you're talking to it
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGEnumerateSources.restype = ctypes.c_int32
self.REG_dll.PsyREGEnumerateSources.argtypes = []
PsyREG_EnumerateSources = self.REG_dll.PsyREGEnumerateSources()
return PsyREG_EnumerateSources
def get_source(self):
#? Get source from psyleron; if it's not stated, it won't get data, even if it's called
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGGetSource.restype = ctypes.c_int32
self.REG_dll.PsyREGGetSource.argtypes = [ctypes.c_uint32]
PsyREG_GetSource = self.REG_dll.PsyREGGetSource(0)
return PsyREG_GetSource
def open_RNG(self):
#? Open the stream of data to obtain bits and bytes
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGOpen.restype = ctypes.c_int32
self.REG_dll.PsyREGOpen.argtypes = [ctypes.c_int32]
PsyREG_Open = self.REG_dll.PsyREGOpen(source)
return PsyREG_Open
def close_RNG(self):
#? Closes an open DataSource and prevents further interaction
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGClose.restype = ctypes.c_void_p
self.REG_dll.PsyREGClose.argtypes = [ctypes.c_int32]
PsyREG_Close = self.REG_dll.PsyREGClose(source)
return PsyREG_Close
def release_RNG(self):
#? Releases a given source back to the source manager
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGReleaseSource.restype = ctypes.c_void_p
self.REG_dll.PsyREGReleaseSource.argtypes = [ctypes.c_int32]
PsyREG_Release = self.REG_dll.PsyREGReleaseSource(source)
return PsyREG_Release
def clear_RNG(self):
#? Clears the entire list of sources built by one or more calls to EnumerateSources (invoke_RNG)
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGClearSources.restype = ctypes.c_void_p
self.REG_dll.PsyREGClearSources.argtypes = []
PsyREG_Clear = self.REG_dll.PsyREGClearSources()
return PsyREG_Clear
def reset_RNG(self):
#? Signals that the data in the DataSource internal buffer is stale and performs a clear.
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGReset.restype = ctypes.c_void_p
self.REG_dll.PsyREGReset.argtypes = [ctypes.c_int32]
PsyREG_Reset = self.REG_dll.PsyREGReset(source)
return PsyREG_Reset
def get_status(self):
#? Obtain 0 if status is good, 512 if status is bad
self.invoke_RNG()
source = self.get_source()
# Define all the types of results and arguments in the PsyREG dll functions
self.REG_dll.PsyREGGetStatus.restype = ctypes.c_int32
self.REG_dll.PsyREGGetStatus.argtypes = [ctypes.c_int32]
# Pass functions from PsyREG dll
PsyREG_Status = self.REG_dll.PsyREGGetStatus(source)
return PsyREG_Status
def count_PsyREGs(self):
#? Count number of Psylerons connected
self.invoke_RNG()
# Define all the types of results and arguments in the PsyREG dll function
self.REG_dll.PsyREGGetSourceCount.restype = ctypes.c_uint32
self.REG_dll.PsyREGGetSourceCount.argtypes = []
PsyREG_GetSourceCount = self.REG_dll.PsyREGGetSourceCount(0)
return PsyREG_GetSourceCount
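# A minimal hedged usage sketch (illustrative only, never called by the GUI): it shows the
# intended call order for the PsyREG wrapper above, assuming a Psyleron device is connected
# and the PsyREG.dll path configured in the class is valid.
def demo_psyreg(maxbts=6):
    psyleron = PsyREG()
    if psyleron.count_PsyREGs() < 1:
        return None                   # no physical device connected
    bits = psyleron.get_bits(maxbts)  # get_bits enumerates and opens the source internally
    psyleron.clear_RNG()              # mirror the cleanup done in click_generate_bits
    psyleron.release_RNG()
    return bits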
class Neulog():
#! Not implemented: "ResetSensor:[],[]", "SetPositiveDirection:[],[],[]" & "# SetRFID:[]"
#! Can't support more than 20 samples per second for more than 5 minutes
def __init__(self,host,*additional_sensors):
self.name = "Neulog"
self.host = str(host)
self.sensor_id = '1'
self.set_sensors_id()
self.parameters = ':'
# Check if there's more than 1 sensor given as argument
for sensors in additional_sensors:
self.parameters += '[' + sensors + '],[' + self.sensor_id + '],'
self.parameters = self.parameters[:-1]
def get_url(self,command):
# Construct url query
url = 'http://localhost:'+self.host+'/NeuLogAPI?'+command
return url
def get_data_dict(self,url):
# Obtain data_dict from url request from the url request
data_dict = requests.get(url)
# Convert to json object, so it can be used as dictionary
json_data_dict = json.loads(data_dict.text)
return json_data_dict
def set_sensors_id(self):
# Set the ID of the connected sensors
command = 'SetSensorsID'
parameters = ':['+ self.sensor_id +']'
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
return 'All sensors changed the ID to: ' + data_dict[command]
def set_sensor_range(self, sensor_range):
# Change the range of the sensor (GSR: 1 = Arb, 2 = mS; Pulse: 1 = BPM, 2 = Wave[Arb])
command = 'SetSensorRange'
parameters = self.parameters
sensor_range = ',['+ sensor_range +']'
url = self.get_url(command+parameters+sensor_range)
data_dict = self.get_data_dict(url)
return 'Sensor range changed: ' + data_dict[command]
def get_version(self):
# Obtain the version of the Neulog API
command = 'GetServerVersion'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Neulog API version: ' + data_dict[command]
def get_status(self):
# Get the status of the server (it's wrongly written as 'sever' in the API)
command = 'GetSeverStatus'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Neulog API status: ' + data_dict[command]
def get_values(self):
# Obtain values from the sensors
command = 'GetSensorValue'
parameters = self.parameters
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
# Obtains the values from the data_dict
data_list = data_dict[command]
return data_list
# print(data_dict[command])
def exp_start(self,sample_rate,sample_size):
# Start the experiment with the defined parameters; an experiment needs to be stopped before starting a new one
command = 'StartExperiment'
parameters = self.parameters + ',[' + sample_rate + '],[' + sample_size + ']'
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
return 'Start Neulog experiment: ' + data_dict[command] + ' at ' + datetime.now().strftime('%H:%M:%S.%f')[:-3]
def exp_stop(self):
# Stops the experiment; an experiment needs to be stopped before starting a new one
command = 'StopExperiment'
url = self.get_url(command)
data_dict = self.get_data_dict(url)
return 'Stopped Neulog experiment: ' + data_dict[command] + ' at ' + datetime.now().strftime('%H:%M:%S.%f')[:-3]
def get_exp_values(self):
# Obtain values of the current or last ran experiment
command = 'GetExperimentSamples'
parameters = self.parameters
url = self.get_url(command+parameters)
data_dict = self.get_data_dict(url)
num = 0
# for each list of data within the dictionary, delete the first 2 elements (sensor_type and sensor_id)
for lists in data_dict[command]:
del data_dict[command][num][:2]
num += 1
# return list of lists of each sensor_type with only the values recorded
return data_dict[command]
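# A minimal hedged usage sketch (illustrative only, never called by the GUI): it shows the
# experiment flow of the Neulog wrapper above, assuming the Neulog API server is running on
# the given port and a GSR sensor with ID 1 is connected. Port, rate and size are placeholders.
def demo_neulog(port="22002"):
    neu = Neulog(port, "GSR")
    if neu.get_status() != "Neulog API status: Ready":
        return None              # server unreachable, or an experiment is still running
    neu.set_sensor_range("2")    # GSR range 2 = milliSiemens
    neu.exp_start("5", "10")     # sample rate and sample size are passed as strings
    time.sleep(2)                # let some samples accumulate
    neu.exp_stop()
    return neu.get_exp_values()  # list of value lists, one per requested sensor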
class Create_Window(QDialog):
#& INIT
def __init__(self):
super().__init__()
self.title = "Physiological Anticipatory Activity (PAA) 2.0"
        self.path_logo_UPIDE = os.path.join(os.getcwd(), r'.\Presentimiento\Logo_UPIDE.png') # Logo UPIDE path (raw string avoids invalid escape warnings)
self.setWindowIcon(QtGui.QIcon(self.path_logo_UPIDE))
self.setWindowTitle(self.title)
self.setFixedSize(1600, 800)
# call functions:
self.create_settings_layout()
self.create_data_layout()
self.create_phys_layout()
self.create_stats_layout()
self.create_buttons()
# Create list of stimuli:
self.image_list_neutral = []
self.image_list_neutral_filenames = []
self.image_list_excitatory = []
self.image_list_excitatory_filenames = []
self.image_list = []
self.image_list_filenames = []
        # Create the layout in grid format for the groups (topleft, topright, etc.)
Tab_Widget = QTabWidget()
main_layout = QVBoxLayout()
Tab_Widget.addTab(self.gb_settings, "Settings")
Tab_Widget.addTab(self.gb_session_data, "Session Data")
Tab_Widget.addTab(self.gb_phys_data, "Physiological Data")
Tab_Widget.addTab(self.gb_stats_data, "Statistical Analysis")
main_layout.addWidget(Tab_Widget)
main_layout.addLayout(self.layout_buttons,2)
self.setLayout(main_layout)
#& LAYOUT
def create_settings_layout(self):
#& GROUP BOXES
#& MAIN
self.gb_settings = QGroupBox("Session settings:")
#& 1. SOURCES & STIMULI
#& 1.1. SOURCES & TESTING
self.gb_sources_n_test = QGroupBox("RNG sources:")
#& 1.2. STIMULI
self.gb_stimuli = QGroupBox("Select stimuli:")
#& 1.3. PHYSIOLOGICAL
self.gb_physiological = QGroupBox("Select physiological data:")
#& 2. EXP DESIGN
self.gb_exp_design = QGroupBox("Experimental design:")
#& 2.1 TRIALS & SESSION
#& 2.1.1. SESSION ID
self.gb_session_id = QGroupBox("Session ID:")
#& 2.1.2. TRIAL TYPE
self.gb_trial_type = QGroupBox("Type of trials:")
#& 2.1.3. TRIALS NUM
self.gb_num_trials = QGroupBox("Number of trials:")
#& 2.2. TRIALS DURATION
self.gb_trial_duration = QGroupBox("Duration of each part of a trial (seconds):")
#& 2.2.1. TRIALS DURATION ELEMENTS
self.gb_pre_screen = QGroupBox("Pre-stimulus screen duration:")
self.gb_stimulus_duration = QGroupBox("Stimulus duration:")
self.gb_post_screen = QGroupBox("Post-stimulus screen duration:")
#& 2.3. DELAYS
self.gb_delays = QGroupBox("Delays with white screen between trials (seconds):")
#& 2.3.1. FIRST SCREEN
self.gb_first_screen = QGroupBox("Only-once before first trial:")
#& 2.3.2. DELAY BEFORE TRIAL
self.gb_before_interval = QGroupBox("Interval before each trial:")
#& 2.3.3. DELAY AFTER TRIAL
self.gb_after_interval = QGroupBox("Interval after each trial:")
#& SPIN BOXES
#& 2.1.1. SESSION ID
self.sb_session_id = QSpinBox()
self.sb_session_id.setValue(1)
#& 2.1.3. TRIALS NUM
self.sb_num_trials = QSpinBox()
self.sb_num_trials.setValue(3) #$ 45
#& 2.2.1. TRIALS DURATION ELEMENTS
self.sb_pre_screen = QSpinBox()
self.sb_pre_screen.setValue(1) #$ 3
self.sb_stim_duration = QSpinBox()
self.sb_stim_duration.setValue(1) #$ 3
self.sb_post_screen = QSpinBox()
self.sb_post_screen.setValue(1) #$ 9
#& 2.3.1. FIRST SCREEN
self.sb_first_screen = QSpinBox()
self.sb_first_screen.setValue(1) #$ 10
#& 2.3.3. DELAY BEFORE TRIAL
self.sb_before_min_interval = QSpinBox()
self.sb_before_min_interval.setValue(0) #$ 0
self.sb_before_max_interval = QSpinBox()
self.sb_before_max_interval.setValue(0) #$ 0
self.sb_before_min_interval.setEnabled(False)
self.sb_before_max_interval.setEnabled(False)
#& 2.3.3. DELAY AFTER TRIAL
self.sb_after_min_interval = QSpinBox()
self.sb_after_min_interval.setValue(0) #$ 0
self.sb_after_max_interval = QSpinBox()
self.sb_after_max_interval.setValue(1) #$ 5
#& COMBO BOXES
#& 1.1. SOURCES
self.combo_rng_sources = QComboBox()
self.combo_rng_sources.addItem("-")
#& 1.3. PHYSIOLOGICAL
self.combo_skin_conductance = QComboBox()
self.combo_skin_conductance.addItem("-")
self.combo_skin_conductance.setDisabled(True)
self.combo_heart_rate = QComboBox()
self.combo_heart_rate.addItem("-")
self.combo_heart_rate.setDisabled(True)
self.combo_brainwaves = QComboBox()
self.combo_brainwaves.addItem("-")
self.combo_brainwaves.setDisabled(True)
self.combo_skin_conductance_sample = QComboBox()
self.combo_heart_rate_sample = QComboBox()
self.combo_brainwaves_sample = QComboBox()
self.combo_skin_conductance_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_heart_rate_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_brainwaves_sample.addItems(["20 per second","10 per second","5 per second","2 per second","1 per second"])
self.combo_skin_conductance_sample.setDisabled(True)
self.combo_heart_rate_sample.setDisabled(True)
self.combo_brainwaves_sample.setDisabled(True)
#& 2.1.2. TRIAL TYPE
self.combo_trial_type = QComboBox()
self.combo_trial_type.addItem("Free-Running")
self.combo_trial_type.addItem("On-Demand")
self.combo_trial_type.currentIndexChanged.connect(self.click_trial_type)
#& TEXT BOXES
#& 1.1. TESTING & TESTING
self.tb_gen_bits = QLineEdit("") #? Add color to background: gen_bits.setStyleSheet("QLineEdit { background-color: rgb(220,220,220) }")
#& 1.3. PHYSIOLOGICAL
self.tb_neulog_port = QLineEdit("22002") #$ Localhost Port (ej. '22002')
self.tb_skin_conductance_test = QLineEdit("Test")
self.tb_heart_rate_test = QLineEdit("Test")
self.tb_brainwaves_test = QLineEdit("Test")
self.tb_skin_conductance_test.setDisabled(True)
self.tb_heart_rate_test.setDisabled(True)
self.tb_brainwaves_test.setDisabled(True)
#& BUTTONS
#& 1.1. SOURCES & TESTING
butt_refresh_sources = QPushButton('Refresh RNG sources')
butt_refresh_sources.clicked.connect(self.click_refresh_sources)
butt_generate_bits = QPushButton('Test: Generate bits')
butt_generate_bits.clicked.connect(self.click_generate_bits)
#& 1.2. STIMULI
butt_neutral_stimuli = QPushButton("Select neutral stimuli library")
butt_neutral_stimuli.clicked.connect(self.click_neutral_stimuli)
butt_excitatory_stimuli = QPushButton('Select excitatory stimuli library')
butt_excitatory_stimuli.clicked.connect(self.click_excitatory_stimuli)
#& 1.3. PHYSIOLOGICAL
butt_refresh_neulog = QPushButton('Refresh Neulog sources')
butt_refresh_neulog.clicked.connect(self.click_refresh_neulog)
butt_refresh_physiological = QPushButton('Refresh physiological sources')
butt_refresh_physiological.clicked.connect(self.click_refresh_physiological)
self.butt_skin_conductance_test = QPushButton('Test: Get values')
self.butt_skin_conductance_test.clicked.connect(self.click_skin_conductance_test)
self.butt_skin_conductance_test.setDisabled(True)
self.butt_heart_rate_test = QPushButton('Test: Get values')
self.butt_heart_rate_test.clicked.connect(self.click_heart_rate_test)
self.butt_heart_rate_test.setDisabled(True)
self.butt_brainwaves_test = QPushButton('Test: Get values')
self.butt_brainwaves_test.clicked.connect(self.click_brainwaves_test)
self.butt_brainwaves_test.setDisabled(True)
#& CHECK BOXES
#& 1.3. PHYSIOLOGICAL
self.cb_skin_conductance = QCheckBox("Skin Conductance")
self.cb_skin_conductance.toggled.connect(self.check_skin_conductance)
self.cb_heart_rate = QCheckBox("Heart Rate")
self.cb_heart_rate.toggled.connect(self.check_heart_rate)
self.cb_brainwaves = QCheckBox("Brain Waves")
self.cb_brainwaves.toggled.connect(self.check_brainwaves)
#& SET LAYOUTS
# declare layouts
layout_main = QHBoxLayout() #MAIN
layout_source_n_stimuli = QVBoxLayout() # 1. SOURCES & STIMULI
layout_sources_n_test = QGridLayout() # 1.1. SOURCES & TESTING
layout_stimuli = QHBoxLayout() # 1.2. STIMULI
layout_physiological = QGridLayout() # 1.3. PHYSIOLOGICAL
layout_exp_design = QHBoxLayout() # 2. EXP DESIGN
layout_trial_and_screen = QVBoxLayout() # 2.1. TRIALS
layout_session_id = QVBoxLayout() # 2.1.1. SESSION ID
layout_trial_type = QVBoxLayout() # 2.1.2. TRIAL TYPE
layout_trial = QVBoxLayout() # 2.1.3. TRIALS NUM
layout_duration = QGridLayout() # 2.2. TRIALS DURATION
layout_dur_pre = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_stimulus = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_post = QVBoxLayout() # 2.2.1. TRIALS DURATION ELEMENTS
layout_delays = QVBoxLayout() # 2.3. DELAYS
layout_f_screen = QVBoxLayout() #2.3.1. FIRST SCREEN
layout_before_interval = QGridLayout()# 2.3.2. DELAY BEFORE TRIAL
layout_after_interval = QGridLayout()# 2.3.3. DELAY AFTER TRIAL
#& MAIN
layout_main.addLayout(layout_source_n_stimuli,1)
layout_main.addWidget(self.gb_exp_design,1)
self.gb_settings.setLayout(layout_main)
#& 1. SOURCES & STIMULI
layout_source_n_stimuli.addWidget(self.gb_sources_n_test)
layout_source_n_stimuli.addWidget(self.gb_stimuli)
layout_source_n_stimuli.addWidget(self.gb_physiological)
#& 1.1. SOURCES & TESTING
layout_sources_n_test.addWidget(self.combo_rng_sources,0,0,1,3)
layout_sources_n_test.addWidget(butt_refresh_sources,0,3,1,1)
layout_sources_n_test.addWidget(self.tb_gen_bits,0,4,1,3)
layout_sources_n_test.addWidget(butt_generate_bits,0,7,1,1)
self.gb_sources_n_test.setLayout(layout_sources_n_test)
#& 1.2. STIMULI
layout_stimuli.addWidget(butt_neutral_stimuli)
layout_stimuli.addWidget(butt_excitatory_stimuli)
self.gb_stimuli.setLayout(layout_stimuli)
#& 1.3. PHYSIOLOGICAL
layout_physiological.addWidget(self.tb_neulog_port,0,0,1,1)
layout_physiological.addWidget(butt_refresh_neulog,0,1,1,1)
layout_physiological.addWidget(butt_refresh_physiological,0,2,1,5)
layout_physiological.addWidget(self.cb_skin_conductance,1,0,1,1)
layout_physiological.addWidget(self.cb_heart_rate,2,0,1,1)
layout_physiological.addWidget(self.cb_brainwaves,3,0,1,1)
layout_physiological.addWidget(self.combo_skin_conductance,1,1,1,1)
layout_physiological.addWidget(self.combo_heart_rate,2,1,1,1)
layout_physiological.addWidget(self.combo_brainwaves,3,1,1,1)
layout_physiological.addWidget(self.tb_skin_conductance_test,1,2,1,2)
layout_physiological.addWidget(self.tb_heart_rate_test,2,2,1,2)
layout_physiological.addWidget(self.tb_brainwaves_test,3,2,1,2)
layout_physiological.addWidget(self.butt_skin_conductance_test,1,4,1,1)
layout_physiological.addWidget(self.butt_heart_rate_test,2,4,1,1)
layout_physiological.addWidget(self.butt_brainwaves_test,3,4,1,1)
layout_physiological.addWidget(self.combo_skin_conductance_sample,1,5,1,2)
layout_physiological.addWidget(self.combo_heart_rate_sample,2,5,1,2)
layout_physiological.addWidget(self.combo_brainwaves_sample,3,5,1,2)
self.gb_physiological.setLayout(layout_physiological)
#& 2. EXP DESIGN
layout_exp_design.addLayout(layout_trial_and_screen)
layout_exp_design.addWidget(self.gb_trial_duration)
layout_exp_design.addWidget(self.gb_delays,1)
self.gb_exp_design.setLayout(layout_exp_design)
#& 2.1 TRIALS & SESSION
layout_trial_and_screen.addWidget(self.gb_session_id)
layout_trial_and_screen.addWidget(self.gb_trial_type)
layout_trial_and_screen.addWidget(self.gb_num_trials)
#& 2.1.1. SESSION ID
layout_session_id.addWidget(self.sb_session_id)
self.gb_session_id.setLayout(layout_session_id)
#& 2.1.2. TRIAL TYPE
layout_trial_type.addWidget(self.combo_trial_type)
self.gb_trial_type.setLayout(layout_trial_type)
#& 2.1.3. TRIALS NUM
layout_trial.addWidget(self.sb_num_trials)
self.gb_num_trials.setLayout(layout_trial)
#& 2.2. TRIALS DURATION
layout_duration.addWidget(self.gb_pre_screen,0,0)
layout_duration.addWidget(self.gb_stimulus_duration,1,0)
layout_duration.addWidget(self.gb_post_screen,2,0)
self.gb_trial_duration.setLayout(layout_duration)
#& 2.2.1. TRIALS DURATION ELEMENTS
layout_dur_pre.addWidget(self.sb_pre_screen)
self.gb_pre_screen.setLayout(layout_dur_pre)
layout_dur_stimulus.addWidget(self.sb_stim_duration)
self.gb_stimulus_duration.setLayout(layout_dur_stimulus)
layout_dur_post.addWidget(self.sb_post_screen)
self.gb_post_screen.setLayout(layout_dur_post)
#& 2.3. DELAYS
layout_delays.addWidget(self.gb_first_screen)
layout_delays.addWidget(self.gb_before_interval)
layout_delays.addWidget(self.gb_after_interval)
self.gb_delays.setLayout(layout_delays)
#& 2.3.1. FIRST SCREEN
layout_f_screen.addWidget(self.sb_first_screen)
self.gb_first_screen.setLayout(layout_f_screen)
        #& 2.3.2. DELAY BEFORE TRIAL
layout_before_interval.addWidget(self.sb_before_min_interval,0,0)
layout_before_interval.addWidget(self.sb_before_max_interval,0,1)
self.gb_before_interval.setLayout(layout_before_interval)
        #& 2.3.3. DELAY AFTER TRIAL
layout_after_interval.addWidget(self.sb_after_min_interval,0,0)
layout_after_interval.addWidget(self.sb_after_max_interval,0,1)
self.gb_after_interval.setLayout(layout_after_interval)
def create_data_layout(self):
#& GROUP BOXES
self.gb_session_data = QGroupBox("Session Data:")
#& TEXT BOX
# Create text boxes
self.tb_start_at = QLineEdit("Session started at:") #? Add color to background: tb_start_at.setStyleSheet("QLineEdit { background-color: rgb(220,220,220) }")
self.tb_finish_at = QLineEdit("Session finished at:")
self.tb_onset_at = QLineEdit("First trial started at:")
self.tb_stimulus_id = QTextEdit("Stimulus ID:")
self.tb_trial_id = QTextEdit("Trial ID:")
self.tb_time_start_trial = QTextEdit("Time at the start of trial:")
self.tb_dur_before_interval = QTextEdit("Interval before each trial (s):")
self.tb_onset_to_trial = QTextEdit("First trial to end of this trial (s):")
self.tb_seconds_end_trial = QTextEdit("Duration of each trial (s):")
self.tb_dur_after_interval = QTextEdit("Interval after each trial (s):")
self.tb_time_end_trial = QTextEdit("Time at the end of trial:")
#& SET LAYOUT
layout = QGridLayout()
#top lane
layout.addWidget(self.tb_start_at,0,0,1,3)
layout.addWidget(self.tb_onset_at,0,3,1,2)
layout.addWidget(self.tb_finish_at,0,5,1,3)
# below lane
layout.addWidget(self.tb_trial_id,1,0,5,1)
layout.addWidget(self.tb_stimulus_id,1,1,5,1)
layout.addWidget(self.tb_time_start_trial,1,2,5,1)
layout.addWidget(self.tb_time_end_trial,1,3,5,1)
layout.addWidget(self.tb_dur_before_interval,1,4,5,1)
layout.addWidget(self.tb_dur_after_interval,1,5,5,1)
layout.addWidget(self.tb_seconds_end_trial,1,6,5,1)
layout.addWidget(self.tb_onset_to_trial,1,7,5,1)
self.gb_session_data.setLayout(layout)
def create_phys_layout(self):
#& GROUP BOXES
self.gb_phys_data = QGroupBox("")
        self.gb_phys_time = QGroupBox("Physiological Time Data:")
self.gb_phys_trial_inst = QGroupBox("Trials and Instances:")
self.gb_phys_skin_conductance = QGroupBox("Skin Conductance Data:")
self.gb_phys_heart_rate = QGroupBox("Heart Rate Data:")
self.gb_phys_brainwaves = QGroupBox("Brainwaves Data:")
#& TEXT BOX
# Create text boxes
self.tb_phys_trial_id = QTextEdit("Trial ID [n]:")
self.tb_phys_instance_id = QTextEdit("Instance [i]:")
self.tb_phys_start_at = QLineEdit("Physiological data started at:")
self.tb_phys_finish_at = QLineEdit("Physiological data finished at:")
self.tb_skin_conductance_values = QTextEdit("Skin conductance values [xi]:")
self.tb_skin_conductance_timestamp = QTextEdit("Skin conductance timestamps [t_xi]:")
self.tb_heart_rate_values = QTextEdit("Heart rate values [yi]:")
self.tb_heart_rate_timestamp = QTextEdit("Heart rate timestamps [t_yi]:")
self.tb_brainwaves_values = QTextEdit("Brainwaves values [zi]:")
self.tb_brainwaves_timestamp = QTextEdit("Brainwaves timestamps [t_zi]:")
self.tb_skin_conductance_media = QTextEdit("Skin conductance media [mx_paa]:")
self.tb_skin_conductance_sd = QTextEdit("Skin conductance sd [sx_paa]:")
self.tb_skin_conductance_Z = QTextEdit("Skin conductance Z [Z_xi]:")
self.tb_skin_conductance_f = QTextEdit("Skin conductance f [f_xi]:")
self.tb_heart_rate_media = QTextEdit("Heart rate media [my_paa]:")
self.tb_heart_rate_sd = QTextEdit("Heart rate sd [sy_paa]:")
self.tb_heart_rate_Z = QTextEdit("Heart rate Z [Z_yi]:")
self.tb_heart_rate_f = QTextEdit("Heart rate f [f_yi]:")
self.tb_brainwaves_media = QTextEdit("Brainwaves media [mz_paa]:")
self.tb_brainwaves_sd = QTextEdit("Brainwaves sd [sz_paa]:")
self.tb_brainwaves_Z = QTextEdit("Brainwaves Z [Z_zi]:")
self.tb_brainwaves_f = QTextEdit("Brainwaves f [f_zi]:")
#& SET LAYOUT
main_layout = QGridLayout()
time_layout = QGridLayout()
trial_inst_layout = QGridLayout()
skin_conductance_layout = QGridLayout()
heart_rate_layout = QGridLayout()
brainwaves_layout = QGridLayout()
# time layout
time_layout.addWidget(self.tb_phys_start_at,0,0,1,4)
time_layout.addWidget(self.tb_phys_finish_at,0,4,1,4)
# trial and instances layout
trial_inst_layout.addWidget(self.tb_phys_trial_id,0,0,15,1)
trial_inst_layout.addWidget(self.tb_phys_instance_id,0,1,15,1)
# skin conductance layout
skin_conductance_layout.addWidget(self.tb_skin_conductance_values,0,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_timestamp,0,1,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_media,5,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_sd,5,1,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_Z,10,0,5,1)
skin_conductance_layout.addWidget(self.tb_skin_conductance_f,10,1,5,1)
# heart rate layout
heart_rate_layout.addWidget(self.tb_heart_rate_values,0,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_timestamp,0,1,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_media,5,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_sd,5,1,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_Z,10,0,5,1)
heart_rate_layout.addWidget(self.tb_heart_rate_f,10,1,5,1)
# brainwaves layout
brainwaves_layout.addWidget(self.tb_brainwaves_values,0,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_timestamp,0,1,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_media,5,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_sd,5,1,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_Z,10,0,5,1)
brainwaves_layout.addWidget(self.tb_brainwaves_f,10,1,5,1)
# Apply layouts
self.gb_phys_time.setLayout(time_layout)
self.gb_phys_trial_inst.setLayout(trial_inst_layout)
self.gb_phys_skin_conductance.setLayout(skin_conductance_layout)
self.gb_phys_heart_rate.setLayout(heart_rate_layout)
self.gb_phys_brainwaves.setLayout(brainwaves_layout)
# Apply main layout
main_layout.addWidget(self.gb_phys_time,0,0,1,8)
main_layout.addWidget(self.gb_phys_trial_inst,1,0,15,2)
main_layout.addWidget(self.gb_phys_skin_conductance,1,2,15,2)
main_layout.addWidget(self.gb_phys_heart_rate,1,4,15,2)
main_layout.addWidget(self.gb_phys_brainwaves,1,6,15,2)
self.gb_phys_data.setLayout(main_layout)
def create_stats_layout(self):
#& GROUP BOXES
self.gb_stats_data = QGroupBox("")
self.gb_stats_permut = QGroupBox("Randomized Permutation Settings:")
self.gb_stats_analysis = QGroupBox("Statistical Analysis Data:")
self.gb_stats_phys = QGroupBox("Include in analysis?:")
self.gb_stats_phys_D = QGroupBox("Physiological Difference D [D = Σ FnE - Σ FnN]:")
self.gb_stats_results = QGroupBox("Physiological Standard Normal Deviate Z [Z = (D – μD’)/ σD’]:")
#& TEXT BOX
self.tb_stats_ratio_n = QLineEdit("")
self.tb_stats_ratio_e = QLineEdit("")
self.tb_stats_shuffle = QLineEdit("5000")
self.tb_stats_session_id = QTextEdit("Session ID [S]:")
self.tb_stats_trial_id = QTextEdit("Trial ID [n]:")
self.tb_skin_conductance_ZD = QLineEdit("Skin conductance ZD:")
self.tb_skin_conductance_D = QLineEdit("Skin conductance D:")
self.tb_skin_conductance_Fn = QTextEdit("Skin conductance Fn [SUM_fx_paa]:")
self.tb_heart_rate_ZD = QLineEdit("Heart rate ZD:")
self.tb_heart_rate_D = QLineEdit("Heart rate D:")
self.tb_heart_rate_Fn = QTextEdit("Heart rate Fn [SUM_fy_paa]:")
self.tb_brainwaves_ZD = QLineEdit("Brainwaves ZD:")
self.tb_brainwaves_D = QLineEdit("Brainwaves D:")
self.tb_brainwaves_Fn = QTextEdit("Brainwaves Fn [SUM_fz_paa]:")
#& LABELS
self.lb_stats_ratio = QLabel("Ratio (E:N):")
self.lb_stats_dotdot = QLabel(":")
self.lb_stats_shuffle = QLabel('Randomized permutation cycles:')
#& CHECKBOXES
self.cb_stats_skin_conductance = QCheckBox("Skin Conductance")
self.cb_stats_heart_rate = QCheckBox("Heart Rate")
self.cb_stats_brainwaves = QCheckBox("Brainwaves")
#& BUTTONS
butt_shuffle = QPushButton('BEGIN ANALYSIS')
butt_shuffle.clicked.connect(self.click_shuffle)
#& SET LAYOUT
main_layout = QGridLayout()
ratio_layout = QHBoxLayout()
shuffle_layout = QHBoxLayout()
permut_layout = QGridLayout()
analysis_layout = QHBoxLayout()
phys_layout = QHBoxLayout()
phys_D_layout = QHBoxLayout()
results_layout = QHBoxLayout()
# permut layout
ratio_layout.addWidget(self.lb_stats_ratio)
ratio_layout.addWidget(self.tb_stats_ratio_e)
ratio_layout.addWidget(self.lb_stats_dotdot)
ratio_layout.addWidget(self.tb_stats_ratio_n)
shuffle_layout.addWidget(self.lb_stats_shuffle)
shuffle_layout.addWidget(self.tb_stats_shuffle)
phys_layout.addWidget(self.cb_stats_skin_conductance)
phys_layout.addWidget(self.cb_stats_heart_rate)
phys_layout.addWidget(self.cb_stats_brainwaves)
self.gb_stats_phys.setLayout(phys_layout)
phys_D_layout.addWidget(self.tb_skin_conductance_D)
phys_D_layout.addWidget(self.tb_heart_rate_D)
phys_D_layout.addWidget(self.tb_brainwaves_D)
self.gb_stats_phys_D.setLayout(phys_D_layout)
permut_layout.addLayout(ratio_layout,0,0,1,1)
permut_layout.addLayout(shuffle_layout,1,0,1,1)
permut_layout.addWidget(self.gb_stats_phys,0,1,2,2)
permut_layout.addWidget(self.gb_stats_phys_D,0,3,2,2)
# session and trials layout
analysis_layout.addWidget(self.tb_stats_session_id)
analysis_layout.addWidget(self.tb_stats_trial_id)
analysis_layout.addWidget(self.tb_skin_conductance_Fn)
analysis_layout.addWidget(self.tb_heart_rate_Fn)
analysis_layout.addWidget(self.tb_brainwaves_Fn)
# Results layout
results_layout.addWidget(self.tb_skin_conductance_ZD)
results_layout.addWidget(self.tb_heart_rate_ZD)
results_layout.addWidget(self.tb_brainwaves_ZD)
# Apply layouts
self.gb_stats_permut.setLayout(permut_layout)
self.gb_stats_analysis.setLayout(analysis_layout)
self.gb_stats_results.setLayout(results_layout)
# Apply main layout
main_layout.addWidget(self.gb_stats_permut,0,0,4,5)
main_layout.addWidget(butt_shuffle,4,0,1,5)
main_layout.addWidget(self.gb_stats_analysis,5,0,10,5)
main_layout.addWidget(self.gb_stats_results,15,2,1,3)
self.gb_stats_data.setLayout(main_layout)
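    # A minimal hedged sketch of the randomized-permutation statistic implied by the group box
    # labels above (D = sum of FnE minus sum of FnN; ZD = (D - mean(D')) / sd(D')). The method
    # name and arguments are illustrative; the real entry point is click_shuffle.
    def _permutation_zd_sketch(self, fn_excitatory, fn_neutral, cycles=5000):
        observed_d = sum(fn_excitatory) - sum(fn_neutral)
        pooled = list(fn_excitatory) + list(fn_neutral)
        n_e = len(fn_excitatory)
        shuffled_ds = []
        for _ in range(cycles):
            random.shuffle(pooled)  # relabel trials at random, keeping the E:N ratio
            shuffled_ds.append(sum(pooled[:n_e]) - sum(pooled[n_e:]))
        mu_d = numpy.mean(shuffled_ds)
        sigma_d = numpy.std(shuffled_ds)
        return (observed_d - mu_d) / sigma_d  # standard normal deviate ZD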
def create_buttons(self):
#& BUTTONS
self.butt_start_session = QPushButton("START SESSION")
self.butt_start_session.clicked.connect(self.click_start_session)
self.butt_stop = QPushButton("STOP SESSION")
self.butt_stop.clicked.connect(self.click_stop)
self.butt_clear_data = QPushButton("Clear All Data")
self.butt_clear_data.clicked.connect(self.click_clear_data)
self.butt_export_CSV = QPushButton("Export Session Data to CSV")
self.butt_export_CSV.clicked.connect(self.click_export_CSV)
self.butt_export_CSV_phys = QPushButton("Export Physiological Data to CSV")
self.butt_export_CSV_phys.clicked.connect(self.click_export_CSV_phys)
#& SET LAYOUT
self.layout_buttons = QGridLayout()
self.layout_buttons.addWidget(self.butt_start_session,0,0,1,4)
self.layout_buttons.addWidget(self.butt_stop,1,0)
self.layout_buttons.addWidget(self.butt_clear_data,1,1)
self.layout_buttons.addWidget(self.butt_export_CSV,1,2)
self.layout_buttons.addWidget(self.butt_export_CSV_phys,1,3)
#& CLICK BUTTONS
def click_start_session(self):
# Call start_session with stated number of trials
self.start_session(int(self.sb_num_trials.value()))
def click_refresh_physiological(self): #!
pass
def click_refresh_neulog(self):
# Create neulog class
neu = Neulog(self.tb_neulog_port.text())
# Check the status of the Neulog server
status = neu.get_status()
# If server is ready, clear combos, add "neulog" and message "Ready"
if status == 'Neulog API status: Ready':
self.combo_skin_conductance.clear()
self.combo_heart_rate.clear()
self.combo_brainwaves.clear()
self.combo_skin_conductance.addItem(neu.name)
self.combo_heart_rate.addItem(neu.name)
self.combo_brainwaves.addItem(neu.name)
QMessageBox.about(self, "Neulog", "Neulog API status: Ready")
# If server is in experiment, stop experiment and message "stopping experiment"
elif status == 'Neulog API status: Experiment':
QMessageBox.about(self, "Neulog", "Stopping Neulog experiment, try again...")
neu.exp_stop()
else:
QMessageBox.about(self, "Neulog", "Impossible to connect, check port number")
def click_skin_conductance_test(self):
# Create neulog class with GSR sensor
neu = Neulog(self.tb_neulog_port.text(), 'GSR')
# Set GSR sensor range to miliSiemens
neu.set_sensor_range('2')
# if neulog is selected...
if neu.name == self.combo_skin_conductance.currentText():
# Obtain values
self.tb_skin_conductance_test.setText("GSR: " + str(neu.get_values()[0]))
else:
pass
def click_heart_rate_test(self):
# Create neulog class with Pulse sensor
neu = Neulog(self.tb_neulog_port.text(), 'Pulse')
neu.set_sensor_range('1')
# if neulog is selected...
if neu.name == self.combo_heart_rate.currentText():
# Obtain values
self.tb_heart_rate_test.setText("Pulse: " + str(neu.get_values()[0]))
else:
pass
def click_brainwaves_test(self): #!
pass
def check_skin_conductance(self):
if self.cb_skin_conductance.isChecked():
self.combo_skin_conductance.setEnabled(True)
self.tb_skin_conductance_test.setEnabled(True)
self.butt_skin_conductance_test.setEnabled(True)
self.combo_skin_conductance_sample.setEnabled(True)
else:
self.combo_skin_conductance.setEnabled(False)
self.tb_skin_conductance_test.setEnabled(False)
self.butt_skin_conductance_test.setEnabled(False)
self.combo_skin_conductance_sample.setEnabled(False)
def check_heart_rate(self):
if self.cb_heart_rate.isChecked():
self.combo_heart_rate.setEnabled(True)
self.tb_heart_rate_test.setEnabled(True)
self.butt_heart_rate_test.setEnabled(True)
self.combo_heart_rate_sample.setEnabled(True)
else:
self.combo_heart_rate.setEnabled(False)
self.tb_heart_rate_test.setEnabled(False)
self.butt_heart_rate_test.setEnabled(False)
self.combo_heart_rate_sample.setEnabled(False)
def check_brainwaves(self):
if self.cb_brainwaves.isChecked():
self.combo_brainwaves.setEnabled(True)
self.tb_brainwaves_test.setEnabled(True)
self.butt_brainwaves_test.setEnabled(True)
self.combo_brainwaves_sample.setEnabled(True)
else:
self.combo_brainwaves.setEnabled(False)
self.tb_brainwaves_test.setEnabled(False)
self.butt_brainwaves_test.setEnabled(False)
self.combo_brainwaves_sample.setEnabled(False)
def click_refresh_sources(self):
self.combo_rng_sources.clear()
pseudo = Pseudo_RNG()
self.combo_rng_sources.addItem(pseudo.name)
psyleron = PsyREG()
if psyleron.count_PsyREGs() >= 1:
self.combo_rng_sources.addItem(str(psyleron.get_name()))
else:
pass
def click_generate_bits(self):
self.tb_gen_bits.clear()
# self.gen_bits.setText("00101")
psyleron = PsyREG()
pseudo = Pseudo_RNG()
if str(psyleron.get_name()) == self.combo_rng_sources.currentText():
if psyleron.count_PsyREGs() >= 1:
self.tb_gen_bits.setText("Psyleron:" + str(psyleron.get_bits(6)))
psyleron.clear_RNG()
psyleron.release_RNG()
else:
QMessageBox.about(self, "ERROR", "Psyleron didn't send bits")
else:
if pseudo.name == self.combo_rng_sources.currentText():
self.tb_gen_bits.setText("Pseudo-RNG:" + str(pseudo.get_bits(6)))
else:
QMessageBox.about(self, "ERROR", "Pseudo-RNG didn't send bits")
def click_clear_data(self):
# Establish again the normal texts
self.tb_start_at.setText("Session started at:")
self.tb_finish_at.setText("Session finished at:")
self.tb_onset_at.setText("First trial started at:")
self.tb_skin_conductance_D.setText("Skin conductance D:")
self.tb_heart_rate_D.setText("Heart rate D:")
self.tb_brainwaves_D.setText("Brainwaves D:")
self.tb_trial_id.setText("Trial ID:")
self.tb_stimulus_id.setText("Stimulus ID:")
self.tb_time_start_trial.setText("Time at the start of trial:")
self.tb_onset_to_trial.setText("First trial to end of this trial (s):")
self.tb_seconds_end_trial.setText("Duration of each trial (s):")
self.tb_dur_after_interval.setText("Interval after each trial (s):")
        self.tb_dur_before_interval.setText("Interval before each trial (s):")
self.tb_time_end_trial.setText("Time at the end of trial:")
self.tb_skin_conductance_Fn.setText("Skin conductance Fn [Σf_xi_paa]:")
self.tb_heart_rate_Fn.setText("Heart rate Fn [Σf_yi_paa]:")
self.tb_brainwaves_Fn.setText("Brainwaves Fn [Σf_zi_paa]:")
self.tb_phys_start_at.setText("Physiological data started at:")
self.tb_phys_finish_at.setText("Physiological data finished at:")
self.tb_phys_trial_id.setText("Trial ID [n]:")
self.tb_phys_instance_id.setText("Instance [i]:")
self.tb_skin_conductance_values.setText("Skin conductance values [xi]:")
self.tb_skin_conductance_timestamp.setText("Skin conductance timestamps [t_xi]:")
self.tb_skin_conductance_media.setText("Skin conductance media [mx_paa]:")
self.tb_skin_conductance_sd.setText("Skin conductance sd [sx_paa]:")
self.tb_skin_conductance_Z.setText("Skin conductance Z [Z_xi]:")
self.tb_skin_conductance_f.setText("Skin conductance f [f_xi]:")
self.tb_heart_rate_values.setText("Heart rate values [yi]:")
self.tb_heart_rate_timestamp.setText("Heart rate timestamps [t_yi]:")
self.tb_heart_rate_media.setText("Heart rate media [my_paa]:")
self.tb_heart_rate_sd.setText("Heart rate sd [sy_paa]:")
self.tb_heart_rate_Z.setText("Heart rate Z [Z_yi]:")
self.tb_heart_rate_f.setText("Heart rate f [f_yi]:")
self.tb_brainwaves_values.setText("Brainwaves values [zi]:")
self.tb_brainwaves_timestamp.setText("Brainwaves timestamps [t_zi]:")
self.tb_brainwaves_media.setText("Brainwaves media [mz_paa]:")
self.tb_brainwaves_sd.setText("Brainwaves sd [sz_paa]:")
self.tb_brainwaves_Z.setText("Brainwaves Z [Z_zi]:")
self.tb_brainwaves_f.setText("Brainwaves f [f_zi]:")
def click_export_CSV_phys(self):
# Convert text in textbox to string
str_session_id = "S" + self.sb_session_id.text()
str_phys_trial_id = self.tb_phys_trial_id.toPlainText()
str_phys_instance_id = self.tb_phys_instance_id.toPlainText()
str_skin_conductance_values = self.tb_skin_conductance_values.toPlainText()
str_skin_conductance_timestamp = self.tb_skin_conductance_timestamp.toPlainText()
str_skin_conductance_media = self.tb_skin_conductance_media.toPlainText()
str_skin_conductance_sd = self.tb_skin_conductance_sd.toPlainText()
str_skin_conductance_Z = self.tb_skin_conductance_Z.toPlainText()
str_skin_conductance_f = self.tb_skin_conductance_f.toPlainText()
str_heart_rate_values = self.tb_heart_rate_values.toPlainText()
str_heart_rate_timestamp = self.tb_heart_rate_timestamp.toPlainText()
str_heart_rate_media = self.tb_heart_rate_media.toPlainText()
str_heart_rate_sd = self.tb_heart_rate_sd.toPlainText()
str_heart_rate_Z = self.tb_heart_rate_Z.toPlainText()
str_heart_rate_f = self.tb_heart_rate_f.toPlainText()
str_brainwaves_values = self.tb_brainwaves_values.toPlainText()
str_brainwaves_timestamp = self.tb_brainwaves_timestamp.toPlainText()
str_brainwaves_media = self.tb_brainwaves_media.toPlainText()
str_brainwaves_sd = self.tb_brainwaves_sd.toPlainText()
str_brainwaves_Z = self.tb_brainwaves_Z.toPlainText()
str_brainwaves_f = self.tb_brainwaves_f.toPlainText()
# Convert string to list
list_session_id = str_session_id.split("\n")
list_phys_trial_id = str_phys_trial_id.split("\n")
list_phys_instance_id = str_phys_instance_id.split("\n")
list_skin_conductance_values = str_skin_conductance_values.split("\n")
list_skin_conductance_timestamp = str_skin_conductance_timestamp.split("\n")
list_skin_conductance_media = str_skin_conductance_media.split("\n")
list_skin_conductance_sd = str_skin_conductance_sd.split("\n")
list_skin_conductance_Z = str_skin_conductance_Z.split("\n")
list_skin_conductance_f = str_skin_conductance_f.split("\n")
list_heart_rate_values = str_heart_rate_values.split("\n")
list_heart_rate_timestamp = str_heart_rate_timestamp.split("\n")
list_heart_rate_media = str_heart_rate_media.split("\n")
list_heart_rate_sd = str_heart_rate_sd.split("\n")
list_heart_rate_Z = str_heart_rate_Z.split("\n")
list_heart_rate_f = str_heart_rate_f.split("\n")
list_brainwaves_values = str_brainwaves_values.split("\n")
list_brainwaves_timestamp = str_brainwaves_timestamp.split("\n")
list_brainwaves_media = str_brainwaves_media.split("\n")
list_brainwaves_sd = str_brainwaves_sd.split("\n")
list_brainwaves_Z = str_brainwaves_Z.split("\n")
list_brainwaves_f = str_brainwaves_f.split("\n")
# Remove first line in each of the session data lists
del list_phys_trial_id[0]
del list_phys_instance_id[0]
del list_skin_conductance_values[0]
del list_skin_conductance_timestamp[0]
del list_skin_conductance_media[0]
del list_skin_conductance_sd[0]
del list_skin_conductance_Z[0]
del list_skin_conductance_f[0]
del list_heart_rate_values[0]
del list_heart_rate_timestamp[0]
del list_heart_rate_media[0]
del list_heart_rate_sd[0]
del list_heart_rate_Z[0]
del list_heart_rate_f[0]
del list_brainwaves_values[0]
del list_brainwaves_timestamp[0]
del list_brainwaves_media[0]
del list_brainwaves_sd[0]
del list_brainwaves_Z[0]
del list_brainwaves_f[0]
# Convert list to series
ser_session_id = pandas.Series(list_session_id, name='Session ID [S]:')
ser_phys_trial_id = pandas.Series(list_phys_trial_id, name='Trial ID [n]:')
ser_phys_instance_id = pandas.Series(list_phys_instance_id, name='Instance ID [i]:')
ser_skin_conductance_values = pandas.Series(list_skin_conductance_values, name='Skin Conductance Values[xi]:')
ser_skin_conductance_timestamp = pandas.Series(list_skin_conductance_timestamp, name='Skin Conductance Timestamp[t_xi]:')
ser_skin_conductance_media = pandas.Series(list_skin_conductance_media, name='Skin Conductance Media[mx_paa]:')
ser_skin_conductance_sd = pandas.Series(list_skin_conductance_sd, name='Skin Conductance SD [sx_paa]:')
ser_skin_conductance_Z = pandas.Series(list_skin_conductance_Z, name='Skin Conductance Z [Z_xi]:')
ser_skin_conductance_f = pandas.Series(list_skin_conductance_f, name='Skin Conductance f [f_xi]:')
ser_heart_rate_values = pandas.Series(list_heart_rate_values, name='Heart Rate Values [yi]:')
ser_heart_rate_timestamp = pandas.Series(list_heart_rate_timestamp, name='Heart Rate Timestamp [t_yi]:')
ser_heart_rate_media = pandas.Series(list_heart_rate_media, name='Heart Rate Media [my_paa]:')
ser_heart_rate_sd = pandas.Series(list_heart_rate_sd, name='Heart Rate SD [sy_paa]:')
ser_heart_rate_Z = pandas.Series(list_heart_rate_Z, name='Heart Rate Z [Z_yi]:')
ser_heart_rate_f = pandas.Series(list_heart_rate_f, name='Heart Rate f [f_yi]:')
        ser_brainwaves_values = pandas.Series(list_brainwaves_values, name='Brainwaves Values [zi]:')
#!/usr/bin/env python
"""Tests for `data_documenter` package."""
import pytest
import pandas as pd
from datadoc import DataDocumenter
@pytest.fixture
def input_data_frame():
data = {'col1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'col2': ['M', 'F', 'M', 'F', 'M', 'M', 'M', None, 'F', 'F'],
'col3': [2.2, 3.45, 1.2, 1.1, 5.0, 4.44, 56.6, 2.4, 5, 33.333]}
return pd.DataFrame(data)
@pytest.fixture
def var_name_expected():
"""Creates expected output for test_variable_names"""
return pd.DataFrame({'variable_name': ['col1', 'col2', 'col3'],
'variable': ['Age in years', 'Gender', 'Measure']})
@pytest.fixture
def summary_stats_expected():
"""Creates a data frame with expected output for the summary stats test"""
data = {'variable_name': ['col1', 'col3'],
'count': [10.0, 10.0],
'mean': [5.5, 11.4723],
'std': [3.0276503540974917, 18.539382454356158],
'min': [1.0, 1.1],
'25%': [3.25, 2.25],
'50%': [5.5, 3.9450000000000003],
'75%': [7.75, 5.0],
'max': [10.0, 56.6],
'Missing Values': [0, 0]}
    return pd.DataFrame(data)
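# A minimal hedged sketch, not one of the original tests: it shows how the numbers in
# summary_stats_expected can be reproduced from the input frame with pandas alone. It does
# not claim this is how DataDocumenter computes them.
def test_summary_stats_reference(input_data_frame, summary_stats_expected):
    numeric = input_data_frame.select_dtypes('number')
    stats = numeric.describe().T                 # count, mean, std, min, quartiles, max
    stats['Missing Values'] = numeric.isna().sum()
    stats = stats.rename_axis('variable_name').reset_index()
    pd.testing.assert_frame_equal(stats, summary_stats_expected)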
__author__ = "<NAME>"
__date__ = "Dec 14, 2010"
import csv
import json
from pathlib import Path
from numpy import NaN, concatenate
from openpyxl import load_workbook
from pandas import DataFrame, ExcelWriter, read_excel
from corems.mass_spectrum.output.export import HighResMassSpecExport
from corems.molecular_id.calc.SpectralSimilarity import methods_name
from corems.encapsulation.constant import Atoms
from corems.encapsulation.output import parameter_to_dict
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecfromFreq
from corems import __version__, corems_md5
import uuid
class LowResGCMSExport():
def __init__(self, out_file_path, gcms):
'''
output_type: str
'excel', 'csv', 'hdf5' or 'pandas'
'''
self.output_file = Path(out_file_path)
self.gcms = gcms
self._init_columns()
def _init_columns(self):
columns = ['Sample name', 'Peak Index', 'Retention Time', 'Retention Time Ref', 'Peak Height',
'Peak Area', 'Retention index', 'Retention index Ref', 'Retention Index Score',
'Similarity Score',
'Spectral Similarity Score',
'Compound Name',
"Chebi ID", "Kegg Compound ID",
"Inchi", "Inchi Key",
"Smiles",
"Molecular Formula",
"IUPAC Name",
"Traditional Name",
"Common Name",
'Derivatization'
]
if self.gcms.molecular_search_settings.exploratory_mode:
columns.extend(['Weighted Cosine Correlation',
'Cosine Correlation',
'Stein Scott Similarity',
'Pearson Correlation',
'Spearman Correlation',
'Kendall Tau Correlation',
'Euclidean Distance',
'Manhattan Distance',
'Jaccard Distance',
'DWT Correlation',
'DFT Correlation'])
columns.extend(list(methods_name.values()))
return columns
def get_pandas_df(self, id_label="corems:"):
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
df.name = self.gcms.sample_name
return df
def get_json(self, nan=False, id_label="corems:"):
import json
dict_data_list = self.get_list_dict_data(self.gcms)
return json.dumps(dict_data_list, sort_keys=False, indent=4, separators=(',', ': '))
def to_pandas(self, write_metadata=True, id_label="corems:"):
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
df.to_pickle(self.output_file.with_suffix('.pkl'))
if write_metadata:
self.write_settings(self.output_file.with_suffix('.pkl'), self.gcms, id_label="corems:")
def to_excel(self, write_mode='a', write_metadata=True, id_label="corems:"):
out_put_path = self.output_file.with_suffix('.xlsx')
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
if write_mode == 'a' and out_put_path.exists():
writer = ExcelWriter(out_put_path, engine='openpyxl')
# try to open an existing workbook
writer.book = load_workbook(out_put_path)
# copy existing sheets
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
# read existing file
            reader = read_excel(out_put_path)
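# A minimal hedged usage sketch (illustrative only): how the exporter above is meant to be
# driven, assuming `gcms` is a low-resolution GC-MS object produced elsewhere by corems and
# that the output path is writable.
def demo_gcms_export(gcms, out_path="gcms_results"):
    exporter = LowResGCMSExport(out_path, gcms)
    df = exporter.get_pandas_df()       # one row per annotated peak, using _init_columns()
    exporter.to_excel(write_mode='a')   # appends to the workbook if it already exists
    return df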
##################################################
# Helper functions for data loading.
##################################################
# Author: <NAME>
# Email: <EMAIL>
# Author: <NAME>
# Email: <EMAIL>
##################################################
import os
import zipfile
from io import BytesIO
import pandas as pd
def create_rwhar_dataset(raw_dir):
"""
Function to create RWHAR dataset (containing only acceleration data).
Original Author : <NAME>, <EMAIL>
Modified by: <NAME>
:return: pandas dataframe containing all data
"""
RWHAR_ACTIVITY_NUM = {
"climbingdown": 'climbing_down',
"climbingup": 'climbing_up',
"jumping": 'jumping',
"lying": 'lying',
"running": 'running',
"sitting": 'sitting',
"standing": 'standing',
"walking": 'walking',
}
RWHAR_BAND_LOCATION = {
"chest": 1,
"forearm": 2,
"head": 3,
"shin": 4,
"thigh": 5,
"upperarm": 6,
"waist": 7,
}
def check_rwhar_zip(path):
# verify that the path is to the zip containing csv and not another zip of csv
if any(".zip" in filename for filename in zipfile.ZipFile(path, "r").namelist()):
# There are multiple zips in some cases
with zipfile.ZipFile(path, "r") as temp:
path = BytesIO(temp.read(
max(temp.namelist()))) # max chosen so the exact same acc and gyr files are selected each time (repeatability)
return path
def rwhar_load_csv(path):
# Loads up the csv at given path, returns a dictionary of data at each location
path = check_rwhar_zip(path)
tables_dict = {}
with zipfile.ZipFile(path, "r") as Zip:
zip_files = Zip.namelist()
for csv in zip_files:
if "csv" in csv:
location = RWHAR_BAND_LOCATION[
csv[csv.rfind("_") + 1:csv.rfind(".")]] # location is between last _ and .csv extension
sensor = csv[:3]
prefix = sensor.lower() + "_"
table = pd.read_csv(Zip.open(csv))
table.rename(columns={"attr_x": prefix + "x",
"attr_y": prefix + "y",
"attr_z": prefix + "z",
"attr_time": "timestamp",
}, inplace=True)
table.drop(columns="id", inplace=True)
tables_dict[location] = table
return tables_dict
def rwhar_load_table_activity(path_acc):
# Logic for loading each activity zip file for acc and gyr and then merging the tables at each location
acc_tables = rwhar_load_csv(path_acc)
data = pd.DataFrame()
for loc in acc_tables.keys():
acc_tab = acc_tables[loc]
            acc_tab = pd.DataFrame(acc_tab)
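# A minimal hedged usage sketch (illustrative only): the intended call, assuming raw_dir
# points at the directory containing the downloaded RWHAR zip files.
def demo_rwhar(raw_dir="data/rwhar"):
    frame = create_rwhar_dataset(raw_dir)  # pandas dataframe with acceleration data
    print(frame.shape)
    return frame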
#!/usr/bin/python3
# Copyright 2018 BlueCat Networks. All rights reserved.
# Various Flask framework items.
import os
import sys
import codecs
from flask import (url_for, redirect, render_template,
flash, g, session, jsonify, request, Markup)
from random import randint
from collections import defaultdict
from bluecat import route, util
from main_app import app
from .anycastConfig_form import GenericFormTemplate
from .config import conf
from .anycast_config import main
import config.default_config as config
import shutil
import pandas as pd
def module_path():
return os.path.dirname(os.path.abspath(__file__))
@route(app, '/anycastConfig/anycastConfig_endpoint')
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anycastConfig_page():
form = GenericFormTemplate()
random_number = str(randint(101, 999))
session['folder_name'] = session['username'] + random_number
processing_folder = os.path.join(conf.get('processing_folder', '.'),
session['folder_name'])
if os.path.exists(processing_folder):
shutil.rmtree(processing_folder)
os.makedirs(processing_folder, mode=0o777)
source_file_path = os.path.join(conf.get('processing_folder'),
'anycast_config.py')
destination_file_path = os.path.join(processing_folder,
'anycast_config.py')
shutil.copyfile(source_file_path, destination_file_path)
print(processing_folder)
os.chmod(destination_file_path, mode=0o777)
return render_template(
'anycastConfig_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options(),
)
@route(app, '/anycastConfig/form', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anycastConfig_page_form():
form = GenericFormTemplate()
if form.validate_on_submit():
session['clientID'] = form.client_id.data
session['password'] = form.password.data
session['ip_address'] = form.ip_address.data
session['port'] = form.port.data
args = {'action': 'show_daemons'}
output = main(args)
print(output)
if not output.count('Unsuccessful'):
g.user.logger.info('SUCCESS')
return jsonify({'responce': 'this is responce',
'status': 'created connection',
'output': output})
else:
return jsonify(
{'exception': 'Please check your credentials',
'redirect': url_for(
'anycastConfiganycastConfig_anycastConfig_page')})
else:
return jsonify({'responce': 'validation failed'})
@route(app, '/anycastConfig/update_status', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_update_status():
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
@route(app, '/anycastConfig/update_textfield', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_update_textfiled():
option = request.form.get('option')
output = main({
'action': 'show_run_conf',
'daemon': option
})
return jsonify({'text_field': output})
@route(app, '/anycastConfig/update_textfield_staged', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_update_textfiled_staged():
option = request.form.get('option')
output = main({
'action': 'show_staged_conf',
'daemon': option
})
if output == '' or output.count('Unsuccessful'):
output = 'File is not staged'
return jsonify({'text_field': output})
@route(app, '/anycastConfig/update_configuration', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_update_configuration():
option = request.form.get('option')
text = request.form.get('confText')
if text.count(option):
print(text)
conf_file_path \
= os.path.join(conf.get('processing_folder'),
session['folder_name'], option+'.conf')
        with open(conf_file_path, 'w') as option_file:
            option_file.write(text)
        # File is closed here so set_staged_conf reads the fully written configuration
        print("set_staged_conf output :", main({
            'action': 'set_staged_conf',
            'daemon': option,
            'file': conf_file_path}))
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
else:
return jsonify({'exception': 'Please check your configuration file'})
@route(app, '/anycastConfig/clear_configuration', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_clear_configuration():
print("this function is clearing configuration")
option = request.form.get('option')
main({
'action': 'no_staged_conf',
'daemon': option
})
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
@route(app, '/anycastConfig/clear_run_configuration', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_clear_run_configuration():
print("this function is clearing running configuration")
option = request.form.get('option')
main({
'action': 'no_run_conf',
'daemon': option
})
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
@route(app, '/anycastConfig/run_daemon', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_run_daemon():
option = request.form.get('option')
main({
'action': 'start',
'daemon': option
})
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
@route(app, '/anycastConfig/stop_daemon', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_stop_daemon():
option = request.form.get('option')
    print('this is option for stopping the daemon', option)
main({
'action': 'pause',
'daemon': option
})
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
@route(app, '/anycastConfig/apply_configuration', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_applythestagedConfiguration():
staged_conf = main({
'action': 'apply'
})
    if staged_conf != '' and not staged_conf.count('Unsuccessful'):
daemons_status, select_field = get_stats()
return jsonify({'output': create_status_table(daemons_status),
'select_field': select_field})
else:
return jsonify({'exception': 'Stage configuration before applying'})
@route(app, '/anycastConfig/debug', methods=['POST'])
@util.workflow_permission_required('anycastConfig_page')
@util.exception_catcher
def anycastConfig_anacastConfig_debug():
print("this function is debug")
table_contents = [
["Zebra", "zebraSummary",
main({'action': 'show_debug', 'option': 'zebraSummary'})],
["", 'routes', main({'action': 'show_debug', 'option': 'routes'})],
["", 'interfaces',
main({'action': 'show_debug', 'option': 'interfaces'})],
["", 'runningConfig',
main({'action': 'show_debug', 'option': 'runningConfig'})],
["BGP", 'bgpSummary',
main({'action': 'show_debug', 'option': 'bgpSummary'})],
["", 'bgpNeighbors',
main({'action': 'show_debug', 'option': 'bgpNeighbors'})],
["OSPF", 'ospfNeighbors',
main({'action': 'show_debug', 'option': 'ospfNeighbors'})],
["", 'ospfRoutes',
main({'action': 'show_debug', 'option': 'ospfRoutes'})],
["", 'ospfRouterInfo',
main({'action': 'show_debug', 'option': 'ospfRouterInfo'})],
["", 'ospfDatabase',
main({'action': 'show_debug', 'option': 'ospfDatabase'})]]
for i, _ in enumerate(table_contents):
table_contents[i][2] =\
table_contents[i][2].replace('\n', '</div> <div>')
table_contents[i][2] = '<div>' + table_contents[i][2] + '</div>'
table_contents[i][2] = table_contents[i][2].replace('\r', ' ')
|
pd.set_option('display.max_colwidth', -1)
|
pandas.set_option
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
# Standard imports
import json
import os
from datetime import datetime, timedelta
# Third party imports
import numpy as np
import pandas as pd
import rasterio
import xarray as xr
from statsmodels.nonparametric.smoothers_lowess import lowess
def ard_preprocess(
sat_file_links,
w_df,
sat_res_x,
var_name,
interp_date_start,
interp_date_end,
w_parms,
input_days,
output_days,
ref_tm,
w_mn,
w_sd,
):
"""
    This method takes boundary satellite paths and weather data and creates an Analysis Ready Dataset (ARD).
# TODO: Add doc string or re-arrange parameters
"""
sat_file_links["sat_data"] = [
rasterio.open(x).read(1) for x in sat_file_links.filePath.values
]
getgeo1 = rasterio.open(
sat_file_links.filePath.values[0]
).transform # coordinates of farm
sat_data = np.array(sat_file_links.sat_data.values.tolist())
msk = np.broadcast_to(
np.mean(sat_data == 0, axis=0) < 1, sat_data.shape
) # mask for removing pixels with 0 value always
sat_data1 = np.where(msk, sat_data, np.nan)[
:, ::sat_res_x, ::sat_res_x
] # spatial sampling
idx = pd.date_range(interp_date_start, interp_date_end) # interpolation range
idx_time = pd.date_range(
w_df.dateTime.sort_values().values[0][:10],
w_df.dateTime.sort_values(ascending=False).values[0][:10],
)
# read satellite data into data array
data_array = (
xr.DataArray(
sat_data1,
[
("time", pd.to_datetime(sat_file_links.sceneDateTime).dt.date),
(
"lat",
getgeo1[5] + getgeo1[4] * sat_res_x * np.arange(sat_data1.shape[1]),
),
(
"long",
getgeo1[2] + getgeo1[0] * sat_res_x * np.arange(sat_data1.shape[2]),
),
],
)
.to_dataframe(var_name)
.dropna()
.unstack(level=[1, 2])
)
# lowess smoothing to remove outliers and cubic spline interpolation
data_array = data_array.sort_index(ascending=True) ## Sort before calculating xvals
xvals = (pd.Series(data_array.index) - data_array.index.values[0]).dt.days
data_inter = pd.DataFrame(
{
x: lowess(data_array[x], xvals, is_sorted=True, frac=0.2, it=0)[:, 1]
for x in data_array.columns
}
)
data_inter.index = data_array.index
data_comb_array = (
data_inter.reindex(idx, fill_value=np.nan)
.interpolate(method="cubic", limit_direction="both", limit=100)
.reindex(idx_time, fill_value=np.nan)
)
# Read Weather Data and normalization
w_df[w_parms] = (w_df[w_parms] - w_mn) / (np.maximum(w_sd, 0.001))
w_df["time"] = pd.to_datetime(w_df.dateTime).dt.date
# combine interpolated satellite data array with weather data
data_comb_df = (
data_comb_array.stack([1, 2], dropna=False)
.rename_axis(["time", "lat", "long"])
.reset_index()
)
data_comb_df["time"] =
|
pd.to_datetime(data_comb_df.time)
|
pandas.to_datetime
|
"""
Module of functions that apply to/get choice data for a trials dataframe (monkeys).
"""
from macaque.f_toolbox import *
from collections import Counter
import pandas as pd
import numpy as np
tqdm = ipynb_tqdm()
from macaque.f_trials import add_chosenEV
#%%
#from numba import jit
#from numpy import arange
#import numba
#@jit(debug=True)
def get_options(trials,
mergeBy='all',
byDates=False,
mergeSequentials=True,
sideSpecific=None,
plotTQDM=True):
'''
From a trials dataFrame, make a new dataframe which has every outcome pair and the choices between them. If sideSpecific is True,
return two dataframes for options presented on the right, and options presented on the left.
Parameters
----------
trials : DataFrame
DataFrame of trials.
sideSpecific : Boolean
if True, returns two Dataframes - one where primaries are all on the left, and one where they are all on the right
mergeBy: String
'all', 'sequenceType', or 'block'. Chooses if you want to separate psychometric data based on where they came from.
byDates: list
list of dates to select trials from + looks at choices per-day rather than all together. If byDates is None, merge all across days.
mergeSequentials: Boolean
if True merge trials from sequential blocks that share glist, or all the same gamble/safe pairs
Returns
----------
choiceDate : DataFrame
DataFrame of choices for outcome pairs, choiceData captures multiple choices, reaction times, errors, and indices
*--if sideSpecific is True--*
choiceDataA, choiceDataB are returned rather than ChoiceData.
'''
if 'chosenEV' not in trials.columns:
trials = add_chosenEV(trials) #add the chosenEV column
if 'filteredRT' not in trials.columns:
        trials.loc[:,'filteredRT'] = np.nan  # add the filteredRT column
#this runs first, and recurse the function over itself per day.
if byDates:
dates = trials.sessionDate.unique()
dfs = []
for session in tqdm(
dates, desc='Gathering Daily Choice Data',
disable=not plotTQDM):
dfs.append(
get_options(
trials.loc[trials.sessionDate == session],
mergeBy=mergeBy,
mergeSequentials=mergeSequentials,
sideSpecific=sideSpecific).assign(sessionDate=session)
)
choiceData = pd.concat(dfs, ignore_index=True)
        print('\nMerged choices from', str(len(dfs)), 'different days.')
print('There were', str(len(choiceData)),
'unique choice situations in these sessions')
return choiceDF(choiceData)
#-------------------------------------------------------------
if mergeBy.lower() == 'block':
dfs = []
for i, block in enumerate(trials.blockNo.unique()):
if len(np.unique(trials.loc[trials.blockNo == block].trialSequenceMode.unique())) > 1:
print('\n', trials.sessionDate.tolist()[0].strftime('%Y-%m-%d'),': deleted block', str(block), 'due to inconsistency')
continue
sequence = int(
np.unique(trials.loc[trials.blockNo == block]
.trialSequenceMode.unique()))
glist = np.unique(
trials.loc[trials.blockNo == block].sequenceFilename.unique())
dfs.append(
get_options(
trials.loc[trials.blockNo == block],
sideSpecific=sideSpecific).assign(
division=i,
seqType=sequence,
gList=str(glist).strip('[]')))
choiceData =
|
pd.concat(dfs, ignore_index=True)
|
pandas.concat
|
"""
Usage Instructions:
"""
import csv
import numpy as np
# import pickle
import random
import tensorflow as tf
import pandas as pd
from maml import MAML
from scene_sampling import SLICProcessor, TaskSampling
from tensorflow.python.platform import flags
from utils import tasksbatch_generator, sample_generator, meta_train_test, save_tasks, read_tasks, \
savepts_fortask
from Unsupervised_Pretraining.DAS_pretraining import DAS
from sklearn.metrics import accuracy_score
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
FLAGS = flags.FLAGS
"""hyperparameter setting"""
"""for task sampling"""
flags.DEFINE_float('M', 250, 'determine how distance influence the segmentation')
flags.DEFINE_integer('K', 256, 'number of superpixels')
flags.DEFINE_integer('loop', 5, 'number of SLIC iterations')
#flags.DEFINE_string('seg_path', './src_data/CompositeBands2.tif', 'path to segmentation result of tasks by SLIC')
flags.DEFINE_string('str_region', '', 'the region to be sampling tasks')
flags.DEFINE_string('landslide_pts', './src_data/samples_fj_rand.xlsx', 'path to (non)landslide samples')
"""for meta-train"""
flags.DEFINE_integer('mode', 3, '0:meta train part of FJ, test the other part of FJ; \
1:meta train FJ, test FL; \
2:meta train part of FJ and FL, test the other part FJ; \
3:meta train FJ and part of FL, test the other part FL')
flags.DEFINE_string('path', 'tasks', 'folder path of tasks file(excel)')
flags.DEFINE_string('basemodel', 'DAS', 'MLP: no unsupervised pretraining; DAS: pretraining with DAS')
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_string('log', './tmp/data', 'summary log path; only its truthiness is checked before writing TensorBoard summaries')
flags.DEFINE_string('logdir', './checkpoint_dir', 'directory for summaries and checkpoints.')
flags.DEFINE_integer('num_classes', 2, 'number of classes used in classification (e.g. 2-way classification, landslide and nonlandslide).')
flags.DEFINE_integer('dim_input', 16, 'dim of input data')
flags.DEFINE_integer('dim_output', 2, 'dim of output data')
flags.DEFINE_integer('meta_batch_size', 16, 'number of tasks sampled per meta-update, not nums tasks')
flags.DEFINE_integer('num_samples_each_task', 12, 'number of samples sampling from each task when training, inner_batch_size')
flags.DEFINE_integer('test_update_batch_size', -1, 'number of examples used for gradient update during adapting (K=1,3,5 in experiment, K-shot); -1: M.')
flags.DEFINE_integer('metatrain_iterations', 5001, 'number of metatraining iterations.')
flags.DEFINE_integer('num_updates', 5, 'number of inner gradient updates during training.')
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('num_samples', 2637, 'total number of number of samples in FJ and FL.')
flags.DEFINE_float('update_lr', 1e-1, 'learning rate in meta-learning task')
flags.DEFINE_float('meta_lr', 1e-4, 'the base learning rate of meta learning process')
# flags.DEFINE_bool('train', False, 'True to train, False to test.')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
def train(model, saver, sess, exp_string, tasks, resume_itr):
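    # Meta-training loop sketch: each iteration samples a batch of tasks, splits every
    # task's samples into a support half (inputa/labela) and a query half (inputb/labelb),
    # and runs either the plain pretrain op or the MAML meta-train op on that batch.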
SUMMARY_INTERVAL = 100
SAVE_INTERVAL = 1000
PRINT_INTERVAL = 1000
TEST_PRINT_INTERVAL = PRINT_INTERVAL * 5
print('Done model initializing, starting training...')
prelosses, postlosses = [], []
if resume_itr != FLAGS.pretrain_iterations + FLAGS.metatrain_iterations - 1:
if FLAGS.log:
train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)
for itr in range(resume_itr, FLAGS.pretrain_iterations + FLAGS.metatrain_iterations):
batch_x, batch_y, cnt_sample = tasksbatch_generator(tasks, FLAGS.meta_batch_size
, FLAGS.num_samples_each_task,
FLAGS.dim_input, FLAGS.dim_output) # task_batch[i]: (x, y, features)
# batch_y = _transform_labels_to_network_format(batch_y, FLAGS.num_classes)
# inputa = batch_x[:, :int(FLAGS.num_samples_each_task/2), :] # a used for training
# labela = batch_y[:, :int(FLAGS.num_samples_each_task/2), :]
# inputb = batch_x[:, int(FLAGS.num_samples_each_task/2):, :] # b used for testing
# labelb = batch_y[:, int(FLAGS.num_samples_each_task/2):, :]
inputa = batch_x[:, :int(len(batch_x[0]) / 2), :] # a used for training
labela = batch_y[:, :int(len(batch_y[0]) / 2), :]
inputb = batch_x[:, int(len(batch_x[0]) / 2):, :] # b used for testing
labelb = batch_y[:, int(len(batch_y[0]) / 2):, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela,
model.labelb: labelb, model.cnt_sample: cnt_sample}
if itr < FLAGS.pretrain_iterations:
input_tensors = [model.pretrain_op] # for comparison
else:
input_tensors = [model.metatrain_op] # meta_train
if (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):
input_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
result = sess.run(input_tensors, feed_dict)
if itr % SUMMARY_INTERVAL == 0:
prelosses.append(result[-2])
if FLAGS.log:
train_writer.add_summary(result[1], itr) # add summ_op
postlosses.append(result[-1])
if (itr != 0) and itr % PRINT_INTERVAL == 0:
if itr < FLAGS.pretrain_iterations:
print_str = 'Pretrain Iteration ' + str(itr)
else:
print_str = 'Iteration ' + str(itr - FLAGS.pretrain_iterations)
print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
print(print_str)
# print('meta_lr:'+str(sess.run(model.meta_lr)))
prelosses, postlosses = [], []
# save model
if (itr != 0) and itr % SAVE_INTERVAL == 0:
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# TODO: Once the meta loss arrive at certain threshold, break the iteration
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
def test(model, saver, sess, exp_string, elig_tasks, num_updates=5):
# few-shot learn LSM model of each task
# print('start evaluation...\n' + 'meta_lr:' + str(model.meta_lr) + 'update_lr:' + str(num_updates))
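    # Few-shot evaluation sketch: for every eligible task, start from the meta-learned
    # weights, apply num_updates manual gradient steps on the support half (inputa/labela),
    # then score the adapted fast_weights on the query half (inputb/labelb) with accuracy_score.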
print(exp_string)
total_Ytest = []
total_Ypred = []
total_Ytest1 = []
total_Ypred1 = []
sum_accuracies = []
sum_accuracies1 = []
for i in range(len(elig_tasks)):
batch_x, batch_y = sample_generator(elig_tasks[i], FLAGS.dim_input, FLAGS.dim_output) # only one task samples
if FLAGS.test_update_batch_size == -1:
inputa = batch_x[:, :int(len(batch_x[0]) / 2), :] # a used for fine tuning
labela = batch_y[:, :int(len(batch_y[0]) / 2), :]
inputb = batch_x[:, int(len(batch_x[0]) / 2):, :] # b used for testing
labelb = batch_y[:, int(len(batch_y[0]) / 2):, :]
else:
inputa = batch_x[:, :FLAGS.test_update_batch_size, :] # setting K-shot K here
labela = batch_y[:, :FLAGS.test_update_batch_size, :]
inputb = batch_x[:, FLAGS.test_update_batch_size:, :]
labelb = batch_y[:, FLAGS.test_update_batch_size:, :]
#feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb}
"""few-steps tuning"""
        with tf.variable_scope('model', reuse=True):  # reuse Variables inside the scope (as in np.normalize())
task_output = model.forward(inputa[0], model.weights, reuse=True)
task_loss = model.loss_func(task_output, labela[0])
grads = tf.gradients(task_loss,list(model.weights.values()))
gradients = dict(zip(model.weights.keys(), grads))
fast_weights = dict(zip(model.weights.keys(), [model.weights[key] -
model.update_lr*gradients[key] for key in model.weights.keys()]))
for j in range(num_updates - 1):
                loss = model.loss_func(model.forward(inputa[0], fast_weights, reuse=True), labela[0])  # fast_weights is tied to grads (stopped), but this does not affect the gradient computation here
grads = tf.gradients(loss, list(fast_weights.values()))
gradients = dict(zip(fast_weights.keys(), grads))
fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - model.update_lr*gradients[key] for key in fast_weights.keys()]))
"""后续考虑用跑op"""
# for j in range(num_update):
        # sess.run(model.pretrain_op, feed_dict=feed_dict)  # num_update iterations  # save each task's model
# saver.save(sess, './checkpoint_dir/task' + str(i) + 'model')
"""Test Evaluation"""
        output = model.forward(inputb[0], fast_weights, reuse=True)  # note: verify at test time whether model.weights holds the updated values
Y_array = sess.run(tf.nn.softmax(output)) # , feed_dict=feed_dict
total_Ypred1.extend(Y_array)
total_Ytest1.extend(labelb[0]) # save
Y_test = []
for j in range(len(labelb[0])):
Y_test.append(labelb[0][j][0])
total_Ytest.append(labelb[0][j][0])
Y_pred = []
for j in range(len(labelb[0])):
if Y_array[j][0] > Y_array[j][1]:
Y_pred.append(1)
total_Ypred.append(1)
else:
Y_pred.append(0)
total_Ypred.append(0)
accuracy = accuracy_score(Y_test, Y_pred)
sum_accuracies.append(accuracy)
# print('Test_Accuracy: %f' % accuracy)
# save prediction
total_Ypred1 = np.array(total_Ypred1)
total_Ytest1 = np.array(total_Ytest1)
arr = np.hstack((total_Ypred1, total_Ytest1))
writer = pd.ExcelWriter('mode' + str(FLAGS.mode) + 'predict.xlsx')
data_df =
|
pd.DataFrame(arr)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf"
)
tables = camelot.read_pdf(filename, pages="2")
assert_frame_equal(df, tables[0].df)
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
def test_lattice_two_tables():
df1 = pd.DataFrame(data_lattice_two_tables_1)
df2 = pd.DataFrame(data_lattice_two_tables_2)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename)
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_lattice_table_regions():
df = pd.DataFrame(data_lattice_table_regions)
filename = os.path.join(testdir, "table_region.pdf")
tables = camelot.read_pdf(filename, table_regions=["170,370,560,270"])
assert_frame_equal(df, tables[0].df)
def test_lattice_table_areas():
df = pd.DataFrame(data_lattice_table_areas)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename, table_areas=["80,693,535,448"])
assert_frame_equal(df, tables[0].df)
def test_lattice_process_background():
df = pd.DataFrame(data_lattice_process_background)
filename = os.path.join(testdir, "background_lines_1.pdf")
tables = camelot.read_pdf(filename, process_background=True)
assert_frame_equal(df, tables[1].df)
def test_lattice_copy_text():
df = pd.DataFrame(data_lattice_copy_text)
filename = os.path.join(testdir, "row_span_1.pdf")
tables = camelot.read_pdf(filename, line_scale=60, copy_text="v")
assert_frame_equal(df, tables[0].df)
def test_lattice_shift_text():
df_lt = pd.DataFrame(data_lattice_shift_text_left_top)
df_disable = pd.DataFrame(data_lattice_shift_text_disable)
df_rb = pd.DataFrame(data_lattice_shift_text_right_bottom)
filename = os.path.join(testdir, "column_span_2.pdf")
tables = camelot.read_pdf(filename, line_scale=40)
assert df_lt.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=[""])
assert df_disable.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=["r", "b"])
assert df_rb.equals(tables[0].df)
def test_lattice_arabic():
df =
|
pd.DataFrame(data_arabic)
|
pandas.DataFrame
|
import os
import gc
import time
import joblib
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, median_absolute_error
from datetime import datetime
from threading import RLock, Thread
from .util import PoolObject, yobit_err, form_traceback
from .constants import predictor_main_cols, predictor_target_col, predictor_dataset, \
predictor_dummy_cols, trained_model, learning_period
lock = RLock()
class Predictor(PoolObject):
def __init__(self):
PoolObject.__init__(self)
self.dummies = None
self.model = None
self.model_date = None
self.metrics = None
self.load_stuff()
self.data = None
self.train_data = None
self.val_data = None
self.available = True
print('predictor: started')
def get_report(self):
report = ' - Model last training date:\n'
report += ' * {0}\n'.format(self.model_date)
report += ' - Model metrics:\n'
if self.metrics is None:
report += ' * None\n'
return report
for k, v in self.metrics.items():
if k == 'cols':
report += ' * {0}:\n'.format(k)
for token in v.split(','):
report += ' > {0}\n'.format(token)
else:
report += ' * {0}: {1:.2f}\n'.format(k, v)
return report
def learn(self):
self.read_and_prepare_data()
self.train()
def predict(self, signal):
self.data = pd.DataFrame(signal, index=[0])
if self.model is None:
self.load_stuff()
self.read_and_prepare_data(to_predict=True)
data_use_cols = self.data[predictor_main_cols]
data_dummied = data_use_cols.reindex(columns=self.dummies, fill_value=0)
data_dummied.pop(predictor_target_col)
x = data_dummied
preds = self.model.predict(x)
return preds[0]
def read_and_prepare_data(self, to_predict=False):
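        # Training path (to_predict=False): load the full CSV, keep rows with a known
        # '1h_max' target, and use the most recent 75% of records. For prediction,
        # self.data is the single-signal frame already set in predict().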
if not to_predict:
self.data = pd.read_csv(predictor_dataset)
self.data = self.data[self.data['1h_max'].notnull()]
train_size = int(self.data.shape[0] * 0.75)
self.data = self.data.iloc[-train_size:].reset_index(drop=True)
self.data['date'] =
|
pd.to_datetime(self.data['date'], format='%Y-%m-%d %H:%M:%S')
|
pandas.to_datetime
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def read_hydrophobicity_scale(seq, isNew=False):
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
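# e.g. expand_grid({"a": [1, 2], "b": ["x", "y"]}) gives a 4-row DataFrame with
# columns "a" and "b" holding every (a, b) combination, in itertools.product order.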
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
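    # Build a binary native-contact map: pairwise distances between the stored coordinates
    # below DISTANCE_CUTOFF count as contacts, while pairs within MAX_OFFSET residues along
    # the chain are masked out via the band of shifted identity matrices.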
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
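    # For one frame, count per residue how many of its native contacts are currently
    # formed (non-native pairs are zeroed out by multiplying with native_contacts).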
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
constacts = dis < DISTANCE_CUTOFF
constacts = constacts*native_contacts # remove non native contacts
return np.sum(constacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
print(line_number/552)
    # 543 atoms + 9 dump header lines = 552 lines per frame
n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
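    # Gather per-replica observables (lipid, rg, energy, distance, wham output, plus the
    # optional extras toggled by the keyword flags) for all n replicas, then merge them
    # with the replica-exchange temperature log so every row carries its Temp label.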
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete_{i}.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.{i}.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
if goEnergy:
tmp = pd.read_csv(location+f"Go_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if goEnergy3H:
nEnergy = pd.read_csv(location+f"Go_3helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
if goEnergy4H:
nEnergy = pd.read_csv(location+f"Go_4helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
# lipid = lipid[["Steps","Lipid","Run"]]
all_data_list.append(data)
data = pd.concat(all_data_list)
file = f"../log{rerun}/log.lammps"
temper = pd.read_table(location+file, skiprows=2, sep=' ')
temper = temper.melt(id_vars=['Step'], value_vars=['T' + str(i) for i in range(n)], value_name="Temp", var_name="Run")
temper["Run"] = temper["Run"].str[1:].astype(int)
temper["Temp"] = "T" + temper["Temp"].astype(str)
# print(temper)
# print(wham)
t2 = temper.merge(data, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
# print(t2)
t3 = t2.assign(TotalE=t2.Energy + t2.Lipid)
return t3.sort_values(["Step", "Run"]).reset_index(drop=True)
def process_complete_temper_data_3(pre, data_folder, folder_list, rerun=-1, end=-1, n=12, bias="dis", qnqc=False, average_z=False, disReal=False, dis_h56=False, localQ=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False, label=""):
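    # For every folder and rerun index, read each bias window's replica data, tag it with
    # BiasTo, and write 1e7-step slices out as feather files (also copied into data_folder).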
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
# this one only consider rerun >=0, for the case rerun=-1, move log.lammps to log0
for i in range(rerun, end, -1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ, disReal=disReal, dis_h56=dis_h56, goEnergy=goEnergy, goEnergy3H=goEnergy3H, goEnergy4H=goEnergy4H)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{2*i}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i}e7 & Step <= {2*i+1}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
name = f"rerun_{2*i+1}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i+1}e7 & Step <= {2*i+2}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
def move_data4(data_folder, freeEnergy_folder, folder_list, temp_dict_mode=1, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
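    # Re-bin the feather data by bias and temperature, pick a Step window via
    # sample_range_mode, and write one t_<temp>_<biasName>_<bias>.dat file per combination
    # with the observable columns (and perturbed total energies) selected by chosen_mode.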
print("move data")
# dic = {"T_defined":300, "T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
if temp_dict_mode == 1:
dic = {"T0":280, "T1":300, "T2":325, "T3":350, "T4":375, "T5":400, "T6":450, "T7":500, "T8":550, "T9":600, "T10":650, "T11":700}
if temp_dict_mode == 2:
dic = {"T0":280, "T1":290, "T2":300, "T3":315, "T4":335, "T5":355, "T6":380, "T7":410, "T8":440, "T9":470, "T10":500, "T11":530}
if temp_dict_mode == 3:
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
if temp_dict_mode == 4:
dic = {"T0":300, "T1":335, "T2":373, "T3":417, "T4":465, "T5":519, "T6":579, "T7":645, "T8":720, "T9":803, "T10":896, "T11":1000}
# read in complete.feather
data_list = []
for folder in folder_list:
tmp = pd.read_feather(data_folder + folder +".feather")
data_list.append(tmp)
data = pd.concat(data_list)
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
if sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == 6:
queryCmd ='Step > 6e7 & Step <= 7e7'
elif sample_range_mode == 7:
queryCmd ='Step > 7e7 & Step <= 8e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
if sample_range_mode == -2:
tmp = oneTempAndBias.reset_index(drop=True)
else:
tmp = oneTempAndBias.query(queryCmd).reset_index()
if average_z < 5:
chosen_list = ["TotalE", "Qw", "Distance"]
elif average_z == 5:
chosen_list = ["TotalE", "Qw", "DisReal"]
chosen_list += ["z_h6"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2 or average_z == 3:
chosen_list += ["z_h6"]
if average_z == 3:
chosen_list += ["DisReal"]
if average_z == 4:
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["DisReal"]
if average_z == 6:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h56"]
if average_z == 7:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h56"] = tmp["z_h5"] + tmp["z_h6"]
tmp["z_h14"] = tmp["z_h1"] + tmp["z_h2"] + tmp["z_h3"] + tmp["z_h4"]
chosen_list += ["z_h14"]
chosen_list += ["z_h56"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h12"]
chosen_list += ["Dis_h34"]
chosen_list += ["Dis_h56"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
if chosen_mode == 2:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg,
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg)
# print(tmp.count())
if chosen_mode == 3:
chosen_list += ["AMH-Go", "Lipid", "Membrane", "Rg"]
chosen = tmp[chosen_list]
if chosen_mode == 4:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
if chosen_mode == 5:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE/10,
TotalE_perturb_go_p=0,
Go=tmp["AMH-Go"])
if chosen_mode == 6:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH,
TotalE_4=tmp.TotalE + tmp.AMH,
TotalE_5=tmp.AMH)
if chosen_mode == 7:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_3H,
TotalE_4=tmp.TotalE + tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.1*tmp.AMH,
TotalE_6=tmp.TotalE + 0.2*tmp.AMH)
if chosen_mode == 8:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H,
TotalE_4=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_6=tmp.TotalE + 0.5*tmp.AMH_3H)
if chosen_mode == 9:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1go_m=chosen.TotalE_2 - kgo*tmp["AMH-Go"],
TotalE_perturb_1go_p=chosen.TotalE_2 + kgo*tmp["AMH-Go"],
TotalE_perturb_2lipid_m=chosen.TotalE_2 - tmp.Lipid,
TotalE_perturb_2lipid_p=chosen.TotalE_2 + tmp.Lipid,
TotalE_perturb_3mem_m=chosen.TotalE_2 - tmp.Membrane,
TotalE_perturb_3mem_p=chosen.TotalE_2 + tmp.Membrane,
TotalE_perturb_4rg_m=chosen.TotalE_2 - tmp.Rg,
TotalE_perturb_4rg_p=chosen.TotalE_2 + tmp.Rg,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 10:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.3*tmp.Lipid,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.3*tmp.Lipid,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.5*tmp.Lipid,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.5*tmp.Lipid,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 11:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 1.1*0.1*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_2=tmp.TotalE + 1.1*0.2*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_3=tmp.TotalE + 1.1*0.5*tmp.AMH_4H + 0.1*tmp["AMH-Go"])
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.1*tmp.Membrane,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.1*tmp.Membrane,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.2*tmp.Membrane,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.2*tmp.Membrane,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 12:
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
z_h56=(tmp.z_h5 + tmp.z_h6)/2)
if chosen_mode == 13:
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
force = 0.1
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal - 25.1)*force,
TotalE_3=tmp.TotalE - (tmp.DisReal - 25.1)*force,
TotalE_4=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal)*force)
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# perturbation_table = {0:"original", 1:"m_go",
# 2:"p_go", 3:"m_lipid",
# 4:"p_lipid", 5:"m_mem",
# 6:"p_mem", 7:"m_rg", 8:"p_rg"}
def compute_average_z(dumpFile, outFile):
# input dump, output z.dat
z_list = []
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
z_list.append(z)
f.write(str(z)+"\n")
def compute_average_z_2(dumpFile, outFile):
# input dump, output z.dat
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
f.write("z_average, abs_z_average, z_h1, z_h2, z_h3, z_h4, z_h5, z_h6\n")
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
f.write(str(z)+ ", ")
z = np.abs(b).mean(axis=0)[2]
f.write(str(z)+ ", ")
for count, (i,j) in enumerate(helices_list):
i = i - 91
j = j - 91
z = np.mean(b[i:j], axis=0)[2]
if count == 5:
f.write(str(z))
else:
f.write(str(z)+ ", ")
f.write("\n")
def read_simulation_2(location=".", i=-1, qnqc=False, average_z=False, localQ=False, disReal=False, **kwargs):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.dat"
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
t3 = data.assign(TotalE=data.Energy + data.Lipid)
return t3.reset_index(drop=True)
def read_folder(location, match="", **kwargs):
runFolders = os.listdir(location+"/simulation")
if match == "qbias":
runFolders = [f for f in runFolders if re.match(r'qbias_[0-9]+', f)]
else:
runFolders = [f for f in runFolders if re.match(r'[0-9]+', f)]
print(runFolders)
data_list = []
for run in runFolders:
tmp = read_simulation_2(location+"/simulation/"+run+"/0/", **kwargs).assign(Run=run)
data_list.append(tmp)
return pd.concat(data_list).reset_index(drop=True)
def read_variable_folder(location, match="*_", **kwargs):
variables = glob.glob(os.path.join(location, match))
print(variables)
data_list = []
for variableFolder in variables:
tmp = variableFolder.split("/")[-1]
data_list.append(read_folder(variableFolder, **kwargs).assign(Folder=tmp))
data = pd.concat(data_list)
name = f"{datetime.today().strftime('%d_%h_%H%M%S')}.feather"
data.reset_index(drop=True).to_feather(name)
def downloadPdb(pdb_list):
os.system("mkdir -p original_pdbs")
for pdb_id in pdb_list:
pdb = f"{pdb_id.lower()[:4]}"
pdbFile = pdb+".pdb"
if not os.path.isfile("original_pdbs/"+pdbFile):
pdbl = PDBList()
name = pdbl.retrieve_pdb_file(pdb, pdir='.', file_format='pdb')
os.system(f"mv {name} original_pdbs/{pdbFile}")
def cleanPdb(pdb_list, chain=None):
os.system("mkdir -p cleaned_pdbs")
for pdb_id in pdb_list:
pdb = f"{pdb_id.lower()[:4]}"
if chain is None:
if len(pdb_id) == 5:
Chosen_chain = pdb_id[4].upper()
else:
assert(len(pdb_id) == 4)
Chosen_chain = "A"
else:
Chosen_chain = chain
pdbFile = pdb+".pdb"
# clean pdb
fixer = PDBFixer(filename="original_pdbs/"+pdbFile)
# remove unwanted chains
chains = list(fixer.topology.chains())
chains_to_remove = [i for i, x in enumerate(chains) if x.id not in Chosen_chain]
fixer.removeChains(chains_to_remove)
fixer.findMissingResidues()
# add missing residues in the middle of a chain, not ones at the start or end of the chain.
chains = list(fixer.topology.chains())
keys = fixer.missingResidues.keys()
# print(keys)
for key in list(keys):
chain = chains[key[0]]
if key[1] == 0 or key[1] == len(list(chain.residues())):
del fixer.missingResidues[key]
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.removeHeterogens(keepWater=False)
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.addMissingHydrogens(7.0)
PDBFile.writeFile(fixer.topology, fixer.positions, open("cleaned_pdbs/"+pdbFile, 'w'))
def getAllChains(pdbFile):
fixer = PDBFixer(filename=pdbFile)
# remove unwanted chains
chains = list(fixer.topology.chains())
a = ""
for i in chains:
a += i.id
return ''.join(sorted(set(a.upper().replace(" ", ""))))
def add_chain_to_pymol_pdb(location):
# location = "/Users/weilu/Research/server/nov_2018/openMM/random_start/1r69.pdb"
with open("tmp", "w") as out:
with open(location, "r") as f:
for line in f:
info = list(line)
if len(info) > 21:
info[21] = "A"
out.write("".join(info))
os.system(f"mv tmp {location}")
# ----------------------------deprecated---------------------------------------
def read_simulation(location):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
# remove_columns = ['AddedForce', 'Dis12', 'Dis34', 'Dis56']
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
file = "wham.dat"
wham = pd.read_csv(location+file)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
data = wham.merge(rgs, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(dis, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(energy, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(lipid, how='inner', left_on=["Steps"], right_on=["Steps"])
data = data.assign(TotalE=data.Energy + data.Lipid)
return data
def process_complete_temper_data_2(pre, data_folder, folder_list, rerun=-1, n=12, bias="dis", qnqc=False, average_z=False, localQ=False):
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
# this one only consider rerun >=0, for the case rerun=-1, move log.lammps to log0
for i in range(rerun+1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{i}_{dateAndTime}.feather"
data.reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder)
def move_data3(data_folder, freeEnergy_folder, folder, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
dic = {"T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
# read in complete.feather
data = pd.read_feather(data_folder + folder +".feather")
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
            elif sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
tmp = oneTempAndBias.query(queryCmd)
chosen_list = ["TotalE", "Qw", "Distance"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2:
chosen_list += ["z_h6"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
# print(tmp.count())
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
def move_data2(data_folder, freeEnergy_folder, folder, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
dic = {"T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
# read in complete.feather
data = pd.read_feather(data_folder + folder +".feather")
os.system("mkdir -p "+freeEnergy_folder+folder+sub_mode_name+"/data")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 1:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 2:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 3:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 4:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
tmp = oneTempAndBias.query(queryCmd)
chosen_list = ["TotalE", "Qw", "Distance"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2:
chosen_list += ["z_h6"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
# print(tmp.count())
chosen.to_csv(freeEnergy_folder+folder+sub_mode_name+f"/data/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# chosen
def make_metadata_2(cwd=".", k=1000.0, temps_list=["450"]):
files = glob.glob("../../data/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in files:
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def make_metadata(k=1000.0, temps_list=["450"]):
cwd = os.getcwd()
files = glob.glob("../data/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in files:
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
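# Illustrative note (an assumption, not taken from the source): each line that
# make_metadata/make_metadata_2 writes has the form
#   <data file> <temperature> <spring constant k> <bias center>
# e.g. "../../data/t_450_dis_30.dat 450 1000.0 30", which is the metadata layout
# typically fed to WHAM-style free-energy scripts.
def _example_make_metadata():
    make_metadata(k=1000.0, temps_list=["450", "500"])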
def read_complete_temper(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False):
all_lipid_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file).assign(Run=i)
lipid.columns = lipid.columns.str.strip()
# lipid = lipid[["Steps","Lipid","Run"]]
all_lipid_list.append(lipid)
lipid = pd.concat(all_lipid_list)
all_rgs_list = []
for i in range(n):
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file).assign(Run=i)
rgs.columns = rgs.columns.str.strip()
# lipid = lipid[["Steps","Lipid","Run"]]
all_rgs_list.append(rgs)
rgs = pd.concat(all_rgs_list)
all_energy_list = []
for i in range(n):
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file).assign(Run=i)
energy.columns = energy.columns.str.strip()
energy = energy[["Steps", "AMH-Go", "Membrane", "Rg", "Run"]]
all_energy_list.append(energy)
energy = pd.concat(all_energy_list)
all_dis_list = []
for i in range(n):
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file).assign(Run=i)
dis.columns = dis.columns.str.strip()
remove_columns = ['AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
all_dis_list.append(dis)
dis = pd.concat(all_dis_list)
all_wham_list = []
for i in range(n):
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
if average_z:
z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.{i}.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
all_wham_list.append(wham)
wham = pd.concat(all_wham_list)
if rerun == -1:
file = "../log.lammps"
else:
file = f"../log{rerun}/log.lammps"
temper = pd.read_table(location+file, skiprows=2, sep=' ')
temper = temper.melt(id_vars=['Step'], value_vars=['T' + str(i) for i in range(n)], value_name="Temp", var_name="Run")
temper["Run"] = temper["Run"].str[1:].astype(int)
temper["Temp"] = "T" + temper["Temp"].astype(str)
t2 = temper.merge(wham, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
t3 = t2.merge(dis, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
t4 = t3.merge(lipid, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
t5 = t4.merge(energy, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
t6 = t5.merge(rgs, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
t6 = t6.assign(TotalE=t6.Energy + t6.Lipid)
return t6
def process_complete_temper_data(pre, data_folder, folder_list, rerun=-1, n=12, bias="dis", qnqc=False, average_z=False, localQ=False):
print("process temp data")
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
complete_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
all_data_list = []
if rerun == -1:
location = one_simulation + "/0/"
print(location)
data = read_complete_temper(location=location, n=n, rerun=rerun, qnqc=qnqc, average_z=average_z, localQ=localQ)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data)
else:
for i in range(rerun+1):
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data)
data = pd.concat(all_data_list).assign(BiasTo=bias_num).reset_index(drop=True)
if localQ:
print("hi")
else:
                data.to_csv(os.path.join(pre, folder, f"data/{bias_num}.csv"))
complete_data_list.append(data)
# temps = list(dic.keys())
complete_data = pd.concat(complete_data_list)
name = f"{datetime.today().strftime('%d_%h_%H%M%S')}.feather"
complete_data.reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+folder+".feather")
def read_temper(n=4, location=".", rerun=-1, qnqc=False):
all_lipid_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file).assign(Run=i)
lipid.columns = lipid.columns.str.strip()
lipid = lipid[["Steps","Lipid","Run"]]
all_lipid_list.append(lipid)
lipid = pd.concat(all_lipid_list)
all_energy_list = []
for i in range(n):
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file).assign(Run=i)
energy.columns = energy.columns.str.strip()
energy = energy[["Steps", "AMH-Go", "Membrane", "Rg", "Run"]]
all_energy_list.append(energy)
energy = pd.concat(all_energy_list)
all_dis_list = []
for i in range(n):
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file).assign(Run=i)
dis.columns = dis.columns.str.strip()
remove_columns = ['AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
all_dis_list.append(dis)
dis = pd.concat(all_dis_list)
all_wham_list = []
for i in range(n):
file = "wham.{}.dat".format(i)
wham =
|
pd.read_csv(location+file)
|
pandas.read_csv
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
AUTHOR = 'ipetrash'
import config
from common import get_log_list_by_author
author_by_log = get_log_list_by_author(config.SVN_FILE_NAME)
# Collect commits per month/year
from datetime import datetime
records = [datetime(log.date.year, log.date.month, 1) for log in author_by_log[AUTHOR]]
import pandas as pd
df =
|
pd.DataFrame(data=records, columns=['year_month'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 3 15:28:09 2021
@author: <NAME>
"""
import pandas as pd
from collections import OrderedDict
import numpy as np
from numpy import linalg as LA
import math
from scipy.integrate import quad
import time
import copy
import itertools
def preprocessor(DataFrame):
"""
Function responsible for early preprocessing of the input data frames
- creates a list of body parts labeled by the neural net
- creates a trimmed frame, such that only relevant numerical data is included (i.e.: x, y coords and p-vals)
Parameters
----------
Data frames as inputs
Returns
-------
    The function returns the trimmed, preprocessed data frame.
    It also returns a list of body parts.
"""
ResetColNames = {
DataFrame.columns.values[Ind]:Ind for Ind in range(len(DataFrame.columns.values))
}
ProcessedFrame = DataFrame.rename(columns=ResetColNames).drop([0], axis = 1)
BodyParts = list(OrderedDict.fromkeys(list(ProcessedFrame.iloc[0,])))
BodyParts = [Names for Names in BodyParts if Names != "bodyparts"]
TrimmedFrame = ProcessedFrame.iloc[2:,]
TrimmedFrame = TrimmedFrame.reset_index(drop=True)
return(TrimmedFrame, BodyParts)
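# Minimal usage sketch (the csv name is hypothetical): the file is read with
# header=None so that the label rows stay inside the frame, since preprocessor()
# pulls the body-part names from the first row and treats the first two rows as
# headers before trimming.
def _example_preprocessor():
    raw = pd.read_csv("dlc_output_example.csv", header=None)
    trimmed, body_parts = preprocessor(raw)
    print(body_parts)        # e.g. ['Head', 'Right_Ear', 'Left_Ear', 'Body']
    print(trimmed.head())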
def checkPVals(DataFrame, CutOff):
"""
    Function responsible for processing p-values, namely omitting p-values and their associated
    coordinates below the user-defined cutoff by forward-filling the last valid observation.
    Parameters
    ----------
    Data frame and the p-value cutoff as inputs.
    Takes the three columns that are associated with a label (X, Y, p-val), handled in the while loop,
    and masks then forward-fills any row whose p-val falls below the cutoff.
    Returns
    -------
    The function returns the corrected data frame.
"""
    #This loop assigns the first p-value in every label column to 1, to serve as a reference point.
for Cols in DataFrame.columns.values:
if Cols % 3 == 0:
if float(DataFrame[Cols][0]) < CutOff:
DataFrame.loc[0, Cols] = 1.0
Cols = 3
#While loop iterating through every 3rd column (p-val column)
#ffill = forward fill, propagates last valid observation forward.
#Values with p-val < cutoff masked.
while Cols <= max(DataFrame.columns.values):
Query = [i for i in range(Cols-2, Cols+1)]
DataFrame[Query] = DataFrame[Query].mask(pd.to_numeric(DataFrame[Cols], downcast="float") < CutOff).ffill()
Cols += 3
return(DataFrame)
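# Toy sketch of what checkPVals does (illustration only). It assumes the integer
# column layout produced by preprocessor: columns 1, 2, 3 hold x, y, p-val of the
# first label, and every p-val column index is a multiple of 3.
def _example_check_pvals():
    toy = pd.DataFrame({1: ["10", "11", "12"],
                        2: ["20", "21", "22"],
                        3: ["0.99", "0.10", "0.95"]})
    cleaned = checkPVals(toy, CutOff=0.8)
    # Row 1 has p-val 0.10 < 0.8, so its x, y and p-val are forward-filled from row 0.
    print(cleaned)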
def predictLabelLocation(DataFrame, CutOff, LabelsFrom, colNames, PredictLabel):
"""
    Function responsible for predicting the location of a poorly tracked label (p-val below the cutoff)
    from an adjacent, well-tracked label, using the previous frame's direction vector and the
    parallelogram law.
    Parameters
    ----------
    Data frame, the p-value cutoff, the labels to predict from, the column names, and the label to predict.
    Returns
    -------
    The function returns the data frame with the predicted coordinates filled in,
    along with the list of p-values for the predicted label.
"""
OldColumns = list(DataFrame.columns.values)
FeatureList = ["_x", "_y", "_p-val"]
NewCols = [f"{ColName}{Feature}" for ColName, Feature in itertools.product(colNames, FeatureList)]
DataFrame = DataFrame.rename(columns={DataFrame.columns.values[Ind]:NewCols[Ind] for Ind in range(len(NewCols))})
NewColumns = list(DataFrame.columns.values)
for Cols in DataFrame.columns.values:
DataFrame[Cols] = pd.to_numeric(DataFrame[Cols], downcast="float")
#If the direction vector remains the same, there will be an over-estimation in the new position of the head if you continue to reuse
#the same direction vector. Maybe scale the direction vector.
ReferenceDirection = []
ScaledVec = []
BodyPart = []
for Ind, PVals in enumerate(DataFrame[f"{PredictLabel}_p-val"]):
if (PVals < CutOff):
##############
#Choose surrounding label
##############
AdjacentLabel = [Label for Label in LabelsFrom if DataFrame[f"{Label}_p-val"][Ind] >= CutOff]
if ((len(AdjacentLabel) != 0) and (Ind != 0)):
if (DataFrame[f"{PredictLabel}_p-val"][Ind - 1] >= CutOff):
##########
#Create Direction Vectors between the first adjacent label available in the list
#Parallelogram law
##########
DirectionVec = [DataFrame[f"{PredictLabel}_x"][Ind - 1] - DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1],
DataFrame[f"{PredictLabel}_y"][Ind - 1] - DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1]]
ReferenceDirection = DirectionVec
elif ((DataFrame[f"{PredictLabel}_p-val"][Ind - 1] < CutOff) and (len(ReferenceDirection) == 0)):
ReferenceDirection = [0, 0]
###########
                #Compute the displacement of the available surrounding label
#Compute the vector addition (parallelogram law) and scale the Ind - 1 first available adjacent label by it
###########
Displacement = [DataFrame[f"{AdjacentLabel[0]}_x"][Ind] - DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1],
DataFrame[f"{AdjacentLabel[0]}_y"][Ind] - DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1]]
Scale = np.add(ReferenceDirection, Displacement)
DataFrame[f"{PredictLabel}_x"][Ind] = DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1] + Scale[0]
DataFrame[f"{PredictLabel}_y"][Ind] = DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1] + Scale[1]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 4.5
Norm = LA.norm(Scale)
# elif (len(AdjacentLabel) == 0):
# print(AdjacentLabel)
# print(max(ScaledVec), min(ScaledVec), np.std(ScaledVec), np.average(ScaledVec))
# print(BodyPart[ScaledVec.index(max(ScaledVec))])
PVAL_PREDICTEDLABEL = list(DataFrame[f"{PredictLabel}_p-val"])
DataFrame = DataFrame.rename(columns={NewColumns[Ind]: OldColumns[Ind] for Ind in range(len(OldColumns))})
return(DataFrame, PVAL_PREDICTEDLABEL)
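# Worked numeric sketch (illustration only) of the parallelogram-law update used
# in predictLabelLocation: the previous Head minus the previous adjacent label
# gives the stored direction vector; adding the adjacent label's displacement and
# re-anchoring at the previous adjacent position gives the predicted Head.
def _example_parallelogram_update():
    head_prev = np.array([10.0, 10.0])
    ear_prev = np.array([8.0, 9.0])
    ear_now = np.array([9.0, 11.0])
    direction = head_prev - ear_prev            # (2, 1)
    displacement = ear_now - ear_prev           # (1, 2)
    predicted_head = ear_prev + direction + displacement
    print(predicted_head)                       # [11. 12.]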
def predictLabel_RotationMatrix(DataFrame, CutOff, LabelsFrom, colNames, PredictLabel):
OldColumns = list(DataFrame.columns.values)
FeatureList = ["_x", "_y", "_p-val"]
NewCols = [f"{ColName}{Feature}" for ColName, Feature in itertools.product(colNames, FeatureList)]
DataFrame = DataFrame.rename(columns={DataFrame.columns.values[Ind]:NewCols[Ind] for Ind in range(len(NewCols))})
NewColumns = list(DataFrame.columns.values)
for Cols in DataFrame.columns.values:
DataFrame[Cols] = pd.to_numeric(DataFrame[Cols], downcast="float")
ReferenceMid = []
FactorDict = {"Angle_Right":0, "Angle_Left":0}
VectorAngle = lambda V1, V2: math.acos((np.dot(V2, V1))/((np.linalg.norm(V2))*(np.linalg.norm(V1))))
RotationMatrixCW = lambda Theta: np.array(
[[math.cos(Theta), math.sin(Theta)],
[-1*math.sin(Theta), math.cos(Theta)]]
)
RotationMatrixCCW = lambda Theta: np.array(
[[math.cos(Theta), -1*math.sin(Theta)],
[math.sin(Theta), math.cos(Theta)]]
)
for Ind, PVals in enumerate(DataFrame[f"{PredictLabel}_p-val"]):
AdjacentLabel = [Label for Label in LabelsFrom if DataFrame[f"{Label}_p-val"][Ind] >= CutOff]
        #If the Head label is poorly tracked, initiate this statement
if (PVals < CutOff):
if ((DataFrame[f"{LabelsFrom[0]}_p-val"][Ind] >= CutOff) and (DataFrame[f"{LabelsFrom[1]}_p-val"][Ind] >= CutOff)):
MidPoint = [(DataFrame[f"{LabelsFrom[0]}_x"][Ind] + DataFrame[f"{LabelsFrom[1]}_x"][Ind])/2,
(DataFrame[f"{LabelsFrom[0]}_y"][Ind] + DataFrame[f"{LabelsFrom[1]}_y"][Ind])/2]
ReferenceMid = MidPoint
DataFrame[f"{PredictLabel}_x"][Ind] = ReferenceMid[0]
DataFrame[f"{PredictLabel}_y"][Ind] = ReferenceMid[1]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 3.5
            elif (((DataFrame[f"{LabelsFrom[0]}_p-val"][Ind] < CutOff) or (DataFrame[f"{LabelsFrom[1]}_p-val"][Ind] < CutOff))
                  and (len(AdjacentLabel) != 0) and (DataFrame["Body_p-val"][Ind] >= CutOff)):
#Right
if ((DataFrame[f"{LabelsFrom[0]}_p-val"][Ind]) >= CutOff):
DVec_Right = [DataFrame[f"{LabelsFrom[0]}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{LabelsFrom[0]}_y"][Ind] - DataFrame["Body_y"][Ind]]
ScaleRoation = np.dot(RotationMatrixCW(Theta=FactorDict["Angle_Right"]), DVec_Right)
DataFrame[f"{PredictLabel}_x"][Ind] = ScaleRoation[0] + DataFrame["Body_x"][Ind]
DataFrame[f"{PredictLabel}_y"][Ind] = ScaleRoation[1] + DataFrame["Body_y"][Ind]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 4.5
#Left
elif ((DataFrame[f"{LabelsFrom[1]}_p-val"][Ind]) >= CutOff):
DVec_Left = [DataFrame[f"{LabelsFrom[1]}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{LabelsFrom[1]}_y"][Ind] - DataFrame["Body_y"][Ind]]
ScaleRoation = np.dot(RotationMatrixCCW(Theta=FactorDict["Angle_Left"]), DVec_Left)
DataFrame[f"{PredictLabel}_x"][Ind] = ScaleRoation[0] + DataFrame["Body_x"][Ind]
DataFrame[f"{PredictLabel}_y"][Ind] = ScaleRoation[1] + DataFrame["Body_y"][Ind]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 2.5
PVAL_PREDICTEDLABEL = list(DataFrame[f"{PredictLabel}_p-val"])
DataFrame = DataFrame.rename(columns={NewColumns[Ind]: OldColumns[Ind] for Ind in range(len(OldColumns))})
return(DataFrame, PVAL_PREDICTEDLABEL)
def predictLabel_MidpointAdjacent(DataFrame, CutOff, LabelsFrom, colNames, PredictLabel):
OldColumns = list(DataFrame.columns.values)
FeatureList = ["_x", "_y", "_p-val"]
NewCols = [f"{ColName}{Feature}" for ColName, Feature in itertools.product(colNames, FeatureList)]
DataFrame = DataFrame.rename(columns={DataFrame.columns.values[Ind]:NewCols[Ind] for Ind in range(len(NewCols))})
NewColumns = list(DataFrame.columns.values)
for Cols in DataFrame.columns.values:
DataFrame[Cols] = pd.to_numeric(DataFrame[Cols], downcast="float")
    ReferenceMid = []
    ReferenceDirection = []
AngleDict = {"Angle_Right":0, "Angle_Left":0}
VectorAngle = lambda V1, V2: math.acos((np.dot(V2, V1))/((np.linalg.norm(V2))*(np.linalg.norm(V1))))
RotationMatrixCW = lambda Theta: np.array(
[[np.cos(Theta), np.sin(Theta)],
[-1*np.sin(Theta), np.cos(Theta)]]
)
RotationMatrixCCW = lambda Theta: np.array(
[[math.cos(Theta), -1*math.sin(Theta)],
[math.sin(Theta), math.cos(Theta)]]
)
# print(RotationMatrixCW(Theta = np.pi/2))
# print(RotationMatrixCCW(Theta = np.pi/2))
# breakpoint()
AngleList_Right = []
AngleList_Left = []
for Ind, PVals in enumerate(DataFrame[f"{PredictLabel}_p-val"]):
if ((PVals >= CutOff) and (DataFrame["Body_p-val"][Ind] >= CutOff)
and (DataFrame[f"{LabelsFrom[0]}_p-val"][Ind] >= CutOff) and (DataFrame[f"{LabelsFrom[1]}_p-val"][Ind] >= CutOff)):
DirectionVectorBody_Head = [DataFrame[f"{PredictLabel}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{PredictLabel}_y"][Ind] - DataFrame["Body_y"][Ind]]
DirectionVectorR_Ear = [DataFrame[f"{LabelsFrom[0]}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{LabelsFrom[0]}_y"][Ind] - DataFrame["Body_y"][Ind]]
DirectionVectorL_Ear = [DataFrame[f"{LabelsFrom[1]}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{LabelsFrom[1]}_y"][Ind] - DataFrame["Body_y"][Ind]]
ThetaRight = VectorAngle(DirectionVectorBody_Head, DirectionVectorR_Ear)
ThetaLeft = VectorAngle(DirectionVectorBody_Head, DirectionVectorL_Ear)
AngleList_Right.append(ThetaRight)
AngleList_Left.append(ThetaLeft)
Theta_Right = np.average(AngleList_Right)
Theta_Left = np.average(AngleList_Left)
Theta_Right_std = np.std(AngleList_Right)
Theta_Left_std = np.std(AngleList_Left)
Counter = 0
C1 = 0
C2 = 0
for Ind, PVals in enumerate(DataFrame[f"{PredictLabel}_p-val"]):
# AdjacentLabel = [Label for Label in LabelsFrom if DataFrame[f"{Label}_p-val"][Ind] >= CutOff]
        #If the Head label is poorly tracked, initiate this statement
if (PVals < CutOff):
if ((DataFrame[f"{LabelsFrom[0]}_p-val"][Ind] >= CutOff) and (DataFrame[f"{LabelsFrom[1]}_p-val"][Ind] >= CutOff)):
MidPoint = [(DataFrame[f"{LabelsFrom[0]}_x"][Ind] + DataFrame[f"{LabelsFrom[1]}_x"][Ind])/2,
(DataFrame[f"{LabelsFrom[0]}_y"][Ind] + DataFrame[f"{LabelsFrom[1]}_y"][Ind])/2]
ReferenceMid = MidPoint
elif ((DataFrame[f"{LabelsFrom[0]}_p-val"][Ind] < CutOff) or (DataFrame[f"{LabelsFrom[1]}_p-val"][Ind] < CutOff)):
##############
#Choose surrounding label
##############
AdjacentLabel = [Label for Label in LabelsFrom if DataFrame[f"{Label}_p-val"][Ind] >= CutOff]
if ((len(AdjacentLabel) != 0) and (Ind != 0)):
if (DataFrame[f"{PredictLabel}_p-val"][Ind - 1] >= CutOff):
##########
#Create Direction Vectors between the first adjacent label available in the list
#Parallelogram law
##########
DirectionVec = [DataFrame[f"{PredictLabel}_x"][Ind - 1] - DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1],
DataFrame[f"{PredictLabel}_y"][Ind - 1] - DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1]]
ReferenceDirection = DirectionVec
elif ((DataFrame[f"{PredictLabel}_p-val"][Ind - 1] < CutOff) and (len(ReferenceDirection) == 0)):
ReferenceDirection = [0, 0]
###########
                    #Compute the displacement of the available surrounding label
#Compute the vector addition (parallelogram law) and scale the Ind - 1 first available adjacent label by it
###########
Displacement = [DataFrame[f"{AdjacentLabel[0]}_x"][Ind] - DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1],
DataFrame[f"{AdjacentLabel[0]}_y"][Ind] - DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1]]
Scale = np.add(ReferenceDirection, Displacement)
#Reference Mid is a 2D coordinate
ReferenceMid = [DataFrame[f"{AdjacentLabel[0]}_x"][Ind - 1] + Scale[0], DataFrame[f"{AdjacentLabel[0]}_y"][Ind - 1] + Scale[1]]
if DataFrame["Body_p-val"][Ind] >= CutOff:
BodyAdjacent_Vector = [DataFrame[f"{AdjacentLabel[0]}_x"][Ind] - DataFrame["Body_x"][Ind],
DataFrame[f"{AdjacentLabel[0]}_y"][Ind] - DataFrame["Body_y"][Ind]]
#Create a vector from the body label to the midpoint
ScaledVec = [ReferenceMid[0] - DataFrame["Body_x"][Ind],
ReferenceMid[1] - DataFrame["Body_y"][Ind]]
Theta = VectorAngle(BodyAdjacent_Vector, ScaledVec)
VectorPosition = np.cross(BodyAdjacent_Vector, ScaledVec)
if AdjacentLabel[0] == "Left_Ear":
if (VectorPosition < 0):
C1 += 1
RotateAngle = Theta_Left + Theta
RotateVector = RotationMatrixCCW(RotateAngle).dot(ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 2.5
#Correct Position
elif (VectorPosition > 0):
C2 += 1
if (Theta < (Theta_Left - (2 * Theta_Left_std))):
RotateAngle = Theta_Left - Theta
RotateVector = RotationMatrixCCW(RotateAngle).dot(ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 2.5
elif (Theta > (Theta_Left + (2 * Theta_Left_std))):
RotateAngle = Theta - Theta_Left
RotateVector = RotationMatrixCW(RotateAngle).dot(ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 2.5
else:
DataFrame[f"{PredictLabel}_p-val"][Ind] = 4.0
elif AdjacentLabel[0] == "Right_Ear":
if VectorPosition < 0:
if (Theta < (Theta_Right - (2 * Theta_Right_std))):
RotateAngle = Theta_Right - Theta
RotateVector = RotationMatrixCW(RotateAngle).dot(ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 1.5
elif (Theta > (Theta_Right + (2 * Theta_Right_std))):
RotateAngle = Theta - Theta_Right
RotateVector = RotationMatrixCCW(RotateAngle).dot(ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 1.5
else:
DataFrame[f"{PredictLabel}_p-val"][Ind] = 4.0
elif VectorPosition > 0:
RotateAngle = Theta_Right + Theta
RotateVector = np.dot(RotationMatrixCW(RotateAngle), ScaledVec)
ReferenceMid = [DataFrame["Body_x"][Ind] + RotateVector[0], DataFrame["Body_y"][Ind] + RotateVector[1]]
DataFrame[f"{PredictLabel}_p-val"][Ind] = 1.5
# DataFrame[f"{PredictLabel}_p-val"][Ind] = 4.0
elif ((len(AdjacentLabel) == 0)):
Counter += 1
try:
DataFrame[f"{PredictLabel}_x"][Ind] = ReferenceMid[0]
DataFrame[f"{PredictLabel}_y"][Ind] = ReferenceMid[1]
except IndexError:
pass
if DataFrame[f"{PredictLabel}_p-val"][Ind] < 1.0:
DataFrame[f"{PredictLabel}_p-val"][Ind] = 3.5
# print(C1, C2)
# breakpoint()
PVAL_PREDICTEDLABEL = list(DataFrame[f"{PredictLabel}_p-val"])
#DataFrame.to_csv(r"F:\WorkFiles_XCELLeration\Video\DifferentApproaches\Combined_PgramRotation.csv")
DataFrame = DataFrame.rename(columns={NewColumns[Ind]: OldColumns[Ind] for Ind in range(len(OldColumns))})
return(DataFrame, PVAL_PREDICTEDLABEL)
def computeEuclideanDistance(DataFrame, BodyParts):
"""
Function responsible for computing the interframe Euclidean Distance
Applies the 2D Euclidean distance formula between frames on the coordinates of each tracked
label from DLC.
    d(p, q) = sqrt(sum((q_i - p_i) ** 2))
- where p, q are 2D cartesian coordinates, in this case the coordinate labels
in sequential frames.
Parameters
----------
Data frames and body part strings as inputs
Returns
-------
The function returns a list of these frames
"""
DistanceVectors = []
ColsToDrop = [Cols for Cols in DataFrame if Cols % 3 == 0]
DataFrame = DataFrame.drop(ColsToDrop, axis = 1)
CreateDirectionalVectors = lambda Vec1, Vec2: [Vals2 - Vals1 for Vals1, Vals2 in zip(Vec1, Vec2)]
ComputeNorm = lambda Vec: np.sqrt(sum(x ** 2 for x in Vec))
for Cols1, Cols2 in zip(DataFrame.columns.values[:-1], DataFrame.columns.values[1:]):
if Cols2 - Cols1 == 1:
VectorizedFrame = list(zip(pd.to_numeric(DataFrame[Cols1], downcast="float"), pd.to_numeric(DataFrame[Cols2], downcast="float")))
DirectionalVectors = list(map(CreateDirectionalVectors, VectorizedFrame[:-1], VectorizedFrame[1:]))
Norm = list(map(ComputeNorm, DirectionalVectors))
DistanceVectors.append(Norm)
EDFrame = pd.DataFrame(data={BodyParts[Ind]: DistanceVectors[Ind] for Ind in range(len(DistanceVectors))})
return(EDFrame)
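# Toy sketch (illustration only): two frames whose single label moves by (3, 4)
# give an inter-frame distance of 5. Columns follow the integer layout
# 1 = x, 2 = y, 3 = p-val, and the label name is supplied by the caller.
def _example_euclidean_distance():
    toy = pd.DataFrame({1: ["0", "3"], 2: ["0", "4"], 3: ["0.99", "0.99"]})
    print(computeEuclideanDistance(toy, ["Head"]))   # one row with value 5.0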
def computeHourlySums(DataFrameList):
"""
Function responsible for creating hourly sums, that is, the summed Euclidean
Distance for that hour (or .csv input). This represents the total motility of the
animal in the given time frame.
Parameters
----------
Data frame list as input
Returns
-------
A single dataframe containing the sums for that hour (or .csv input). The index will
act as the hour or timescale for that particular .csv, therefore it is important to ensure
that .csv files are in order.
"""
SumLists = []
for Frames in range(len(DataFrameList)):
SumFunction = DataFrameList[Frames].apply(np.sum, axis=0)
SummedFrame = pd.DataFrame(SumFunction)
SumLists.append(SummedFrame.transpose())
AdjustedFrame = pd.concat(SumLists).reset_index(drop=True)
return(AdjustedFrame)
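# Minimal sketch (illustration only): each per-hour distance table collapses to
# one row of summed motility, and the row index doubles as the hour.
def _example_hourly_sums():
    hour0 = pd.DataFrame({"Head": [1.0, 2.0], "Body": [0.5, 0.5]})
    hour1 = pd.DataFrame({"Head": [3.0, 1.0], "Body": [2.0, 1.0]})
    print(computeHourlySums([hour0, hour1]))
    #    Head  Body
    # 0   3.0   1.0
    # 1   4.0   3.0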
def computeLinearEquations(HourlyFrame):
"""
Function responsible for creating linear equations from the hourly sums
Parameters
----------
Data frame as input
Returns
-------
    A single dataframe containing the slope, intercept and hourly values of that line
"""
SlopeFunction = lambda Column: (((Column[Ind2] - Column[Ind1])/(Ind2 - Ind1)) for Ind1, Ind2 in zip(Column.index.values[:-1], Column.index.values[1:]))
Slope = [list(SlopeFunction(HourlyFrame[Cols])) for Cols in HourlyFrame]
InterceptFunction = lambda Column, Slopes, Time: ((ColVals - (SlopeVals * TimeVals))
for ColVals, SlopeVals, TimeVals in zip(Column, Slopes, Time))
Intercept = [list(InterceptFunction(HourlyFrame[Cols], Slope[rng], list(HourlyFrame.index.values))) for Cols, rng in zip(HourlyFrame, range(len(Slope)))]
Zipper = [[(slope, intercept, start, end) for slope, intercept, start, end in zip(Col1, Col2, HourlyFrame.index.values[:-1], HourlyFrame.index.values[1:])]
for Col1, Col2 in zip(Slope, Intercept)]
LinearEquationFrame = pd.DataFrame(data={
"LineEqn_{}".format(HourlyFrame.columns.values[Ind]): Zipper[Ind] for Ind in range(len(Zipper))
})
return(LinearEquationFrame)
def computeIntegrals(LinearEquationsFrame):
"""
Function responsible for computing the integral of the linear equation between two
consecutive time points
Parameters
----------
Data frame as input
Returns
-------
    A single dataframe containing the integral values (area under the curve) of the respective
    linear equation between consecutive time points.
"""
Integral = lambda m, x, b: (m*x) + b
IntegralList = [[quad(Integral, Vals[2], Vals[3], args = (Vals[0], Vals[1]))[0] for Vals in LinearEquationsFrame[Cols]] for Cols in LinearEquationsFrame]
ColNames = LinearEquationsFrame.columns.values
IntegralFrame = pd.DataFrame(data={
"Integral_{}".format(ColNames[Ind].split("_")[1]):IntegralList[Ind] for Ind in range(len(IntegralList))
})
return(IntegralFrame)
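# Worked sketch (illustration only) tying computeLinearEquations and
# computeIntegrals together: hourly sums [2, 4, 3] give two segments with slopes
# 2 and -1, and integrating each line between consecutive hours gives areas of
# 3.0 and 3.5 respectively.
def _example_linear_integrals():
    hourly = pd.DataFrame({"Head": [2.0, 4.0, 3.0]})
    eqns = computeLinearEquations(hourly)    # (slope, intercept, start, end) tuples
    print(computeIntegrals(eqns))            # Integral_Head -> [3.0, 3.5]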
#These should be moved to a residual computations folder
def computeAveragePositionStationary(InputFrame, StationaryObjectsList):
StationaryDict = {StationaryObjectsList[Ind]: [0, 0] for Ind in range(len(StationaryObjectsList))}
duplicates = [Cols for Cols in InputFrame.columns.values]
#Know that coordinate data will only ever be 2D
    #Should not operate under that a priori assumption
for Ind, Cols in enumerate(duplicates):
if Cols in duplicates and Cols + "_x" not in duplicates:
duplicates[duplicates.index(Cols)] = Cols + "_x"
else:
duplicates[duplicates.index(Cols)] = Cols + "_y"
InputFrame.columns = duplicates
for Cols in StationaryObjectsList:
XCoord = Cols + "_x"
YCoord = Cols + "_y"
AverageX = np.average(list(pd.to_numeric(InputFrame[XCoord], downcast="float")))
AverageY = np.average(list(pd.to_numeric(InputFrame[YCoord], downcast="float")))
StationaryDict[Cols][0] = AverageX
StationaryDict[Cols][1] = AverageY
StationaryFrame = pd.DataFrame(data=StationaryDict)
StationaryFrame = StationaryFrame.set_index(
|
pd.Series(["x", "y"])
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Created on August 08 08:44:25 2018
@author: <NAME>
This script is a part of the Watershed Planning Tool development project,
which is developed by Lockwood Andrews & Newnam Inc (LAN) for Harris
County Flood Control District (HCFCD).
The VB_Mgmt.py Files functions as a method for communicating with the WPT_NET executable for calling
a particular hec ras model running a computaiton and retreiving select data from the executable by means of accessing the executables output csv files.
the UpdateGDBs script developed to tie in tool results into a GIS geodatabase (GDB) .
"""
import os, sys
import subprocess
import pandas as pd
from json import loads
from random import uniform
from subprocess import CalledProcessError
import traceback
import time
from UtiltyMgmt import on_error, get_active_process, kras, ksub, clean_active_process_files, write_txt_file
from Config import WPTConfig
WPTConfig.init()
sys.path.append(os.path.dirname(__file__))
def vb_function_exit(fi, scratch_fldr):
kras()
ksub()
clean_active_process_files(scratch_fldr)
try:
if os.path.exists(fi):
os.remove(fi)
else:
pass
except:
pass
def check_if_file_locked(filepath, scratch_fldr):
"""Check's if file in file path is locked. If the file is locked an IO Error is thrown attempted to be force deleted.
Then checking if the file is locked and or present after.
Inputs:
[0] filepath - (str) file path to locked file
Ouptus:
[0] result - (Boolean) or (None) """
result = None
count = 0
while (result is None or result == True) and (count <= 4):
if os.path.exists(filepath):
try:
name, ext = os.path.splitext(os.path.basename(os.path.abspath(filepath)))
temp_name = 'test_rename'
dirnam = os.path.dirname(os.path.abspath(filepath))
os.rename(os.path.join(filepath), os.path.join(dirnam, temp_name+ext))
os.rename( os.path.join(dirnam, temp_name+ext), os.path.join(filepath))
result = False
except IOError:
msg = "\tFile {0} locked! Attempting to Remove File.\n"
time.sleep(0.1)
x = subprocess.Popen('FORCEDEL /Q "{0}"'.format(filepath), shell=False).wait()
result = True
count +=1
except:
lines = '{0}\n'.format (traceback.format_exc ())
on_error(WPTConfig.Options_NetworkFile, scratch_fldr, lines)
result = None
count += 1
else:
result = False
return result
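# Hedged usage sketch (the paths are hypothetical, not from the source): the
# function returns False when the rename probe succeeds, True when the file is
# still locked after the forced-delete attempts, and None if every attempt hit
# an unexpected error.
def _example_check_if_file_locked():
    locked = check_if_file_locked(r"C:\temp\ras_scratch\ws_df.csv", r"C:\temp\ras_scratch")
    if locked is False:
        print("safe to read the csv")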
def fetch_controller_data(prjfile, plantitle, scratch_fldr, out_data):
"""This function is used to work in unison with the "WPT_NET" Executable to operate as a go between with the HEC_RAS
Controller. The current scope of the vb executable is used to generate the output data used within the WPT code.
Args:
[0] prj file - (str) the file path to a hec-ras project file
[1] plantitle - (str) the exact title name of a hec-ras plan file
[2] scratch_fldr - (str) the file path to a folder to deposit generated output files. often linked
to the ras-scratch file.
[3] out_data - (str) the requested output data for the tool (i.e 'los_df', 'nodes', 'ws_df',
'vel_df', 'channel_info', 'inverts', or 'computes')
Outputs:
[0] - Depending on the out_data variable a csv file or nothing is generated. If a csv file is generated, the
    function will interpret and return the csv as a dataframe. Otherwise nothing is passed (i.e. 'computes').
"""
    # the out_data variable will be used to establish what type of output data is expected to be returned from the executable
sfldr = "Scratch={0}".format(scratch_fldr)
result = None
count = 0
while result is None and count <=5:
flocked = check_if_file_locked(os.path.join(scratch_fldr,"{0}.csv".format(out_data)),scratch_fldr)
lines = ['Inputs: {0}, {1}, {2}\n'.format(prjfile,plantitle,out_data), '\t\tfile locked: {0}\n'.format(flocked)]
if os.path.exists(scratch_fldr) and flocked is False:
try:
exe = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'WPT_NET.exe')
if os.path.exists(exe):
exe = exe.replace('\\','/')
kras()
# write_txt_file (os.path.join (scratch_fldr , 'VB_Inputs.txt') ,
# lines , True)
clean_active_process_files(scratch_fldr)
time.sleep(uniform(0.4,2.4))
get_active_process(scratch_fldr, False)
subprocess.check_output('"{0}" "{1}" "{2}" "{3}" "{4}"'.format(exe, prjfile, plantitle, sfldr, out_data), shell=False)
time.sleep(4.0)
get_active_process(scratch_fldr, False)
time.sleep (0.3)
if out_data == 'los_df':
# multi-dim array
fl = os.path.join(scratch_fldr, '{0}.csv'.format(out_data))
if os.path.exists(fl):
result = pd.DataFrame.from_csv(fl, index_col=False)
vb_function_exit (fl , scratch_fldr)
return result
else:
pass
# print('File {0} DNE\n'.format(fl))
elif out_data == "nodes":
# list
fl = os.path.join(scratch_fldr, '{0}.csv'.format(out_data))
if os.path.exists(fl):
result = pd.DataFrame.from_csv(fl, index_col=False)
# prep and convert to list
result = [round(sta,4) for sta in result['Riv_Sta'].tolist()]
result.sort(reverse=True)
vb_function_exit (fl , scratch_fldr)
return result
else:
pass
# print('File {0} DNE\n'.format(fl))
elif out_data == "ws_df":
# multi-dim array
fl = os.path.join(scratch_fldr, '{0}.csv'.format(out_data))
if os.path.exists(fl):
result =
|
pd.DataFrame.from_csv(fl, index_col=False)
|
pandas.DataFrame.from_csv
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: data_explore.py
@time: 2019-05-06 17:22
"""
import pandas as pd
import math
import featuretools as ft
from feature_selector import FeatureSelector
from mayiutils.datasets.data_preprocessing import DataExplore as de
if __name__ == '__main__':
mode = 2
if mode == 7:
"""
        Stop using featuretools to expand the features; construct the features manually instead
"""
dfzy = pd.read_csv('zy_all_featured_claim.csv', parse_dates=['就诊结帐费用发生日期', '入院时间', '出院时间'],
encoding='gbk')
del dfzy['ROWNUM']
del dfzy['主被保险人客户号']
del dfzy['出险人客户号']
del dfzy['就诊结帐费用发生日期']
del dfzy['自费描述']
del dfzy['部分自付描述']
del dfzy['医保支付描述']
del dfzy['出院时间']
del dfzy['event_id']
        # Construct features
        # Ratio of the out-of-pocket amount
dfzy['自费总金额'] = dfzy['自费金额'] + dfzy['部分自付金额']
        # Total out-of-pocket amount as a share of the total expense amount
dfzy['自费总金额占比'] = dfzy['自费总金额'] / dfzy['费用金额']
        # Share of the amount paid by medical insurance
dfzy['医保支付金额占比'] = dfzy['医保支付金额'] / dfzy['费用金额']
        # Average expense amount per event
dfzy['费用金额mean'] = dfzy['费用金额'] / dfzy['event_count']
# log
def tlog(x):
if x < 1:
x = 0
if x != 0:
x = math.log(x)
return x
dfzy['费用金额log'] = dfzy['费用金额'].apply(tlog)
dfzy['自费金额log'] = dfzy['自费金额'].apply(tlog)
dfzy['部分自付金额log'] = dfzy['部分自付金额'].apply(tlog)
dfzy['医保支付金额log'] = dfzy['医保支付金额'].apply(tlog)
dfzy['自费总金额log'] = dfzy['自费总金额'].apply(tlog)
dfzy['费用金额meanlog'] = dfzy['费用金额mean'].apply(tlog)
        # Build one-hot features
def build_one_hot_features(df, cols):
for col in cols:
t =
|
pd.get_dummies(dfzy[col], prefix=col)
|
pandas.get_dummies
|
from collections import Counter
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List
import pandas as pd
from analysis.src.python.evaluation.common.pandas_util import filter_df_by_single_value
from analysis.src.python.evaluation.paper_evaluation.comparison_with_other_tools.util import (
ComparisonColumnName, ERROR_CONST, TutorTask,
)
def sort_freq_dict(freq_dict: Dict[Any, int]) -> Dict[Any, int]:
return dict(sorted(freq_dict.items(), key=lambda item: item[1], reverse=True))
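# Tiny usage sketch (illustration only): sort_freq_dict orders a frequency
# dictionary by its counts, descending.
def _example_sort_freq_dict() -> None:
    assert list(sort_freq_dict({'a': 1, 'b': 3, 'c': 2})) == ['b', 'c', 'a']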
@dataclass
class TutorStatistics:
unique_users: int
task_to_freq: Dict[TutorTask, int]
task_to_error_freq: Dict[TutorTask, int]
error_to_freq: Dict[str, int]
fragments_with_error: int = 0
__separator: str = '----------'
def __init__(self, solutions_df: pd.DataFrame, to_drop_duplicates: bool = False):
if to_drop_duplicates:
solutions_df = solutions_df.drop_duplicates(ComparisonColumnName.SOLUTION.value)
self.unique_users = len(solutions_df[ComparisonColumnName.STUDENT_ID.value].unique())
self.task_to_freq = defaultdict(int)
self.task_to_error_freq = defaultdict(int)
self.error_to_freq = defaultdict(int)
for task in TutorTask:
task_df = filter_df_by_single_value(solutions_df, ComparisonColumnName.TASK_KEY.value, task.value)
self.task_to_freq[task] = task_df.shape[0]
            errors_list = list(map(lambda e_l: e_l.split(';'),
task_df[ComparisonColumnName.TUTOR_ERROR.value].dropna().values))
for cell_errors in errors_list:
for error in cell_errors:
self.error_to_freq[error.strip()] += 1
self.task_to_error_freq[task] += 1
self.fragments_with_error += 1
self.task_to_freq = sort_freq_dict(self.task_to_freq)
self.error_to_freq = sort_freq_dict(self.error_to_freq)
def print_tasks_stat(self) -> None:
print(f'Unique users count: {self.unique_users}')
print(f'Code snippets count: {sum(self.task_to_freq.values())}')
print('Tasks statistics:')
for task, freq in self.task_to_freq.items():
print(f'Task {task.value}: {freq} items; {self.task_to_error_freq[task]} with tutor errors')
print(self.__separator)
def print_error_stat(self) -> None:
print(f'{self.fragments_with_error} code fragments has errors during running by Tutor')
print(f'{len(self.error_to_freq.keys())} unique errors was found in Tutor')
print('Error statistics:')
for error, freq in self.error_to_freq.items():
print(f'{error}: {freq} items')
print(self.__separator)
@dataclass
class IssuesStatistics:
common_issue_to_freq: Dict[str, int]
tutor_uniq_issue_to_freq: Dict[str, int]
hyperstyle_uniq_issue_to_freq: Dict[str, int]
code_style_issues_count: int
fragments_count_with_code_style_issues: int
__separator: str = '----------'
# TODO: info and code style issues
def __init__(self, solutions_df: pd.DataFrame, to_drop_duplicates: bool = False):
if to_drop_duplicates:
solutions_df = solutions_df.drop_duplicates(ComparisonColumnName.SOLUTION.value)
self.common_issue_to_freq = defaultdict(int)
self.tutor_uniq_issue_to_freq = defaultdict(int)
self.hyperstyle_uniq_issue_to_freq = defaultdict(int)
solutions_df.apply(lambda row: self.__init_solution_df_row(row), axis=1)
self.common_issue_to_freq = sort_freq_dict(self.common_issue_to_freq)
self.tutor_uniq_issue_to_freq = sort_freq_dict(self.tutor_uniq_issue_to_freq)
self.hyperstyle_uniq_issue_to_freq = sort_freq_dict(self.hyperstyle_uniq_issue_to_freq)
self.code_style_issues_count = sum(solutions_df[ComparisonColumnName.CODE_STYLE_ISSUES_COUNT.value])
self.fragments_count_with_code_style_issues = len(list(
filter(lambda x: x != 0, solutions_df[ComparisonColumnName.CODE_STYLE_ISSUES_COUNT.value])))
@staticmethod
def __parse_issues(issues_str: str) -> List[str]:
if
|
pd.isna(issues_str)
|
pandas.isna
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
Built on top of version 3; uses pandas' join method to take the intersection.
Given the samples selected from the questionnaire/scale, obtain the paths of the raw data that meet the requirements.
Expected data structure: neuroimageDataPath//subject00001//files
It can also be any other data structure, as long as you specify where subjName is located.
In short, the files are finally copied somewhere else (each subject can be restricted to files meeting a condition, e.g. files ending with '.nii').
input:
# reference_file: text file containing the names of the subjects to copy (the uid from the master table)
# keywork_of_reference_uid: regular expression used to extract the unique identifier from the table
# ith_number_of_reference_uid: when the unique identifier in the table has several matches, which match to take (e.g. a name like subj0001_bold7000 may match both 0001 and 7000; choose which match to use in such cases)
# keyword_of_parent_folder_containing_target_file: which modality/folder of each subject to copy out (e.g. choose the modality when both 'resting' and 'dti' exist)
# matching_point_number_of_target_uid_in_backwards: counting backwards from the target file, the block that contains the identifier matching the reference uid (the target file itself counts as 1)
# e.g. for 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt' the unique identifier sits in the 3rd block from the end
# keyword_of_target_file_uid: regular expression used to extract the unique identifier from the MRI data
# ith_number_of_targetfile_uid: when the unique identifier in the target file has several matches, which match to take.
# keyword_of_target_file_uid: regular expression or keyword used to filter the files
# targe_file_folder: root directory of the raw data
# save_path: the top-level directory the raw data is copied into
# n_processess=5 number of threads
# is_save_log: whether to save a copy log
# is_copy: whether to perform the copy
# is_move: whether to move instead (0)
# save_into_one_or_more_folder: save into one folder per subject, or save everything into a single folder
# save_suffix: suffix of the saved files ('.nii')
# is_run: whether to actually move or copy the files (0)
# Overall, the copied files end up under: save_path/saveFolderName/subjName/files
@author: <NAME>
new feature: true multi-core multi-threaded processing; the class methods all return self
file name matching: regular-expression matching
"""
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
import time
import os
import shutil
import sys
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
class CopyFmri():
def __init__(
self,
reference_file=r'E:\wangfeidata\uid.txt',
targe_file_folder=r'E:\wangfeidata\FunImgARWD',
keywork_of_reference_uid='([1-9]\d*)',
ith_number_of_reference_uid=0,
keyword_of_target_file_uid='([1-9]\d*)',
ith_number_of_targetfile_uid=0,
matching_point_number_of_target_uid_in_backwards=2,
keywork_of_target_file_not_for_uid='nii',
keyword_of_parent_folder_containing_target_file='',
save_path=r'E:\wangfeidata',
n_processess=2,
is_save_log=1,
is_copy=0,
is_move=0,
save_into_one_or_more_folder='one_file_one_folder',
save_suffix='.nii',
is_run=0):
self.reference_file = reference_file
self.targe_file_folder = targe_file_folder
self.keywork_of_reference_uid = keywork_of_reference_uid
self.ith_number_of_reference_uid = ith_number_of_reference_uid
self.keyword_of_target_file_uid = keyword_of_target_file_uid
self.matching_point_number_of_target_uid_in_backwards = matching_point_number_of_target_uid_in_backwards
self.ith_number_of_targetfile_uid = ith_number_of_targetfile_uid
self.keywork_of_target_file_not_for_uid = keywork_of_target_file_not_for_uid
self.keyword_of_parent_folder_containing_target_file = keyword_of_parent_folder_containing_target_file
self.save_path = save_path
self.n_processess = n_processess
self.is_save_log = is_save_log
self.is_copy = is_copy
self.is_move = is_move
self.save_into_one_or_more_folder = save_into_one_or_more_folder
self.save_suffix = save_suffix
self.is_run = is_run
# %% process the input
def _after_init(self):
""" handle the init parameter
"""
        # check params
if self.is_copy == 1 & self.is_move == 1:
print('### Cannot copy and move at the same time! ###\n')
print('### please press Ctrl+C to close the progress ###\n')
# create save folder
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# read reference_file(excel or text)
try:
self.subjName_forSelect = pd.read_excel(
self.reference_file, dtype='str', header=None, index=None)
except BaseException:
self.subjName_forSelect = pd.read_csv(
self.reference_file, dtype='str', header=None)
        print('### Extracting the matching component of subjName_forSelect; digits by default ###\n### When there are multiple matches, the 1st one is used by default ###\n')
if self.keywork_of_reference_uid:
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0].str.findall(self.keywork_of_reference_uid)
self.subjName_forSelect = [self.subjName_forSelect_[self.ith_number_of_reference_uid]
for self.subjName_forSelect_ in
self.subjName_forSelect
if len(self.subjName_forSelect_)]
def walkAllPath(self):
self.allWalkPath = os.walk(self.targe_file_folder)
# allWalkPath=[allWalkPath_ for allWalkPath_ in allWalkPath]
return self
def fetch_allFilePath(self):
self.allFilePath = []
for onePath in self.allWalkPath:
for oneFile in onePath[2]:
target_folder = os.path.join(onePath[0], oneFile)
self.allFilePath.append(target_folder)
return self
def fetch_allSubjName(self):
'''
        matching_point_number_of_target_uid_in_backwards: in which block, counting backwards, the subjName sits (the first block counts as 1)
        # e.g. for 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
        # the subjName is in the 3rd block from the end
'''
self.allSubjName = self.allFilePath
for i in range(self.matching_point_number_of_target_uid_in_backwards - 1):
self.allSubjName = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSubjName_raw = self.allSubjName
return self
def fetch_folerNameContainingFile(self):
'''
        If the folder immediately above a file is not the subject name, we need to choose which folder's files to take.
        In that case, first determine the folder name above each file (possibly a modality name), then filter by your keyword.
'''
self.folerNameContainingFile = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allFilePath]
self.folerNameContainingFile = [os.path.basename(
folderName) for folderName in self.folerNameContainingFile]
return self
def fetch_allFileName(self):
'''
        Get all file names for later filtering.
        Use case: when unwanted files sit next to the ones we need,
        e.g. text files mixed in with the dicom files that we do not want.
'''
self.allFileName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allFilePath]
return self
# %% screen according several rules
def screen_pathLogicalLocation_accordingTo_yourSubjName(self):
""" 匹配subject name:注意此处用精确匹配,只有完成匹配时,才匹配成功"""
"""maker sure subjName_forSelect is pd.Series and its content is string"""
if isinstance(self.subjName_forSelect, type(pd.DataFrame([1]))):
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
if not isinstance(self.subjName_forSelect[0], str):
self.subjName_forSelect = pd.Series(
self.subjName_forSelect, dtype='str')
        # Make sure the data types of the pairs being matched are consistent!!!
try:
            # Extract the uid of all subjects
# self.logic_index_subjname=\
# np.sum(
# pd.DataFrame(
# [self.allSubjName.iloc[:,0].str.contains\
# (name_for_self) for name_for_self in self.subjName_forSelect]
# ).T,
# axis=1)
#
# self.logic_index_subjname=self.logic_index_subjname>=1
self.allSubjName = self.allSubjName.iloc[:, 0].str.findall(
self.keyword_of_target_file_uid)
            # After the regex extraction, some entries may not match and end up as empty lists; treat empty lists as non-matches and remove them
allSubjName_temp = []
for name in self.allSubjName.values:
if name:
allSubjName_temp.append(name[self.ith_number_of_targetfile_uid])
else:
allSubjName_temp.append(None)
self.allSubjName = allSubjName_temp
self.allSubjName = pd.DataFrame(self.allSubjName)
self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
self.logic_index_subjname = pd.DataFrame(
np.zeros(len(self.allSubjName)) == 1)
for i in range(len(self.subjName_forSelect)):
self.logic_index_subjname = self.logic_index_subjname.mask(
self.allSubjName == self.subjName_forSelect.iloc[i, 0], True)
except BaseException:
print('subjName mismatch subjName_forSelected!\nplease check their type')
sys.exit(0)
return self
def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(self):
""" 匹配folerNameContainingFile:注意此处用的连续模糊匹配,只要含有这个关键词,则匹配
"""
if self.keyword_of_parent_folder_containing_target_file:
self.logic_index_foler_name_containing_file = [
self.keyword_of_parent_folder_containing_target_file in oneName_ for oneName_ in self.folerNameContainingFile]
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
else:
self.logic_index_foler_name_containing_file = np.ones(
[len(self.folerNameContainingFile), 1]) == 1
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
return self
def screen_pathLogicalLocation_accordingTo_fileName(self):
""" 匹配file name (不是用于提取uid):正则表达式匹配
"""
if self.keywork_of_target_file_not_for_uid:
self.allFileName = pd.Series(self.allFileName)
self.logic_index_file_name = self.allFileName.str.contains(
self.keywork_of_target_file_not_for_uid)
else:
self.logic_index_file_name = np.ones([len(self.allFileName), 1]) == 1
self.logic_index_file_name = pd.DataFrame(self.logic_index_file_name)
return self
# %% final logical location of selfected file path
def fetch_totalLogicalLocation(self):
self.logic_index_all = pd.concat(
[
self.logic_index_file_name,
self.logic_index_foler_name_containing_file,
self.logic_index_subjname],
axis=1)
self.logic_index_all = np.sum(
self.logic_index_all,
axis=1) == np.shape(
self.logic_index_all)[1]
return self
def fetch_selfectedFilePath_accordingPathLogicalLocation(self):
# target_folder
self.allFilePath = pd.DataFrame(self.allFilePath)
self.allSelectedFilePath = self.allFilePath[self.logic_index_all]
self.allSelectedFilePath = self.allSelectedFilePath.dropna()
# uid name
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSelectedSubjName = self.allSubjName[self.logic_index_all]
self.allSelectedSubjName = self.allSelectedSubjName.dropna()
# raw name
self.allSubjName_raw = pd.DataFrame(self.allSubjName_raw)
self.allSelectedSubjName_raw = self.allSubjName_raw[self.logic_index_all]
self.allSelectedSubjName_raw = self.allSelectedSubjName_raw.dropna()
return self
# %% run copy
def copy_base(self, i, subjName):
n_allSelectedSubj = len(np.unique(self.allSelectedSubjName_raw))
        # Save each file under its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
folder_name = subjName.split('.')[0]
output_folder = os.path.join(self.save_path, folder_name)
            # Create the subjxxx folder
if not os.path.exists(output_folder):
os.makedirs(output_folder)
        # Save all files under a single folder (files named after subjxxx)
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
output_folder = os.path.join(
self.save_path, subjName + self.save_suffix)
# copying OR moving OR do nothing
fileIndex = self.allSelectedSubjName_raw[(
self.allSelectedSubjName_raw.values == subjName)].index.tolist()
if self.is_copy == 1 and self.is_move == 0:
[shutil.copy(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 1:
[shutil.move(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 0:
print('### No copy and No move ###\n')
else:
print('### Cannot copy and move at the same time! ###\n')
print('Copy the {}/{}th subject: {} OK!\n'.format(i + 1, n_allSelectedSubj, subjName))
def copy_multiprocess(self):
s = time.time()
        # Save each file under its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
pass
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
pass
else:
print(
"###没有指定复制到一个文件夹还是每个被试文件夹###\n###{}跟'all_file_one_folder' OR 'one_file_one_folder'都不符合###".format(
self.save_into_one_or_more_folder))
        # Multithreading
        # unique names
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
print('Copying...\n')
"""
        # Single-threaded version
for i,subjName in enumerate(uniSubjName):
self.copy_base(i,subjName)
"""
        # Multithreaded version
cores = multiprocessing.cpu_count()
if self.n_processess > cores:
self.n_processess = cores - 1
with ThreadPoolExecutor(self.n_processess) as executor:
for i, subjName in enumerate(uniSubjName):
executor.submit(self.copy_base, i, subjName)
print('=' * 30)
#
e = time.time()
print('Done!\nRunning time is {:.1f} second'.format(e - s))
# %%
def main_run(self):
# all target_folder and name
self._after_init()
self = self.walkAllPath()
self = self.fetch_allFilePath()
self = self.fetch_allSubjName()
self = self.fetch_allFileName()
# selfect
self = self.fetch_folerNameContainingFile()
        # logicLoc_subjName: the logical locations obtained by matching subject names, and so on for the others.
        # fileName != subjName, e.g. fileName can be xxx.nii while subjName may be subjxxx
self = self.screen_pathLogicalLocation_accordingTo_yourSubjName()
self = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile()
self = self.screen_pathLogicalLocation_accordingTo_fileName()
self = self.fetch_totalLogicalLocation()
self = self.fetch_selfectedFilePath_accordingPathLogicalLocation()
self.unmatched_ref = \
pd.DataFrame(list(
set.difference(set(list(self.subjName_forSelect.astype(np.int32).iloc[:, 0])),
set(list(self.allSelectedSubjName.astype(np.int32).iloc[:, 0])))
)
)
print('=' * 50 + '\n')
print(
'Files that not found are : {}\n\nThey may be saved in:\n[{}]\n'.format(
self.unmatched_ref.values,
self.save_path))
print('=' * 50 + '\n')
# save for checking
if self.is_save_log:
# time information
now = time.localtime()
now = time.strftime("%Y-%m-%d %H:%M:%S", now)
# all matched name
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
uniSubjName =
|
pd.DataFrame(uniSubjName)
|
pandas.DataFrame
|
import os, sys, inspect
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
import torch
import torchvision as tv
from asl.helper_functions.helper_functions import parse_args
from asl.loss_functions.losses import AsymmetricLoss, AsymmetricLossOptimized
from asl.models import create_model
import argparse
import time
import numpy as np
from scipy.stats import binom
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import pickle as pkl
from tqdm import tqdm
from utils import *
import seaborn as sns
from core.concentration import *
import pdb
parser = argparse.ArgumentParser(description='ASL MS-COCO predictor')
parser.add_argument('--model_path',type=str,default='../models/MS_COCO_TResNet_xl_640_88.4.pth')
parser.add_argument('--dset_path',type=str,default='../data/')
parser.add_argument('--model_name',type=str,default='tresnet_xl')
parser.add_argument('--input_size',type=int,default=640)
parser.add_argument('--dataset_type',type=str,default='MS-COCO')
parser.add_argument('--batch_size',type=int,default=5000)
parser.add_argument('--th',type=float,default=0.7)
def get_lamhat_precomputed(scores, labels, gamma, delta, num_lam, num_calib, tlambda):
lams = torch.linspace(0,1,num_lam)
lam = None
for i in range(lams.shape[0]):
lam = lams[i]
est_labels = (scores > lam).to(float)
avg_acc = (est_labels * labels.to(float)/labels.sum()).sum()
Rhat = 1-avg_acc
sigmahat = (1-(est_labels * labels.to(float)/labels.sum(dim=1).unsqueeze(1)).mean(dim=1)).std()
if Rhat >= gamma:
break
if Rhat + tlambda(Rhat,sigmahat,delta) >= gamma:
break
return lam
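# Hedged sketch (not from the original repository): a minimal `tlambda` callable with
# the (Rhat, sigmahat, delta) signature used by get_lamhat_precomputed above, based on
# a plain one-sided CLT margin. `num_calib_example` is an illustrative assumption, and
# Rhat is accepted only for signature compatibility in this simple variant.
from scipy.stats import norm as _norm_for_example
def clt_tlambda_example(Rhat, sigmahat, delta, num_calib_example=1000):
    # z_{1-delta} * sigma / sqrt(n): the margin added to the empirical risk before
    # comparing against the target level gamma.
    return -_norm_for_example.ppf(delta) * sigmahat / np.sqrt(num_calib_example)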
def get_example_loss_and_size_tables(scores, labels, lambdas_example_table, num_calib):
lam_len = len(lambdas_example_table)
lam_low = min(lambdas_example_table)
lam_high = max(lambdas_example_table)
fname_loss = f'../.cache/{lam_low}_{lam_high}_{lam_len}_example_loss_table.npy'
fname_sizes = f'../.cache/{lam_low}_{lam_high}_{lam_len}_example_size_table.npy'
try:
loss_table = np.load(fname_loss)
sizes_table = np.load(fname_sizes)
except:
loss_table = np.zeros((scores.shape[0], lam_len))
sizes_table = np.zeros((scores.shape[0], lam_len))
print("caching loss and size tables")
for j in tqdm(range(lam_len)):
est_labels = scores > lambdas_example_table[j]
loss, sizes = get_metrics_precomputed(est_labels, labels)
loss_table[:,j] = loss
sizes_table[:,j] = sizes
np.save(fname_loss, loss_table)
np.save(fname_sizes, sizes_table)
return loss_table, sizes_table
def trial_precomputed(example_loss_table, example_size_table, lambdas_example_table, gamma, delta, num_lam, num_calib, batch_size, tlambda, bound_str):
#total=example_loss_table.shape[0]
#perm = torch.randperm(example_loss_table.shape[0])
#example_loss_table = example_loss_table[perm]
#example_size_table = example_size_table[perm]
rng_state = np.random.get_state()
np.random.shuffle(example_loss_table)
np.random.set_state(rng_state)
np.random.shuffle(example_size_table)
calib_losses, val_losses = (example_loss_table[0:num_calib], example_loss_table[num_calib:])
calib_sizes, val_sizes = (example_size_table[0:num_calib], example_size_table[num_calib:])
lhat_rcps = get_lhat_from_table_binarysearch(calib_losses, lambdas_example_table, gamma, delta, tlambda, bound_str)
losses_rcps = val_losses[:,np.argmax(lambdas_example_table == lhat_rcps)]
sizes_rcps = val_sizes[:,np.argmax(lambdas_example_table == lhat_rcps)]
temp_calib_losses = calib_losses.copy()
temp_calib_losses[temp_calib_losses > 0] = 1 # for the conformal baseline, use a 0-1 multiclass loss.
lhat_conformal = get_lhat_conformal_from_table(temp_calib_losses, lambdas_example_table, gamma)
losses_conformal = val_losses[:,np.argmax(lambdas_example_table == lhat_conformal)]
sizes_conformal = val_sizes[:,np.argmax(lambdas_example_table == lhat_conformal)]
conformal_coverage = (losses_conformal > 0).astype(np.float64).mean()
return losses_rcps.mean(), torch.tensor(sizes_rcps), lhat_rcps, losses_conformal.mean(), torch.tensor(sizes_conformal), lhat_conformal, conformal_coverage
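# Hedged illustration of the shuffle-in-unison trick used at the top of
# trial_precomputed: restoring the captured RNG state before the second shuffle gives
# both tables the same permutation, so per-example losses and sizes stay aligned.
def _shuffle_in_unison_example(loss_table, size_table):
    rng_state = np.random.get_state()
    np.random.shuffle(loss_table)
    np.random.set_state(rng_state)
    np.random.shuffle(size_table)
    return loss_table, size_table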
def plot_histograms(df_list,gamma,delta,bounds_to_plot):
fig, axs = plt.subplots(nrows=1,ncols=2,figsize=(12,3))
minrecall = min([min(df['risk_rcps'].min(),df['risk_conformal'].min()) for df in df_list])
maxrecall = max([max(df['risk_rcps'].max(),df['risk_conformal'].max()) for df in df_list])
recall_bins = np.arange(minrecall, maxrecall, 0.005)
sizes = torch.cat(df_list[0]['sizes_rcps'].tolist(),dim=0).numpy()
bl_sizes = torch.cat(df_list[0]['sizes_conformal'].tolist(),dim=0).numpy()
conformal_coverages = df_list[0]['conformal_coverage'].to_numpy()
print(f"Conformal coverage for baseline: {conformal_coverages.mean()}")
all_sizes = np.concatenate((sizes,bl_sizes),axis=0)
d = np.diff(np.unique(all_sizes)).min()
lofb = all_sizes.min() - float(d)/2
rolb = all_sizes.max() + float(d)/2
for i in range(len(df_list)):
df = df_list[i]
print(f"Bound {bounds_to_plot[i]} has coverage {1-(df['risk_rcps'] > gamma).mean()}")
axs[0].hist(np.array(df['risk_rcps'].tolist()), recall_bins, alpha=0.7, density=True, label=bounds_to_plot[i])
# Sizes will be 10 times as big as recall, since we pool it over runs.
sizes = torch.cat(df['sizes_rcps'].tolist(),dim=0).numpy()
axs[1].hist(sizes, np.arange(lofb,rolb+d, d), label='RCPS-' + bounds_to_plot[i], alpha=0.7, density=True)
axs[0].hist(np.array(df_list[0]['risk_conformal'].tolist()), recall_bins, alpha=0.7, density=True, label='Conformal')
axs[1].hist(bl_sizes, np.arange(lofb,rolb+d, d), label='Conformal', alpha=0.7, density=True)
axs[0].set_xlabel('risk')
axs[0].locator_params(axis='x', nbins=4)
axs[0].set_ylabel('density')
axs[0].set_yticks([0,100])
axs[0].axvline(x=gamma,c='#999999',linestyle='--',alpha=0.7)
axs[1].set_xlabel('size')
sns.despine(ax=axs[0],top=True,right=True)
sns.despine(ax=axs[1],top=True,right=True)
axs[1].legend()
plt.tight_layout()
plt.savefig('../' + (f'outputs/histograms/{gamma}_{delta}_coco_histograms').replace('.','_') + '.pdf')
def experiment(gamma,delta,num_lam,num_calib,num_grid_hbb,ub,ub_sigma,lambdas_example_table,epsilon,num_trials,maxiters,batch_size,bounds_to_plot, coco_val_2017_directory, coco_instances_val_2017_json):
df_list = []
for bound_str in bounds_to_plot:
if bound_str == 'Bentkus':
bound_fn = bentkus_mu_plus
elif bound_str == 'CLT':
bound_fn = None
elif bound_str == 'HB':
bound_fn = HB_mu_plus
elif bound_str == 'HBB':
bound_fn = HBB_mu_plus
elif bound_str == 'WSR':
bound_fn = WSR_mu_plus
else:
            raise NotImplementedError
fname = f'../.cache/{gamma}_{delta}_{num_calib}_{bound_str}_{num_trials}_dataframe.pkl'
df =
|
pd.DataFrame(columns = ["$\\hat{\\lambda}$","risk_rcps","size_rcps","risk_conformal","size_conformal","gamma","delta"])
|
pandas.DataFrame
|
import pandas as pd
from settings.language_strings import LANGUAGE_RECOMMENDER_ALGORITHMS_STOP, \
LANGUAGE_RECOMMENDER_ALGORITHMS_START
from posprocessing.distributions import multiprocess_get_distribution
from processing.multiprocessing_recommender import all_recommenders_multiprocessing
from processing.singleprocessing_recommender import item_knn_recommender, user_knn_recommender, svd_recommender, \
svdpp_recommender, nmf_recommender, slope_one_recommender
# #################################################################################################################### #
# ################################################# Single Process ################################################### #
# #################################################################################################################### #
def collaborative_filtering_singleprocess(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping):
evaluation_results_df = pd.DataFrame()
# # Item KNN
recommender_results_df = item_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # User KNN
recommender_results_df = user_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # SVD
recommender_results_df = svd_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # SVDpp
recommender_results_df = svdpp_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df = pd.concat([evaluation_results_df, recommender_results_df], sort=False)
# # NMF
recommender_results_df = nmf_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping)
evaluation_results_df =
|
pd.concat([evaluation_results_df, recommender_results_df], sort=False)
|
pandas.concat
|
import os
import pandas as pd
if __name__ == "__main__":
path = os.path
df = None
# Loop over the files within the folder
for filename in os.listdir('./data/nces/raw'):
if filename.endswith('.csv'):
data = pd.read_csv(f"./data/nces/raw/{filename}")
if df is None:
print("Initializing: "+filename)
df = data
else:
print("Concat: "+filename)
df = pd.concat([df, data], axis=0)
df = df.sort_values(by=['State School ID'])
# ensure the 4 digit zip does not have decimals
df['ZIP'] = df['ZIP'].astype(
|
pd.Int32Dtype()
|
pandas.Int32Dtype
|
__author__ = '<NAME>, SRL'
from flask import Flask, send_file
import plotly
import plotly.graph_objects as go
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import numpy as np
from dash.dependencies import Input, Output, State
import json
import requests
from urllib.parse import urlparse, parse_qs
import pandas as pd
from datetime import datetime
from io import BytesIO
import jwt
from typing import List, Tuple
class Crypt:
def __init__(self, secret: str):
self.secret = secret
def Encode(self, target: int, executions: List[int]) -> str:
"""'target' is the landing experiment execution, 'executions' is
the list of all executions belonging to the user"""
payload = {"t": target, "l": executions}
token = jwt.encode(payload, self.secret, algorithm="HS256")
if isinstance(token, bytes): # Older versions of jwt return bytes
token = token.decode(encoding="UTF-8")
return token
def Decode(self, token: str) -> Tuple[int, List[int]]:
"""Returns a tuple (<landing execution>, <list of executions>)"""
payload = jwt.decode(token, self.secret, algorithms=["HS256"])
return payload["t"], payload["l"]
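# Hedged usage sketch for Crypt (illustration only; the secret below is hypothetical):
# the token round-trips the landing execution id together with the executions the
# user is allowed to open.
def _crypt_round_trip_example():
    crypt = Crypt(secret="example-secret")
    token = crypt.Encode(target=42, executions=[40, 41, 42])
    return crypt.Decode(token) == (42, [40, 41, 42])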
server = Flask(__name__)
@server.route('/', methods=['GET'])
def index():
return {'about': "Visualization service for 5Genesis Analytics Component. Visit /help for more info and /dash to bring up the dashboard."}, 200
# Fetch the data source options
def fetch_datasource_options():
link = "http://data_handler:5000/get_datasources"
try:
data = requests.get(link).json()
return [{'label': item, 'value': item} for item in data['sources']]
    except requests.RequestException:
return [{'label': 'No datasource available', 'value': ''}]
datasource_options = fetch_datasource_options()
app = dash.Dash(
__name__,
server=server,
routes_pathname_prefix='/dash/',
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
stat_indicators = ['Mean', 'Standard Deviation', 'Median', 'Min', 'Max',
'25% Percentile', '75% Percentile', '5% Percentile', '95% Percentile']
app.layout = dbc.Container([
dcc.Location(id='url', refresh=False),
dbc.Row([
dbc.Col([
html.Div([
html.Img(src=app.get_asset_url('5genesis_logo.png'), # from https://pbs.twimg.com/media/EWm7hjlX0AUl_AJ.png
style={'height': '12rem', 'width': '12rem', 'border-radius': '50%'}),
html.H2("Analytics", style={'margin-top': '2rem'})
], style={'display': 'block', 'text-align': 'center', 'padding-top': '2rem'}),
html.Br(),
html.Div([
html.Div('Database'),
dcc.Dropdown(
options=datasource_options,
value=datasource_options[0]['value'],
id='datasource',
searchable=False,
clearable=False
)
]),
html.Br(),
html.Div([
html.Div('Experiment ID'),
dcc.Dropdown(id='experiment')
]),
html.Br(),
html.Div([
html.Div('Measurement Table'),
dcc.Dropdown(
id='measurement',
multi=True)
]),
html.Br(),
html.Div([
html.Div('Available Features'),
dcc.Dropdown(id='kpi', multi=True)
]),
html.Br(),
html.Hr(),
html.Br(),
html.Div([
html.Div('Outlier Detection Algorithm'),
dcc.Dropdown(
options=[
{'label': 'None', 'value': 'None'},
{'label': 'Z-score', 'value': 'zscore'},
{'label': 'MAD', 'value': 'mad'}],
value='None',
id='outlier',
searchable=False,
clearable=False
)]),
html.Br(),
html.Div([
html.Div('Time resolution'),
dcc.Input(
id="time_resolution",
type='text',
placeholder="1s",
value='1s',
style={'width': '75px'}
)
]),
html.Br(),
html.Div(
html.A(
dbc.Button('Reset', id='purge_cache_button'),
href='/dash/'
), style={'textAlign': 'center'})
], width=2, style={'background-color': "#f8f9fa"}),
dbc.Col([
# Hidden divisions to store data that'll be used as input for different callbacks
html.Div(id='df', style={'display': 'none'}),
html.Div(id='df_no_outliers', style={'display': 'none'}),
html.Div(id='test_case_stat_df', style={'display': 'none'}),
html.Div(id='it_stat_df', style={'display': 'none'}),
# html.Div(id='corr_matrix_download_data', style={'display': 'none'}),
# html.Div(id='corr_table_download_data', style={'display': 'none'}),
html.Div(id='prediction_results_df', style={'display': 'none'}),
# html.Br(),
# Create tabs
dcc.Tabs(id='tabs', value='time-series-tab', children=[
# Time Series tab
dcc.Tab(label='Time Series Overview', value='time-series-tab', children=[
# Time series graph
dbc.Row(dbc.Col(dcc.Graph(id='graph'))),
# dcc.Graph(id='graph_no_outliers')
# # download link
# dbc.Row(dbc.Col(
# html.A(
# 'Download Raw Data',
# id='download-link',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Statistical Analysis tab
dcc.Tab(label='Statistical Analysis', value='stat-analysis-tab', children=[
# graph
dbc.Row(dbc.Col(
dcc.Graph(id='box_plot')
)),
# table
dbc.Row(dbc.Col([
html.H4(children='Test Case Statistics'),
dash_table.DataTable(
id='table',
columns=[
{'name': 'Indicator', 'id': 'Indicator'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)},
{'name': 'Confidence Interval', 'id': 'Confidence Interval', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)}
]
),
# # download links
# html.Div(
# html.A(
# 'Download Per Iteration Statistics',
# id='iteration_download',
# download="",
# href="",
# target="_blank"
# ),
# ),
# html.Div(
# html.A(
# 'Download Test Case Statistics',
# id='test_case_download',
# download="",
# href="",
# target="_blank"
# )
# )
], width=6), justify='center')
]),
# Correlation tab
dcc.Tab(label='Correlation', value='correlation-tab', children=[
dcc.Tabs(id="corr-tabs", value="cross-correlation-tab", children=[
# Correlation Matrix
dcc.Tab(label='Cross-correlation of fields within the same experiment', value="cross-correlation-tab", children=[
dbc.Row(dbc.Col([
html.Div('Correlation method', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'value': 'pearson', 'label': 'Pearson correlation coefficient'},
{'value': 'kendall', 'label': 'Kendall Tau correlation coefficient'},
{'value': 'spearman', 'label': 'Spearman rank correlation'}
],
value='pearson',
id='correlation-method',
searchable=False,
clearable=False
)
], width=3)),
dbc.Row(dbc.Col(
dcc.Graph(id='correlation_graph')
)),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Matrix Data',
# id='corr_matrix_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Correlation table
dcc.Tab(label='Correlation of fields between two different experiments', value='experiment-correlation-tab', children=[
dbc.Row(dbc.Col([
html.Div('Pick Second Experiment ID', style={'margin-top': '20px'}),
dcc.Dropdown(id='experiment2'),
html.Br()
], width=3), justify='center'),
dbc.Row(dbc.Col(
dash_table.DataTable(
id='correlation_table',
columns=[
{'name': 'Correlation Field', 'id': 'Correlation Field', 'type': 'text'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric', 'format': Format(precision=2, scheme=Scheme.fixed)}
], style_data={'width': '250px'}
), width='auto'
), justify='center'),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Table Data',
# id='corr_table_download',
# download="",
# href="",
# target="_blank"
# )
# ))
])
])
]),
# Feature Selection tab
dcc.Tab(label='Feature Selection', value='feature-selection-tab', children=[
# hidden division to store data
html.Div(id='feature_score', style={'display': 'none'}),
dbc.Row([
dbc.Col([
# Options
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Backward Elimination', 'value': 'backward'},
{'label': 'RFE', 'value': 'rfe'},
{'label': 'Lasso', 'value': 'lasso'}
],
value='lasso',
id='method',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features',
multi=True
)
], width=3),
dbc.Col([
html.Div('Normalize (for RFE)', style={'margin-top': '20px'}),
dcc.RadioItems(
options=[
{'label': 'Yes', 'value': 'true'},
{'label': 'No', 'value': 'false'},
],
value='true',
id='normalize',
labelStyle={'display': 'inline-block', 'margin-top': '5px'}
)
], width='auto'),
dbc.Col([
html.Div('Alpha (for Lasso)', style={'margin-top': '20px'}),
dcc.Input(
id='alpha',
type='number',
value=0.1,
min=0, max=10, step=0.1
)
], width='auto')
]),
dbc.Row(dbc.Col(dcc.Graph(id='feature_bar'))),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Feature Selection Scores',
# id='features_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Prediction tab
dcc.Tab(label='Prediction', value='prediction-tab', children=[
dbc.Row([
# Options
dbc.Col([
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Linear Regression',
'value': 'linreg'},
{'label': 'Random Forest',
'value': 'rf'},
{'label': 'SVR', 'value': 'svr'}
],
value='linreg',
id='algorithm',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features_pred',
multi=True
)
], width=3),
dbc.Col(
dbc.Button('Automatic feature selection', id='drop_features_button', color='light', style={'margin-top': '43px'}),
width="auto"
),
dbc.Col(
dbc.Button('Train model', id='train_button', style={'margin-top': '43px'}),
width="auto"
)
]),
dbc.Row(
# Prediction values graph
dbc.Col(dbc.Col(dcc.Graph(id='predicted_values_graph')))
),
dbc.Row([
# Prediction results
dbc.Col(
html.Div([
html.H4('Training results'),
dash_table.DataTable(
id='prediction_result_table',
columns=[
{
'name': 'Metric',
'id': 'Metric',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
),
# Coefficient table
dbc.Col(
html.Div([
html.H4('Model coefficients'),
dash_table.DataTable(
id='prediction_coefficient_table',
columns=[
{
'name': 'Feature',
'id': 'Feature',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=4, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
)
], justify="around"),
dbc.Row(
dbc.Col(
html.A(
dbc.Button('Download model', id='download_button', style={'margin-bottom': '50px'}),
id='model_download_link',
href=None
), width="auto"
), justify="center"
)
])
])
])
])
], fluid=True)
def empty_figure(title='No data'):
return {
'data': [{'x': 0, 'y': 0}],
'layout': {'title': title}
}
empty_fig = empty_figure()
kpi_filter_list = ['Available RAM', 'PacketsReceived', 'Total RAM', 'Used CPU Per Cent', 'Used RAM', 'Used RAM Per Cent', # malaga old names
'host', 'Cell ID', 'Cell',
'facility', 'facility_x', 'facility_y',
'Success', 'Success_x', 'Success_y',
'hostname', 'hostname_x', 'hostname_y',
'appname', 'appname_x', 'appname_y',
'series', 'series_x', 'series_y',
'_iteration_', '_iteration__x', '_iteration__y',
'ExecutionId', 'ExecutionId_x', 'ExecutionId_y', 'Timestamp_x', 'Timestamp_y',
'Operator', 'DateTime', 'Network', 'LAC', 'PSC',
'AWGN State', 'Verdict']
meas_filter_list = ['execution_metadata', 'syslog']
# callback to return experiment ID options
@app.callback(
[Output('experiment', 'options'),
Output('experiment', 'value')],
[Input('url', 'search'),
Input('datasource', 'value')])
def experimentID_list(search, datasource):
if not search or not datasource:
return [], None
start = datetime.now()
params = parse_qs(urlparse(search).query)
token = params['token'][0]
if token == secret:
link = f'http://data_handler:5000/get_all_experimentIds/{datasource}'
r = requests.get(link)
experiment_list = list(r.json().values())[0]
experiment_target = None
else:
experiment_target, experiment_list = decoder.Decode(token)
if experiment_target and experiment_target not in experiment_list:
experiment_list += [experiment_target]
print(f"-- experimentID_list: {datetime.now()-start}", flush=True)
return [{'label': item, 'value': item} for item in sorted(experiment_list)], experiment_target
# callback to return measurement options
@app.callback(
[Output('measurement', 'options'),
Output('measurement', 'value')],
[Input('experiment', 'value')],
[State('datasource', 'value')])
def find_measurement(experiment, datasource):
if not experiment or not datasource:
return [], None
start = datetime.now()
link = f'http://data_handler:5000/get_measurements_for_experimentId/{datasource}/{experiment}'
r = requests.get(link)
meas_list = list(r.json().values())[0]
temp = []
for i in meas_list:
if i not in meas_filter_list: # to avoid having measurement tables which raise errors
temp.append({'label': i, 'value': i})
print(f"-- find_measurement: {datetime.now()-start}", flush=True)
return temp, None
# callback used to store the df in a hidden division
@app.callback(
Output('df', 'children'),
[Input('measurement', 'value'),
Input('outlier', 'value'),
Input('datasource', 'value'),
Input('experiment', 'value'),
Input('time_resolution', 'value'),
Input('purge_cache_button', 'n_clicks')])
def retrieve_df(measurement, outlier, datasource, experiment, time_resolution, purge_cache):
# input check - this order required (at first value is none, when filled it is a list)
if not measurement or not experiment or not time_resolution:
# empty_df = pd.DataFrame(data={})
return None
context = dash.callback_context
if context and context.triggered[0]['prop_id'].split('.')[0] == 'purge_cache_button':
requests.get('http://data_handler:5000/purge_cache')
return None
start = datetime.now()
link = f'http://data_handler:5000/get_data/{datasource}/{experiment}'
param_dict = {
'match_series': False,
'measurement': measurement,
'max_lag': time_resolution,
'remove_outliers': outlier
}
r = requests.get(link, params=param_dict)
print(f"-- retrieve_df: {datetime.now()-start}", flush=True)
# return df.to_json()
return r.text
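# Hedged note on the hidden-div pattern used by retrieve_df above: the DataFrame is
# passed between callbacks as a JSON string, so downstream callbacks rebuild it with
# pd.read_json. Minimal round trip (illustration only, not a registered callback):
def _hidden_div_round_trip_example():
    df = pd.DataFrame({"kpi": [1.0, 2.0]})
    serialized = df.to_json()        # what retrieve_df effectively returns
    return pd.read_json(serialized)  # what update_dropdown / update_graph reconstruct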
@app.callback(
[Output('kpi', 'options'),
Output('kpi', 'value')],
[Input("df", "children")])
def update_dropdown(df):
if not df:
return [], None
start = datetime.now()
temp = []
df = pd.read_json(df)
for i in df.columns:
if not len(df[i].dropna()) == 0 and i not in kpi_filter_list:
temp.append({'label': i, 'value': i})
print(f"-- update_dropdown: {datetime.now()-start}", flush=True)
return temp, None
###
# Time Series Overview tab
###
# Time series graph
@app.callback(
Output('graph', 'figure'),
[Input('kpi', 'value'),
Input("outlier", 'value'),
Input('tabs', 'value')],
[State("df", "children")])
def update_graph(kpi, outlier, tab, df):
# input check
if not kpi or not df or not outlier or tab != "time-series-tab":
return empty_fig
start = datetime.now()
df = pd.read_json(df)
traces = []
for i in range(len(kpi)):
feature = kpi[i]
series = df[feature]
series.reset_index(drop=True, inplace=True)
traces.append(go.Scatter(
x=df.index,
y=series,
mode='lines',
name=feature,
yaxis=f"y{i+1}" if i > 0 else 'y'
))
figure = {
'data': traces,
'layout': {
'title': 'Time Series',
'xaxis': {
'title': 'Samples',
'domain': [0, 1 - (len(kpi) - 1) * 0.06],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': '#7f7f7f'
}
},
'yaxis': {
'title': kpi[0],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
}
},
"showlegend": False
}
}
for i in range(1, len(kpi)):
figure['layout'][f'yaxis{i+1}'] = {
'title': kpi[i],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'overlaying': 'y',
'side': 'right',
'position': 1 - i * 0.06
}
print(f"-- update_graph: {datetime.now()-start}", flush=True)
return figure
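# Hedged helper mirroring the stacked y-axis layout computed inside update_graph:
# every additional KPI shrinks the x-axis domain by 0.06 and places its own y-axis at
# the freed position on the right-hand side (illustration only).
def _yaxis_layout_example(n_kpi):
    domain = [0, 1 - (n_kpi - 1) * 0.06]
    positions = [1 - i * 0.06 for i in range(1, n_kpi)]
    return domain, positions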
###
# Statistical Analysis tab
###
# callback used to store the statistical analysis dataframes
@app.callback(
[Output("it_stat_df", "children"),
Output("test_case_stat_df", "children")],
[Input('kpi', 'value'),
Input('datasource', 'value'),
Input('tabs', 'value')],
[State('measurement', 'value'),
State('experiment', 'value')])
def retrieve_stats(kpi, datasource, tab, measurement, experiment):
if not kpi or not experiment or tab != 'stat-analysis-tab':
empty_df = pd.DataFrame(data={})
return empty_df.to_json(), empty_df.to_json()
else:
link = f'http://statistical_analysis:5003/statistical_analysis/{datasource}'
param_dict = {
'experimentid': experiment,
'kpi': kpi[0], # .replace(" ","%20")
'measurement': measurement
}
r = requests.get(link, params=param_dict)
data = r.json()
if not data['experimentid'][experiment]:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
# Copyright 2022 <NAME>, <NAME>, <NAME>.
# Licensed under the BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
# This file may not be copied, modified, or distributed
# except according to those terms.
import sys
sys.stderr = open(snakemake.log[0], "w")
import altair as alt
import pandas as pd
import pysam
AA_ALPHABET_TRANSLATION = {
"Gly": "G",
"Ala": "A",
"Leu": "L",
"Met": "M",
"Phe": "F",
"Trp": "W",
"Lys": "K",
"Gln": "Q",
"Glu": "E",
"Ser": "S",
"Pro": "P",
"Val": "V",
"Ile": "I",
"Cys": "C",
"Tyr": "Y",
"His": "H",
"Arg": "R",
"Asn": "N",
"Asp": "D",
"Thr": "T",
}
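# Hedged example of the three-letter to one-letter translation applied in get_calls()
# below; "Asp614Gly" is a hypothetical HGVSp-style alteration used for illustration.
def _translate_aa_example(alteration="Asp614Gly"):
    for triplet, amino in AA_ALPHABET_TRANSLATION.items():
        alteration = alteration.replace(triplet, amino)
    return alteration  # -> "D614G"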
def get_calls():
variants = []
for file, date, sample in zip(
snakemake.input.bcf, snakemake.params.dates, snakemake.params.samples
):
with pysam.VariantFile(file, "rb") as infile:
for record in infile:
vaf = record.samples[0]["AF"][0]
for ann in record.info["ANN"]:
ann = ann.split("|")
hgvsp = ann[11]
enssast_id = ann[6]
feature = ann[3]
orf = ann[3]
if hgvsp:
# TODO think about regex instead of splitting
enssast_id, alteration = hgvsp.split(":", 1)
_prefix, alteration = alteration.split(".", 1)
for triplet, amino in AA_ALPHABET_TRANSLATION.items():
alteration = alteration.replace(triplet, amino)
variants.append(
{
"feature": feature,
"alteration": alteration,
"vaf": vaf,
"date": date,
"sample": sample,
"orf": orf,
}
)
variants_df =
|
pd.DataFrame(variants)
|
pandas.DataFrame
|
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
import os
import yaml
import logging
import time
import psutil
import argparse
import pandas as pd
import numpy as np
import multiprocessing as mp
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from datetime import datetime, timedelta
from functools import partial
from tqdm import tqdm
from scipy.optimize import dual_annealing
from DELPHI_utils_V4_static import (
DELPHIAggregations, DELPHIDataSaver, DELPHIDataCreator, get_initial_conditions,
get_mape_data_fitting, create_fitting_data_from_validcases, get_residuals_value
)
from DELPHI_utils_V4_dynamic import get_bounds_params_from_pastparams
from DELPHI_params_V4 import (
fitting_start_date,
default_parameter_list,
dict_default_reinit_parameters,
dict_default_reinit_lower_bounds,
dict_default_reinit_upper_bounds,
default_upper_bound,
default_lower_bound,
percentage_drift_upper_bound,
percentage_drift_lower_bound,
percentage_drift_upper_bound_annealing,
percentage_drift_lower_bound_annealing,
default_upper_bound_annealing,
default_lower_bound_annealing,
default_lower_bound_t_jump,
default_parameter_t_jump,
default_upper_bound_t_jump,
default_lower_bound_std_normal,
default_parameter_std_normal,
default_upper_bound_std_normal,
default_bounds_params,
validcases_threshold,
IncubeD,
RecoverID,
RecoverHD,
DetectD,
VentilatedD,
default_maxT,
p_v,
p_d,
p_h,
max_iter,
)
## Initializing Global Variables ##########################################################################
with open("config.yml", "r") as ymlfile:
CONFIG = yaml.load(ymlfile, Loader=yaml.BaseLoader)
CONFIG_FILEPATHS = CONFIG["filepaths"]
time_beginning = time.time()
yesterday = "".join(str(datetime.now().date() - timedelta(days=1)).split("-"))
yesterday_logs_filename = "".join(
(str(datetime.now().date() - timedelta(days=1)) + f"_{datetime.now().hour}H{datetime.now().minute}M").split("-")
)
parser = argparse.ArgumentParser()
parser.add_argument(
'--run_config', '-rc', type=str, required=True,
help="specify relative path for the run config YAML file"
)
arguments = parser.parse_args()
with open(arguments.run_config, "r") as ymlfile:
RUN_CONFIG = yaml.load(ymlfile, Loader=yaml.BaseLoader)
USER_RUNNING = RUN_CONFIG["arguments"]["user"]
OPTIMIZER = RUN_CONFIG["arguments"]["optimizer"]
GET_CONFIDENCE_INTERVALS = bool(int(RUN_CONFIG["arguments"]["confidence_intervals"]))
SAVE_TO_WEBSITE = bool(int(RUN_CONFIG["arguments"]["website"]))
SAVE_SINCE100_CASES = bool(int(RUN_CONFIG["arguments"]["since100case"]))
PATH_TO_FOLDER_DANGER_MAP = CONFIG_FILEPATHS["danger_map"][USER_RUNNING]
PATH_TO_DATA_SANDBOX = CONFIG_FILEPATHS["data_sandbox"][USER_RUNNING]
PATH_TO_WEBSITE_PREDICTED = CONFIG_FILEPATHS["website"][USER_RUNNING]
past_prediction_date = "".join(str(datetime.now().date() - timedelta(days=14)).split("-"))
#############################################################################################################
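# Hedged standalone sketch of the governmental-response curve gamma(t) described in
# the model_covid docstring further below: an arctan decline corrected by a Gaussian
# jump for case resurgences. Parameter values here are illustrative only.
def _gamma_t_example(t, days=30.0, r_s=1.0, jump=0.5, t_jump=120.0, std_normal=20.0):
    return (
        (2 / np.pi) * np.arctan(-(t - days) / 20 * r_s) + 1
        + jump * np.exp(-(t - t_jump) ** 2 / (2 * std_normal ** 2))
    )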
def solve_and_predict_area(
tuple_area_state_: tuple,
yesterday_: str,
past_parameters_: pd.DataFrame,
popcountries: pd.DataFrame,
    startT: str = None, # added to change optimization start date
):
"""
Parallelizable version of the fitting & solving process for DELPHI V4, this function is called with multiprocessing
    :param tuple_area_state_: tuple corresponding to (continent, country, province, initial_state)
:param yesterday_: string corresponding to the date from which the model will read the previous parameters. The
format has to be 'YYYYMMDD'
:param past_parameters_: Parameters from yesterday_ used as a starting point for the fitting process
:param popcountries: DataFrame containing population information for all countries and provinces
    :param startT: string for the date from when the pandemic will be modelled (format should be 'YYYY-MM-DD')
:return: either None if can't optimize (either less than 100 cases or less than 7 days with 100 cases) or a tuple
with 3 dataframes related to that tuple_area_ (parameters df, predictions since yesterday_+1, predictions since
first day with 100 cases) and a scipy.optimize object (OptimizeResult) that contains the predictions for all
16 states of the model (and some other information that isn't used)
"""
time_entering = time.time()
continent, country, province, initial_state = tuple_area_state_
country_sub = country.replace(" ", "_")
province_sub = province.replace(" ", "_")
print(f"starting to predict for {continent}, {country}, {province}")
if os.path.exists(PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"):
totalcases = pd.read_csv(
PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"
)
if totalcases.day_since100.max() < 0:
logging.warning(
f"Not enough cases (less than 100) for Continent={continent}, Country={country} and Province={province}"
)
return None
if past_parameters_ is not None:
parameter_list_total = past_parameters_[
(past_parameters_.Country == country)
& (past_parameters_.Province == province)
].reset_index(drop=True)
if len(parameter_list_total) > 0:
parameter_list_line = parameter_list_total.iloc[-1, :].values.tolist()
parameter_list = parameter_list_line[5:]
parameter_list, bounds_params = get_bounds_params_from_pastparams(
optimizer=OPTIMIZER,
parameter_list=parameter_list,
dict_default_reinit_parameters=dict_default_reinit_parameters,
percentage_drift_lower_bound=percentage_drift_lower_bound,
default_lower_bound=default_lower_bound,
dict_default_reinit_lower_bounds=dict_default_reinit_lower_bounds,
percentage_drift_upper_bound=percentage_drift_upper_bound,
default_upper_bound=default_upper_bound,
dict_default_reinit_upper_bounds=dict_default_reinit_upper_bounds,
percentage_drift_lower_bound_annealing=percentage_drift_lower_bound_annealing,
default_lower_bound_annealing=default_lower_bound_annealing,
percentage_drift_upper_bound_annealing=percentage_drift_upper_bound_annealing,
default_upper_bound_annealing=default_upper_bound_annealing,
default_lower_bound_t_jump=default_lower_bound_t_jump,
default_upper_bound_t_jump=default_upper_bound_t_jump,
default_parameter_t_jump=default_parameter_t_jump,
default_lower_bound_std_normal=default_lower_bound_std_normal,
default_upper_bound_std_normal=default_upper_bound_std_normal,
default_parameter_std_normal=default_parameter_std_normal
)
start_date = pd.to_datetime(parameter_list_line[3])
bounds_params = tuple(bounds_params)
else:
# Otherwise use established lower/upper bounds
parameter_list = default_parameter_list
bounds_params = default_bounds_params
start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])
else:
# Otherwise use established lower/upper bounds
parameter_list = default_parameter_list
bounds_params = default_bounds_params
start_date = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])
if startT is not None:
input_start_date = pd.to_datetime(startT)
if input_start_date > start_date:
delta_days = (input_start_date - start_date).days
parameter_list[9] = parameter_list[9] - delta_days
bounds_params_list = list(bounds_params)
bounds_params_list[9] = (bounds_params_list[9][0]-delta_days, bounds_params_list[9][1]-delta_days)
bounds_params = tuple(bounds_params_list)
start_date = input_start_date
validcases = totalcases[
(totalcases.date >= str(start_date.date()))
& (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))
][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
else:
validcases = totalcases[
(totalcases.day_since100 >= 0)
& (totalcases.date <= str((pd.to_datetime(yesterday_) + timedelta(days=1)).date()))
][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
# Now we start the modeling part:
if len(validcases) <= validcases_threshold:
logging.warning(
f"Not enough historical data (less than a week)"
+ f"for Continent={continent}, Country={country} and Province={province}"
)
return None
else:
PopulationT = popcountries[
(popcountries.Country == country) & (popcountries.Province == province)
].pop2016.iloc[-1]
N = PopulationT
PopulationI = validcases.loc[0, "case_cnt"]
PopulationD = validcases.loc[0, "death_cnt"]
if initial_state is not None:
R_0 = initial_state[9]
else:
R_0 = validcases.loc[0, "death_cnt"] * 5 if validcases.loc[0, "case_cnt"] - validcases.loc[0, "death_cnt"]> validcases.loc[0, "death_cnt"] * 5 else 0
bounds_params_list = list(bounds_params)
bounds_params_list[-1] = (0.999,1)
bounds_params = tuple(bounds_params_list)
cases_t_14days = totalcases[totalcases.date >= str(start_date- pd.Timedelta(14, 'D'))]['case_cnt'].values[0]
deaths_t_9days = totalcases[totalcases.date >= str(start_date - pd.Timedelta(9, 'D'))]['death_cnt'].values[0]
R_upperbound = validcases.loc[0, "case_cnt"] - validcases.loc[0, "death_cnt"]
R_heuristic = cases_t_14days - deaths_t_9days
if int(R_0*p_d) >= R_upperbound and R_heuristic >= R_upperbound:
logging.error(f"Initial conditions for PopulationR too high for {country}-{province}, on {startT}")
"""
Fixed Parameters based on meta-analysis:
p_h: Hospitalization Percentage
RecoverHD: Average Days until Recovery
        VentilatedD: Number of Days on Ventilation for Ventilated Patients
maxT: Maximum # of Days Modeled
p_d: Percentage of True Cases Detected
p_v: Percentage of Hospitalized Patients Ventilated,
balance: Regularization coefficient between cases and deaths
"""
maxT = (default_maxT - start_date).days + 1
        t_cases = (validcases["day_since100"] - validcases.loc[0, "day_since100"]).tolist()
balance, balance_total_difference, cases_data_fit, deaths_data_fit, weights = create_fitting_data_from_validcases(validcases)
GLOBAL_PARAMS_FIXED = (N, R_upperbound, R_heuristic, R_0, PopulationD, PopulationI, p_d, p_h, p_v)
def model_covid(
t, x, alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3
) -> list:
"""
SEIR based model with 16 distinct states, taking into account undetected, deaths, hospitalized and
recovered, and using an ArcTan government response curve, corrected with a Gaussian jump in case of
a resurgence in cases
:param t: time step
:param x: set of all the states in the model (here, 16 of them)
:param alpha: Infection rate
:param days: Median day of action (used in the arctan governmental response)
:param r_s: Median rate of action (used in the arctan governmental response)
:param r_dth: Rate of death
:param p_dth: Initial mortality percentage
:param r_dthdecay: Rate of decay of mortality percentage
:param k1: Internal parameter 1 (used for initial conditions)
:param k2: Internal parameter 2 (used for initial conditions)
:param jump: Amplitude of the Gaussian jump modeling the resurgence in cases
:param t_jump: Time where the Gaussian jump will reach its maximum value
:param std_normal: Standard Deviation of the Gaussian jump (~ time span of the resurgence in cases)
            :param k3: Internal parameter 3 (used for initial conditions)
:return: predictions for all 16 states, which are the following
[0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D, 11 TH, 12 DVR,13 DVD, 14 DD, 15 DT]
"""
r_i = np.log(2) / IncubeD # Rate of infection leaving incubation phase
r_d = np.log(2) / DetectD # Rate of detection
r_ri = np.log(2) / RecoverID # Rate of recovery not under infection
r_rh = np.log(2) / RecoverHD # Rate of recovery under hospitalization
r_rv = np.log(2) / VentilatedD # Rate of recovery under ventilation
gamma_t = (
(2 / np.pi) * np.arctan(-(t - days) / 20 * r_s) + 1
+ jump * np.exp(-(t - t_jump) ** 2 / (2 * std_normal ** 2))
)
p_dth_mod = (2 / np.pi) * (p_dth - 0.001) * (np.arctan(-t / 20 * r_dthdecay) + np.pi / 2) + 0.001
assert (
len(x) == 16
), f"Too many input variables, got {len(x)}, expected 16"
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT = x
# Equations on main variables
dSdt = -alpha * gamma_t * S * I / N
dEdt = alpha * gamma_t * S * I / N - r_i * E
dIdt = r_i * E - r_d * I
dARdt = r_d * (1 - p_dth_mod) * (1 - p_d) * I - r_ri * AR
dDHRdt = r_d * (1 - p_dth_mod) * p_d * p_h * I - r_rh * DHR
dDQRdt = r_d * (1 - p_dth_mod) * p_d * (1 - p_h) * I - r_ri * DQR
dADdt = r_d * p_dth_mod * (1 - p_d) * I - r_dth * AD
dDHDdt = r_d * p_dth_mod * p_d * p_h * I - r_dth * DHD
dDQDdt = r_d * p_dth_mod * p_d * (1 - p_h) * I - r_dth * DQD
dRdt = r_ri * (AR + DQR) + r_rh * DHR
dDdt = r_dth * (AD + DQD + DHD)
# Helper states (usually important for some kind of output)
dTHdt = r_d * p_d * p_h * I
dDVRdt = r_d * (1 - p_dth_mod) * p_d * p_h * p_v * I - r_rv * DVR
dDVDdt = r_d * p_dth_mod * p_d * p_h * p_v * I - r_dth * DVD
dDDdt = r_dth * (DHD + DQD)
dDTdt = r_d * p_d * I
return [
dSdt, dEdt, dIdt, dARdt, dDHRdt, dDQRdt, dADdt, dDHDdt,
dDQDdt, dRdt, dDdt, dTHdt, dDVRdt, dDVDdt, dDDdt, dDTdt,
]
def residuals_totalcases(params) -> float:
"""
Function that makes sure the parameters are in the right range during the fitting process and computes
the loss function depending on the optimizer that has been chosen for this run as a global variable
:param params: currently fitted values of the parameters during the fitting process
:return: the value of the loss function as a float that is optimized against (in our case, minimized)
"""
# Variables Initialization for the ODE system
alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = params
# Force params values to stay in a certain range during the optimization process with re-initializations
params = (
max(alpha, dict_default_reinit_parameters["alpha"]),
days,
max(r_s, dict_default_reinit_parameters["r_s"]),
max(min(r_dth, 1), dict_default_reinit_parameters["r_dth"]),
max(min(p_dth, 1), dict_default_reinit_parameters["p_dth"]),
max(r_dthdecay, dict_default_reinit_parameters["r_dthdecay"]),
max(k1, dict_default_reinit_parameters["k1"]),
max(k2, dict_default_reinit_parameters["k2"]),
max(jump, dict_default_reinit_parameters["jump"]),
max(t_jump, dict_default_reinit_parameters["t_jump"]),
max(std_normal, dict_default_reinit_parameters["std_normal"]),
max(k3, dict_default_reinit_lower_bounds["k3"]),
)
x_0_cases = get_initial_conditions(
params_fitted=params, global_params_fixed=GLOBAL_PARAMS_FIXED
)
x_sol_total = solve_ivp(
fun=model_covid,
y0=x_0_cases,
t_span=[t_cases[0], t_cases[-1]],
t_eval=t_cases,
args=tuple(params),
)
x_sol = x_sol_total.y
# weights = list(range(1, len(cases_data_fit) + 1))
# weights = [(x/len(cases_data_fit))**2 for x in weights]
if x_sol_total.status == 0:
residuals_value = get_residuals_value(
optimizer=OPTIMIZER,
balance=balance,
x_sol=x_sol,
cases_data_fit=cases_data_fit,
deaths_data_fit=deaths_data_fit,
weights=weights,
balance_total_difference=balance_total_difference
)
else:
residuals_value = 1e16
return residuals_value
if OPTIMIZER in ["tnc", "trust-constr"]:
output = minimize(
residuals_totalcases,
parameter_list,
method=OPTIMIZER,
bounds=bounds_params,
options={"maxiter": max_iter},
)
elif OPTIMIZER == "annealing":
output = dual_annealing(
residuals_totalcases, x0=parameter_list, bounds=bounds_params
)
print(f"Parameter bounds are {bounds_params}")
print(f"Parameter list is {parameter_list}")
else:
raise ValueError("Optimizer not in 'tnc', 'trust-constr' or 'annealing' so not supported")
if (OPTIMIZER in ["tnc", "trust-constr"]) or (OPTIMIZER == "annealing" and output.success):
best_params = output.x
t_predictions = [i for i in range(maxT)]
def solve_best_params_and_predict(optimal_params):
# Variables Initialization for the ODE system
alpha, days, r_s, r_dth, p_dth, r_dthdecay, k1, k2, jump, t_jump, std_normal, k3 = optimal_params
optimal_params = [
max(alpha, dict_default_reinit_parameters["alpha"]),
days,
max(r_s, dict_default_reinit_parameters["r_s"]),
max(min(r_dth, 1), dict_default_reinit_parameters["r_dth"]),
max(min(p_dth, 1), dict_default_reinit_parameters["p_dth"]),
max(r_dthdecay, dict_default_reinit_parameters["r_dthdecay"]),
max(k1, dict_default_reinit_parameters["k1"]),
max(k2, dict_default_reinit_parameters["k2"]),
max(jump, dict_default_reinit_parameters["jump"]),
max(t_jump, dict_default_reinit_parameters["t_jump"]),
max(std_normal, dict_default_reinit_parameters["std_normal"]),
max(k3, dict_default_reinit_lower_bounds["k3"]),
]
x_0_cases = get_initial_conditions(
params_fitted=optimal_params,
global_params_fixed=GLOBAL_PARAMS_FIXED,
)
x_sol_best = solve_ivp(
fun=model_covid,
y0=x_0_cases,
t_span=[t_predictions[0], t_predictions[-1]],
t_eval=t_predictions,
args=tuple(optimal_params),
).y
return x_sol_best
x_sol_final = solve_best_params_and_predict(best_params)
data_creator = DELPHIDataCreator(
x_sol_final=x_sol_final,
date_day_since100=start_date,
best_params=best_params,
continent=continent,
country=country,
province=province,
testing_data_included=False,
)
mape_data = get_mape_data_fitting(
cases_data_fit=cases_data_fit, deaths_data_fit=deaths_data_fit, x_sol_final=x_sol_final
)
logging.info(f"In-Sample MAPE Last 15 Days {country, province}: {round(mape_data, 3)} %")
logging.debug(f"Best fitted parameters for {country, province}: {best_params}")
df_parameters_area = data_creator.create_dataset_parameters(mape_data)
# Creating the datasets for predictions of this area
if GET_CONFIDENCE_INTERVALS:
df_predictions_since_today_area, df_predictions_since_100_area = (
data_creator.create_datasets_with_confidence_intervals(
cases_data_fit, deaths_data_fit,
past_prediction_file=PATH_TO_FOLDER_DANGER_MAP + f"predicted/Global_V4_{past_prediction_date}.csv",
past_prediction_date=str(
|
pd.to_datetime(past_prediction_date)
|
pandas.to_datetime
|
#!/usr/bin/env python
from __future__ import print_function
import os
import time
import yaml
import pprint
import random
import pickle
import shutil
import inspect
import argparse
from collections import OrderedDict, defaultdict
from datetime import date, datetime
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import MultiStepLR
import pandas as pd
import torch.nn.functional as F
print(torch.__version__)
# from personality_recognition.evaluate import compute_metrics
# import apex
from utils import count_params, import_class
OCEAN_COLUMNS = ['OPENMINDEDNESS_Z', 'CONSCIENTIOUSNESS_Z', 'EXTRAVERSION_Z', 'AGREEABLENESS_Z',
'NEGATIVEEMOTIONALITY_Z']
def init_seed(seed):
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def my_loss_func(output, target, weight):
assert weight.shape[1] == target.shape[1]
loss = torch.mean(((output - target) ** 2) * weight)
return loss
def weighted_l1_loss(inputs, targets, weights=None):
loss = F.l1_loss(inputs, targets, reduction='none')
if weights is not None:
loss *= weights.expand_as(loss)
loss = torch.mean(loss)
return loss
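# Hedged usage sketch for the weighted losses above (fixed dummy tensors, illustration
# only): per-element weights rescale the L1 error before averaging.
def _weighted_l1_loss_example():
    preds = torch.zeros(4, 5)
    targets = torch.ones(4, 5)
    weights = torch.full((1, 5), 0.5)
    return weighted_l1_loss(preds, targets, weights)  # tensor(0.5000)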
def update_feeder_args(feeder_args, dataset_dir):
if "data_path" in feeder_args:
feeder_args['data_path'] = os.path.join(dataset_dir, feeder_args['data_path'])
if "label_path" in feeder_args:
feeder_args['label_path'] = os.path.join(dataset_dir, feeder_args['label_path'])
if "laban_path" in feeder_args:
feeder_args['laban_path'] = os.path.join(dataset_dir, feeder_args['laban_path'])
if "info_path" in feeder_args:
feeder_args['info_path'] = os.path.join(dataset_dir, feeder_args['info_path'])
def get_parser():
# parameter priority: command line > config file > default
parser = argparse.ArgumentParser(description='MS-G3D')
parser.add_argument(
'--dataset-dir',
required=True,
help='Path to dataset folder'
)
parser.add_argument(
'--work-dir',
type=str,
required=True,
help='the work folder for storing results')
parser.add_argument('--model_saved_name', default='')
parser.add_argument(
'--config',
default='./config/nturgbd-cross-view/test_bone.yaml',
help='path to the configuration file')
parser.add_argument(
'--assume-yes',
action='store_true',
help='Say yes to every prompt')
parser.add_argument(
'--personality_index',
default='0',
help='Index of the OCEAN trait')
parser.add_argument(
'--phase',
default='train',
help='must be train or test')
parser.add_argument(
'--save-score',
type=str2bool,
default=False,
help='if ture, the classification score will be stored')
parser.add_argument(
'--seed',
type=int,
default=random.randrange(200),
help='random seed')
parser.add_argument(
'--log-interval',
type=int,
default=100,
help='the interval for printing messages (#iteration)')
parser.add_argument(
'--save-interval',
type=int,
default=1,
help='the interval for storing models (#iteration)')
parser.add_argument(
'--eval-interval',
type=int,
default=1,
help='the interval for evaluating models (#iteration)')
parser.add_argument(
'--eval-start',
type=int,
default=1,
help='The epoch number to start evaluating models')
parser.add_argument(
'--print-log',
type=str2bool,
default=True,
help='print logging or not')
parser.add_argument(
'--show-topk',
type=int,
default=[1, 5],
nargs='+',
help='which Top K accuracy will be shown')
parser.add_argument(
'--feeder',
default='feeder.feeder',
help='data loader will be used')
parser.add_argument(
'--num-worker',
type=int,
default=os.cpu_count(),
help='the number of worker for data loader')
parser.add_argument(
'--train-feeder-args',
default=dict(),
help='the arguments of data loader for training')
parser.add_argument(
'--test-feeder-args',
default=dict(),
help='the arguments of data loader for test')
parser.add_argument(
'--model',
default=None,
help='the model will be used')
parser.add_argument(
'--model-args',
type=dict,
default=dict(),
help='the arguments of model')
parser.add_argument(
'--weights',
default=None,
help='the weights for network initialization')
parser.add_argument(
'--ignore-weights',
type=str,
default=[],
nargs='+',
help='the name of weights which will be ignored in the initialization')
parser.add_argument(
'--half',
action='store_true',
help='Use half-precision (FP16) training')
parser.add_argument(
'--amp-opt-level',
type=int,
default=1,
help='NVIDIA Apex AMP optimization level')
parser.add_argument(
'--base-lr',
type=float,
default=0.01,
help='initial learning rate')
parser.add_argument(
'--step',
type=int,
default=[20, 40, 60],
nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument(
'--device',
type=int,
default=0,
nargs='+',
help='the indexes of GPUs for training or testing')
parser.add_argument(
'--optimizer',
default='SGD',
help='type of optimizer')
parser.add_argument(
'--nesterov',
type=str2bool,
default=False,
help='use nesterov or not')
parser.add_argument(
'--batch-size',
type=int,
default=32,
help='training batch size')
parser.add_argument(
'--test-batch-size',
type=int,
default=256,
help='test batch size')
parser.add_argument(
'--forward-batch-size',
type=int,
default=16,
help='Batch size during forward pass, must be factor of --batch-size')
parser.add_argument(
'--start-epoch',
type=int,
default=0,
help='start training from which epoch')
parser.add_argument(
'--num-epoch',
type=int,
default=80,
help='stop training in which epoch')
parser.add_argument(
'--weight-decay',
type=float,
default=0.0005,
help='weight decay for optimizer')
parser.add_argument(
'--optimizer-states',
type=str,
help='path of previously saved optimizer states')
parser.add_argument(
'--checkpoint',
type=str,
help='path of previously saved training checkpoint')
parser.add_argument(
'--debug',
type=str2bool,
default=False,
help='Debug mode; default false')
return parser
class Processor():
"""Processor for Skeleton-based Action Recgnition"""
def __init__(self, arg):
self.arg = arg
self.save_arg()
if arg.phase == "test":
self.ses_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_test/sessions_test.csv"), dtype={'ID': str})
self.ppl_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_test/parts_test.csv"))
else:
train_ses_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_train/sessions_train.csv"), dtype={'ID': str})
train_ppl_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_train/parts_train.csv"))
val_ses_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_val/sessions_val.csv"), dtype={'ID': str})
val_ppl_df = pd.read_csv(os.path.join(self.arg.dataset_dir, "metadata_val/parts_val.csv"))
self.ses_df = pd.concat((train_ses_df, val_ses_df), ignore_index=True)
self.ppl_df =
|
pd.concat((train_ppl_df, val_ppl_df), ignore_index=True)
|
pandas.concat
|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
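# Hedged illustration (not one of the suite's tests): expanding an observed groupby
# result to the full cartesian product of the groupers' categories, filling the
# missing ("b", ...) combinations with 0.
def _cartesian_product_example():
    cat1 = Categorical(["a", "a"], categories=["a", "b"])
    cat2 = Categorical(["x", "y"], categories=["x", "y"])
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2]})
    observed_sum = df.groupby(["A", "B"], observed=True).sum()
    return cartesian_product_for_groupers(
        observed_sum, [cat1, cat2], ["A", "B"], fill_value=0
    )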
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
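# Hedged illustration of the expectations recorded above (not one of the suite's
# tests): with observed=False the unused category "c" gets count 0 but mean NaN.
def _missing_category_example():
    cat = Categorical(["a", "b"], categories=["a", "b", "c"])
    df = DataFrame({"key": cat, "x": [1, 2]})
    grouped = df.groupby("key", observed=False)["x"]
    return grouped.count(), grouped.mean()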
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df =
|
DataFrame({"s1": s1, "s2": s2})
|
pandas.DataFrame
|
#v1.0
#v0.9 - All research graph via menu & mouse click
#v0.8 - Candlestick graphs
#v0.7 - Base version with all graphs and bug fixes
#v0.6
import pandas as pd
from pandas import DataFrame
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
class PrepareTestData():
def __init__(self, argFolder=None, argOutputSize='compact'):
super().__init__()
#argFolder='./scriptdata'
self.folder = argFolder + '/'
self.outputsize = argOutputSize.lower()
def loadDaily(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'daily_compact_'+argScript+'.csv'
else:
filename=self.folder + 'daily_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadIntra(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'intraday_5min_compact_'+argScript+'.csv'
else:
filename=self.folder + 'intraday_5min_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadSMA(self, argScript='', argPeriod=20):
try:
#if(argPeriod == 0):
# csvdf = pd.read_csv(self.folder + 'SMA_'+argScript+'.csv')
#else:
csvdf = pd.read_csv(self.folder + 'SMA_'+str(argPeriod)+ '_'+argScript+'.csv')
convert_type={'SMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_sma(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadEMA(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'EMA_'+argScript+'.csv')
convert_type={'EMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadVWMP(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'VWAP_'+argScript+'.csv')
convert_type={'VWAP':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadRSI(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'RSI_'+argScript+'.csv')
convert_type={'RSI':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadStochasticOscillator(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'STOCH_'+argScript+'.csv')
convert_type={'SlowD':float, 'SlowK':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadMACD(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'MACD_'+argScript+'.csv')
convert_type={'MACD':float, 'MACD_Hist':float, 'MACD_Signal':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadAROON(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'AROON_'+argScript+'.csv')
convert_type={'Aroon Down':float, 'Aroon Up':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf =
|
DataFrame()
|
pandas.DataFrame
|
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
from datetime import datetime, timedelta
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._libs.index as _index
import pandas as pd
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
def test_fancy_getitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s["1/2/2009"] == 48
assert s["2009-1-2"] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
s["2009-1-3"]
tm.assert_series_equal(
s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
)
def test_fancy_setitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s["1/2/2009"] = -2
assert s[48] == -2
s["1/2/2009":"2009-06-05"] = -3
assert (s[48:54] == -3).all()
def test_dti_reset_index_round_trip():
dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype("M8[ns]")
d3 = d2.set_index("index")
tm.assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
df = df.set_index("Date")
assert df.index[0] == stamp
assert df.reset_index()["Date"][0] == stamp
@pytest.mark.slow
def test_slice_locs_indexerror():
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]
s = Series(range(100000), times)
s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(
np.arange(4.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
# duplicates
df = DataFrame(
np.arange(5.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
|
tm.assert_frame_equal(result, df)
|
pandas._testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/1/28 7:11 PM
@File : rps.py
@author : pchaos
@license : Copyright(C), pchaos
@Contact : <EMAIL>
"""
import datetime
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod
from abc import ABCMeta
import QUANTAXIS as qa
from .comm import str2date, date2str
# Compute returns
def cal_ret(dataFrame, *args, **kwargs):
    '''Compute relative returns (see the usage sketch after this function).
    days: week = 5; month = 20; half a year = 120; one year = 250
    '''
if len(args) == 0:
args = tuple([20])
close = dataFrame.close
colName = 'MARKUP'
cols = []
for num in args:
coln = '{}{}'.format(colName, num)
dataFrame[coln] = close / close.shift(num)
cols.append(coln)
# return dataFrame.iloc[-max(args):, :].fillna(0)
# return dataFrame[cols].iloc[max(args):, :]
return dataFrame[cols].iloc[max(args):, :].dropna()
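# Illustrative sketch, not part of the original module: cal_ret applied to a toy
# frame that only has a 'close' column; the real pipeline feeds QUANTAXIS market data.
def _demo_cal_ret():
    demo = pd.DataFrame({'close': np.arange(1.0, 61.0)})
    rets = cal_ret(demo, 5, 20)  # MARKUP5 = close/close.shift(5), MARKUP20 = close/close.shift(20)
    return rets.tail()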
# Compute RPS
def get_RPS(dataFrame, *args, **kwargs):
    """Compute RPS from a DataFrame of returns (a ranking sketch follows getSingleRPS below).
    """
i = 0
    # print("date: {} count: {}".format(dataFrame.index.get_level_values(0)[0], len(dataFrame)))
for col in dataFrame.columns:
newcol = col.replace("MARKUP", "RPS", 1)
if i > 0:
df2 = getSingleRPS(dataFrame, col, newcol)
df[newcol] = df2[newcol]
else:
df = getSingleRPS(dataFrame, col, newcol)
i += 1
return df
def getSingleRPS(dataFrame, col, newcol):
df = pd.DataFrame(dataFrame[col].sort_values(ascending=False).dropna())
dfcount = len(df)
    # the range steps by -100, so there is no need to multiply by 100%
df['n'] = range(dfcount * 100, 0, -100)
df[newcol] = df['n'] / dfcount
    # drop the date level from the index
return df.reset_index().set_index(['code'])[[newcol]]
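# Illustrative sketch, not part of the original module: RPS turns one day's return
# cross-section into percentile ranks, so the strongest MARKUP value maps to RPS 100.
def _demo_rps_ranking():
    idx = pd.MultiIndex.from_product(
        [[pd.Timestamp('2020-01-28')], ['c1', 'c2', 'c3', 'c4']], names=['date', 'code'])
    markup = pd.DataFrame({'MARKUP20': [1.30, 1.10, 0.95, 1.20]}, index=idx)
    return get_RPS(markup)  # RPS20 for c1..c4: 100, 50, 25, 75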
class RPSAbs(metaclass=ABCMeta):
    """Base class for RPS calculation
    """
def __init__(self, codes=[], startDate=datetime.date.today(), endDate=None, rpsday=[20, 50]):
self._codes = codes
self._startDate = startDate
self._endDate = endDate
self._rpsday = rpsday
self._rps = None
@property
    def codes(self):
        """List of codes (list)"""
return self._codes
@codes.setter
def codes(self, value):
self._codes = value
def __repr__(self):
return '{0}->{1}'.format(self._codes, len(self._codes))
@abstractmethod
    def _fetchData(self):
        """Fetch a QA_DataStruct
        @return QA_DataStruct
        """
# data = qa.QA_fetch_index_day_adv(self.__codes, self.__startDate, self.__endDate)
# return None
    def rps(self, reCaculate=True) -> pd.DataFrame:
        """Compute RPS
        @return pd.DataFrame
        """
if reCaculate:
            # force a recalculation
self._rps = None
if self._rps is None:
self._getRPS()
return self._rps
    def rpsTopN(self, theday: datetime.datetime, percentN=5) -> pd.DataFrame:
        """Rows ranked in the top percentN% by RPS
        """
rps = self._getRPS()
lastday = theday = str2date(date2str(theday))
while 1:
            # locate the most recent RPS data
dfn = []
try:
df = rps.loc[(slice(pd.Timestamp(theday), pd.Timestamp(lastday))), :]
if len(df) > 0:
# 排名前N%的指数
for col in df.columns:
# dfn.append(df.sort_values(by=col, ascending=False).reset_index().head(int(len(df) / (100 / percentN))))
dfn.append(np.round(df[df[col] >= 100 - percentN], decimals=4))
dftopn =
|
pd.concat(dfn)
|
pandas.concat
|
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from .utils import test_data_values, test_data_keys, df_equals
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(
|
pandas.Series([1, np.nan, 2])
|
pandas.Series
|
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
import numpy as np
import pandas as pd
from python.tools import (
clean_folder
)
def construct_dataset(file_name, var_name):
"""Convenience function for constructing
a clean Pandas dataframe from the CSV
files provided by JH CSSE on their Github
repo
Args:
file_name (str): File name / URL of CSV file
var_name (name): Variable name
Returns:
df: Dataframe
"""
df =
|
pd.read_csv(file_name)
|
pandas.read_csv
|
# Load dependencies
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import *
import matplotlib.pyplot as plt
from matplotlib.cm import register_cmap
from scipy import stats
from sklearn.decomposition import PCA
import seaborn
class Wrangle:
def __init__(self, df):
self.df = df
def format(self):
df = self.df
df.drop(df[df.sq_feet == 0].index, inplace=True)
df.drop(df[df.price == 0].index, inplace=True)
df.dropna(inplace=True)
# Remove outliers
df = df[df.sq_feet < df.sq_feet.quantile(0.80)]
        # Manual one-hot encoding of the utilities_included column
df = df.assign(heat=0, electricity=0, water=0, internet=0, cable=0)
for index, row in df.iterrows():
if "Heat" in row["utilities_included"]:
df.at[index, "heat"] = 1
if "Electricity" in row["utilities_included"]:
df.at[index, "electricity"] = 1
if "Water" in row["utilities_included"]:
df.at[index, "water"] = 1
if "Internet" in row["utilities_included"]:
df.at[index, "internet"] = 1
if "Cable" in row["utilities_included"]:
df.at[index, "cable"] = 1
# Conditionally replace quadrant names
df.loc[df["quadrant"] == None, "quadrant"] = "Unspecified"
df.loc[
(df["quadrant"] == "Inner-City||SW") | (df["quadrant"] == "SW||Inner-City"),
"quadrant",
] = "SW-Central"
df.loc[
(df["quadrant"] == "Inner-City||NW") | (df["quadrant"] == "NW||Inner-City"),
"quadrant",
] = "NW-Central"
df.loc[
(df["quadrant"] == "Inner-City||SE") | (df["quadrant"] == "SE||Inner-City"),
"quadrant",
] = "SE-Central"
df.loc[
(df["quadrant"] == "Inner-City||NE") | (df["quadrant"] == "NE||Inner-City"),
"quadrant",
] = "NE-Central"
# One hot encoding of quadrants
df["quadrant"] = pd.Categorical(df["quadrant"])
dfDummies = pd.get_dummies(df["quadrant"], prefix="Quadrant")
df = pd.concat([df, dfDummies], axis=1)
# One hot encoding of type
df["type"] = pd.Categorical(df["type"])
dfDummies = pd.get_dummies(df["type"], prefix="type")
df = pd.concat([df, dfDummies], axis=1)
# One hot encoding of community
df["community"] = pd.Categorical(df["community"])
dfDummies = pd.get_dummies(df["community"], prefix="community")
df = pd.concat([df, dfDummies], axis=1)
# Clean the den column
df.loc[df["den"] == "Yes", "den"] = 1
df.loc[(df["den"] == "No") | (df["den"] == None), "den"] = 0
# One hot encoding for den
df["den"] = pd.Categorical(df["den"])
dfDummies = pd.get_dummies(df["den"], prefix="den")
df = pd.concat([df, dfDummies], axis=1)
# Remove unencoded cols
df.drop(
["type", "community", "den", "quadrant", "utilities_included"],
axis=1,
inplace=True,
)
# Remove any blank entries (necessary for matrix)
df.replace("", np.nan, inplace=True)
df.dropna(inplace=True)
self.df = df
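    # Illustrative sketch, not part of the original class: the manual loop above could
    # also be written with Series.str.get_dummies, assuming utilities_included holds
    # delimiter-separated strings such as "Heat, Water".
    def _demo_utilities_dummies(self):
        utils = pd.Series(["Heat, Water", "Electricity", "Heat, Internet, Cable"])
        return utils.str.get_dummies(sep=", ")  # one 0/1 column per utility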
def ffs(self):
"""Forward Feature Selection"""
df = self.df
from sklearn.feature_selection import f_regression
X = df.drop(["price"], axis=1)
y = df["price"]
ffs = f_regression(X, y)
variables = []
for i in range(0, len(X.columns) - 1):
if ffs[0][i] >= 50:
variables.append(X.columns[i])
variables.insert(0, "price")
self.df = df[variables]
def pca(self):
"""Principal component analysis"""
df = self.df
X = df.drop(["price"], axis=1)
y = df["price"]
scaled = StandardScaler().fit_transform(df)
X = scaled[:, 1:]
y = scaled[:, 0]
# Perform eigendecomposition on covariance matrix
cov_mat = np.cov(X.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
# print('Eigenvectors \n%s' %eig_vecs)
# print('\nEigenvalues \n%s' %eig_vals)
# Conduct PCA
pca = PCA(n_components=126) # ~ 60% explained variance
X_pca = pca.fit_transform(X)
def explained_var_plot():
# Explained variance
pca = PCA().fit(X)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel("number of components")
plt.ylabel("cumulative explained variance")
plt.title("Explained Variance PCA")
fig = plt.gcf()
fig.savefig("images/explained_variance_pca.png", dpi=fig.dpi)
fig.clf()
explained_var_plot()
def pca_n_components():
plt.plot(range(126), pca.explained_variance_ratio_)
plt.plot(range(126), np.cumsum(pca.explained_variance_ratio_))
plt.title("Component-wise and Cumulative Explained Variance")
fig = plt.gcf()
fig.savefig("images/cumulative_explained_var.png", dpi=fig.dpi)
fig.clf()
pca_n_components()
df_y =
|
pd.DataFrame({"price": y})
|
pandas.DataFrame
|
import sys
import geojson
from shapely.geometry import shape
import pandas as pd
from pathlib import Path
from fire import Fire
def convert(path, region_type):
converted = []
for p in Path(path).glob('*.GeoJson'):
d = geojson.load(open(p, 'r'))
converted.append(dict(
country_name=d['properties']['name'],
country_iso=d['properties']['alltags']['ISO3166-1'],
region_slug='_'.join([region_type] + d['properties']['name'].lower().split(' ')),
region_name=d['properties']['name'],
region_type=region_type,
dashboard='TRUE',
population=d['properties']['alltags'].get('population'),
timezone=d['properties']['alltags'].get('timezone'),
region_shapefile_wkt=shape(d['geometry']).simplify(0.05, preserve_topology=False).wkt
))
|
pd.DataFrame(converted)
|
pandas.DataFrame
|
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
            logging.error("too many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
        #BIG BUG: this was originally -length-1, which was always truncating the array and producing NaNs.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
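    # Illustrative sketch, not part of the original class: the split adjustment used
    # above, shown with a made-up price table containing a single 2-for-1 split.
    def _demo_split_adjustment(self):
        prices = pd.DataFrame({'date': ['2017-03-01', '2017-06-01'],
                               'split_ratio': [1.0, 2.0]})
        s = pd.to_datetime(prices['date']) > pd.to_datetime('2017-01-01')  # rows after the statement end date
        adj = pd.Series(prices[s]['split_ratio'].unique()).product()  # 1.0 * 2.0 = 2.0
        return 1000000 * adj  # shares outstanding restated to post-split terms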
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
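    # Illustrative sketch, not part of the original class: SNOA worked through with
    # made-up numbers, mirroring the formula in the docstring above.
    def _demo_snoa_arithmetic(self):
        totalassets = 1000.0
        cashandequivalents = 100.0
        netdebt = 250.0
        totalequity = 400.0
        oa = totalassets - cashandequivalents  # operating assets = 900
        ol = totalassets - netdebt - totalequity  # operating liabilities = 350
        return (oa - ol) / totalassets  # SNOA = 0.55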
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
        #length1 = how many quarters to look back (20 quarters here)
        #length2 = for each quarter, how many trailing quarters the gross margin spans (4 here)
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls=pd.Series(ltcacls)
if pd.isnull(ltcacls).any():
return None
return stats.gmean(1+ltcacls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_longterm_tatl(self,df,seed=-1,length=20):
lttatls=[]
for i in range(seed,seed-length,-1):
lttatls.append(self.get_tatl(df,i))
lttatls=pd.Series(lttatls)
if pd.isnull(lttatls).any():
return None
return stats.gmean(1+lttatls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_capex(self,df,seed=-1,length=4):
purchaseofplantpropertyandequipment=self.get_sum_quarters(df,'purchaseofplantpropertyandequipment',seed,length)
saleofplantpropertyandequipment=self.get_sum_quarters(df,'saleofplantpropertyandequipment',seed,length)
s=pd.Series([purchaseofplantpropertyandequipment,saleofplantpropertyandequipment])
if pd.isnull(s).all():
return None
m=pd.Series([-1,-1])
capex=(s*m).sum()
if capex is None:
return None
return float(capex)
def get_freecashflow(self,df,seed=-1):
netcashfromoperatingactivities=self.get_value(df,'netcashfromoperatingactivities',seed)
capex=self.get_capex(df,seed,length=1)
s=pd.Series([netcashfromoperatingactivities,capex])
if pd.isnull(s).all():
return None
m=pd.Series([1,-1])
fcf=(s*m).sum()
return float(fcf)
    #add a length2 parameter so we take the sums of cash flows
def get_cashflowonassets(self,df,seed=-1,length1=20,length2=4):
cfoas=[]
for i in range(seed,seed-length1,-1):
start_assets=self.get_value(df,'totalassets',i-length2)
end_assets=self.get_value(df,'totalassets',i)
fcfs=[]
for k in range(i,i-length2,-1):
fcf=self.get_freecashflow(df,k)
fcfs.append(fcf)
if pd.isnull(fcfs).any():
return None
total_fcf=pd.Series(fcfs).sum()
avg_assets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([total_fcf,avg_assets]).any() or avg_assets==0:
return None
else:
cfoas.append(total_fcf/avg_assets)
if pd.isnull(cfoas).any():
return None
else:
if pd.isnull(stats.gmean(1+pd.Series(cfoas))-1):
return None
else:
return stats.gmean(1+pd.Series(cfoas))-1 #we want to punish variability because the higher number the better
def get_roa(self,df,seed=-1,length=4):
netincome=self.get_sum_quarters(df,'netincome',seed,length)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincome,totalassets]).any() or totalassets==0:
return None
roa=netincome/totalassets
return float(roa)
def get_roc(self,df,seed=-1,length=4):
ebit=self.get_ebit(df,seed,length)
dividends=self.get_sum_quarters(df,'paymentofdividends',seed,length)
start_debt=self.get_netdebt(df,seed-length)
end_debt=self.get_netdebt(df,seed)
netdebt=pd.Series([start_debt,end_debt]).mean()
start_equity=self.get_value(df,'totalequity',seed-length)
end_equity=self.get_value(df,'totalequity',seed)
totalequity=pd.Series([start_equity,end_equity]).mean()
num=pd.Series([ebit,dividends]).sum()
den=pd.Series([netdebt,totalequity]).sum()
if pd.isnull([num,den]).any() or den==0:
return None
else:
roc=(float(num/den))
return float(roc)
def get_longtermroa(self,df,seed=-1,length1=20,length2=4):
roas=[]
for i in range(seed,seed-length1,-1):
roas.append(self.get_roa(df,i,length2))
if pd.isnull(roas).any():
return None
longtermroagmean=stats.gmean(1+pd.Series(roas))-1
if pd.isnull(longtermroagmean):
return None
return float(longtermroagmean)
def get_longtermroc(self,df,seed=-1,length1=20,length2=4):
rocs=[]
for i in range(seed,seed-length1,-1):
rocs.append(self.get_roc(df,i,length2))
rocs=pd.Series(rocs)
if pd.isnull(rocs).any():
return None
roc=stats.gmean(1+rocs)-1
if pd.isnull(roc):
return None
return float(roc)
def get_momentum(self,df,period=relativedelta(months=11)):
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
pctchange=1+pctchange
pctchange=pctchange.tolist()
gain=np.prod(pctchange)
return float(gain-1)
def get_fip(self,df,period=relativedelta(years=1)):
orig_df=df.copy()
df=df[pd.to_datetime(df['date'])>=pd.to_datetime(df['date'].max())-period]
df=df['adj_close'].astype('float')
pctchange=df.pct_change()
pctchange=pctchange.dropna()
if len(pctchange)==0:
return None
updays=(pctchange>0).sum()
downdays=(pctchange<0).sum()
fip=float(downdays)/float(len(pctchange))-float(updays)/float(len(pctchange))
if self.get_momentum(orig_df)<0:
fip=-1*fip
return fip #the lower the better
def get_balance_sheet_mean_value(self,df,tag,seed=-1,length=1):
start=self.get_value(df,tag,seed-length)
end=self.get_value(df,tag,seed)
if pd.isnull(pd.Series([start,end])).any() or start==0 or end==0:
return None
average=pd.Series([start,end]).mean()
if pd.isnull(average):
return None
else:
return float(average)
def get_dsri(self,df,seed1=-1,seed2=-5,length=4):
#seed1 and 2 are the quarters we are comparing between
#dsri=(ttmsdfcompany.iloc[-1]['accountsreceivable']/ttmsdfcompany.iloc[-1]['totalrevenue'])/(ttmsdfcompany.iloc[-5]['accountsreceivable']/ttmsdfcompany.iloc[-5]['totalrevenue'])
#accountsreceivable1=self.get_value(cik,'balance_sheet','accountsreceivable',seed1)
#accountsreceivable2=self.get_value(cik,'balance_sheet','accountsreceivable',seed2)
accountsreceivable1=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed1,length)
accountsreceivable2=self.get_balance_sheet_mean_value(df, 'accountsreceivable', seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([accountsreceivable1,accountsreceivable2,totalrevenue1,totalrevenue2]).any() or totalrevenue1==0 or totalrevenue2==0:
return None
num=accountsreceivable1/totalrevenue1
den=accountsreceivable2/totalrevenue2
if den==0:
return None
dsri=num/den
return float(dsri)
def get_gmi(self,df,seed1=-1,seed2=-5,length=4):
#gmi=((ttmsdfcompany.iloc[-5]['totalrevenue']-ttmsdfcompany.iloc[-5]['totalcostofrevenue'])/ttmsdfcompany.iloc[-5]['totalrevenue'])/((ttmsdfcompany.iloc[-1]['totalrevenue']-ttmsdfcompany.iloc[-1]['totalcostofrevenue'])/ttmsdfcompany.iloc[-1]['totalrevenue'])
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
totalcostofrevenue1=self.get_sum_quarters(df,'totalcostofrevenue',seed1,length)
totalcostofrevenue2=self.get_sum_quarters(df,'totalcostofrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2,totalcostofrevenue1,totalcostofrevenue2]).any():
return None
if totalrevenue2==0 or totalrevenue1==0:
return None
num=(totalrevenue2-totalcostofrevenue2)/totalrevenue2
den=(totalrevenue1-totalcostofrevenue1)/totalrevenue1
gmi=num/den
if den==0:
return None
return float(gmi)
def get_aqi(self,df,seed1=-1,seed2=-5):
#https://www.oldschoolvalue.com/blog/investment-tools/beneish-earnings-manipulation-m-score/
#otherlta1=companydf.iloc[-1]['totalassets']-(companydf.iloc[-1]['totalcurrentassets']+companydf.iloc[-1]['netppe'])
#otherlta2=companydf.iloc[-5]['totalassets']-(companydf.iloc[-5]['totalcurrentassets']+companydf.iloc[-5]['netppe'])
# aqi=(otherlta1/companydf.iloc[-1]['totalassets'])/(otherlta2/companydf.iloc[-5]['totalassets'])
netppe1=self.get_value(df,'netppe',seed1)
netppe2=self.get_value(df,'netppe',seed2)
totalassets1=self.get_value(df,'totalassets',seed1)
totalassets2=self.get_value(df,'totalassets',seed2)
totalcurrentassets1=self.get_value(df,'totalcurrentassets',seed1)
totalcurrentassets2=self.get_value(df,'totalcurrentassets',seed2)
if pd.isnull([netppe1,netppe2,totalassets1,totalassets2,totalcurrentassets1,totalcurrentassets2]).any():
return None
a=totalassets1-totalcurrentassets1-netppe1
b=totalassets2-totalcurrentassets2-netppe2
if totalassets1==0 or totalassets2==0:
return None
num=a/totalassets1
den=b/totalassets2
if den==0:
return None
aqi=num/den
return float(aqi)
def get_sgi(self,df,seed1=-1,seed2=-5,length=4):
#sgi=ttmsdfcompany.iloc[-1]['totalrevenue']/ttmsdfcompany.iloc[-5]['totalrevenue']
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([totalrevenue1,totalrevenue2]).any():
return None
if totalrevenue2==0:
return None
sgi=totalrevenue1/totalrevenue2
return float(sgi)
def get_depi(self,df,seed1=-1,seed2=-5,length=4):
#depit=ttmsdfcompany.iloc[-1]['depreciationexpense']/(ttmsdfcompany.iloc[-1]['depreciationexpense']+ttmsdfcompany.iloc[-1]['netppe'])
#depit1=ttmsdfcompany.iloc[-5]['depreciationexpense']/(ttmsdfcompany.iloc[-5]['depreciationexpense']+ttmsdfcompany.iloc[-5]['netppe'])
#depi=depit1/depit
depreciationexpense1=self.get_sum_quarters(df,'depreciationexpense',seed1,length)
depreciationexpense2=self.get_sum_quarters(df,'depreciationexpense',seed2,length)
netppe1=self.get_balance_sheet_mean_value(df, 'netppe', seed1,length)
netppe2=self.get_balance_sheet_mean_value(df, 'netppe', seed2,length)
if pd.isnull([depreciationexpense1,depreciationexpense2,netppe1,netppe2]).any():
return None
num=depreciationexpense2/(depreciationexpense2+netppe2)
den=depreciationexpense1/(depreciationexpense1+netppe1)
if den==0:
return None
depi=num/den
return float(depi)
def get_sgai(self,df,seed1=-1,seed2=-5,length=4):
#sgait=ttmsdfcompany.iloc[-1]['sgaexpense']/ttmsdfcompany.iloc[-1]['totalrevenue']
#sgait1=ttmsdfcompany.iloc[-5]['sgaexpense']/ttmsdfcompany.iloc[-5]['totalrevenue']
#sgai=sgait/sgait1
sgaexpense1=self.get_sum_quarters(df,'sgaexpense',seed1,length)
sgaexpense2=self.get_sum_quarters(df,'sgaexpense',seed2,length)
totalrevenue1=self.get_sum_quarters(df,'totalrevenue',seed1,length)
totalrevenue2=self.get_sum_quarters(df,'totalrevenue',seed2,length)
if pd.isnull([sgaexpense1,sgaexpense2,totalrevenue1,totalrevenue2]).any():
return None
if totalrevenue1==0 or totalrevenue2==0:
return None
num=sgaexpense1/totalrevenue1
den=sgaexpense2/totalrevenue2
if den==0:
return None
sgai=num/den
return float(sgai)
def get_lvgi(self,df,seed1=-1,seed2=-5):
"""
lvgit=(companydf.iloc[-1]['longtermdebt']+companydf.iloc[-1]['totalcurrentliabilities'])/companydf.iloc[-1]['totalassets']
lvgit1=(companydf.iloc[-5]['longtermdebt']+companydf.iloc[-5]['totalcurrentliabilities'])/companydf.iloc[-5]['totalassets']
lvgi=lvgit/lvgit1
"""
longtermdebt1=self.get_value(df,'longtermdebt',seed1)
longtermdebt2=self.get_value(df,'longtermdebt',seed2)
shorttermdebt1=self.get_value(df,'shorttermdebt',seed1)
shorttermdebt2=self.get_value(df,'shorttermdebt',seed2)
totalassets1=self.get_value(df,'totalassets',seed1)
totalassets2=self.get_value(df,'totalassets',seed2)
if pd.isnull([longtermdebt1,longtermdebt2,shorttermdebt1,shorttermdebt2,totalassets1,totalassets2]).any() or totalassets1==0 or totalassets2==0:
return None
num=(longtermdebt1+shorttermdebt1)/totalassets1
den=(longtermdebt2+shorttermdebt2)/totalassets2
if den==0:
return None
lvgi=num/den
return float(lvgi)
def get_tata(self,df,seed=-1,length=4):
#tata=(ttmsdfcompany.iloc[-1]['netincomecontinuing']-ttmsdfcompany.iloc[-1]['netcashfromoperatingactivities'])/ttmsdfcompany.iloc[-1]['totalassets']
netincomecontinuing=self.get_sum_quarters(df,'netincomecontinuing',seed,length)
        netcashfromoperatingactivities=self.get_sum_quarters(df,'netcashfromoperatingactivities',seed,length)
#totalassets=self.get_value(cik,'balance_sheet','totalassets',seed)
start_assets=self.get_value(df,'totalassets',seed-length)
end_assets=self.get_value(df,'totalassets',seed)
if pd.isnull([start_assets,end_assets]).any() or start_assets==0 or end_assets==0:
return None
totalassets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([netincomecontinuing,totalassets,netcashfromoperatingactivities]).any() or totalassets==0:
return None
tata=(netincomecontinuing-netcashfromoperatingactivities)/totalassets
return float(tata)
def get_probm(self,df,seed1=-1,seed2=-5,length=4):
#probmarray=[-4.84,.92*dsri,.528*gmi,.404*aqi,.892*sgi,.115*depi,-1*.172*sgai,-1*.327*lvgi,4.697*tata]
#https://www.oldschoolvalue.com/blog/investment-tools/beneish-earnings-manipulation-m-score/
dsri=self.get_dsri(df,seed1,seed2,length)
gmi=self.get_gmi(df,seed1,seed2,length)
aqi=self.get_aqi(df,seed1,seed2)
sgi=self.get_sgi(df,seed1,seed2,length)
depi=self.get_depi(df,seed1,seed2,length)
sgai=self.get_sgai(df,seed1,seed2,length)
lvgi=self.get_lvgi(df,seed1,seed2)
tata=self.get_tata(df,seed1,length)
probmarray=[dsri,gmi,aqi,sgi,depi,sgai,lvgi,tata]
if pd.isnull(probmarray).all():
return None
m=[.92,.528,.404,.892,.115,-.172,-.327,4.697]
s=pd.Series(probmarray)
m=pd.Series(m)
probm=s.multiply(m).sum()
if probm is None:
return None
else:
probm=probm-4.84
return float(probm)
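    # Illustrative sketch, not part of the original class: how the eight Beneish
    # ratios are weighted into probm and turned into PMAN. Inputs are made up.
    def _demo_beneish_weighting(self):
        ratios = pd.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0])  # dsri, gmi, aqi, sgi, depi, sgai, lvgi, tata
        weights = pd.Series([.92, .528, .404, .892, .115, -.172, -.327, 4.697])
        probm = ratios.multiply(weights).sum() - 4.84  # about -2.48 for these inputs
        return stats.norm.cdf(probm)  # PMAN: estimated probability of manipulation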
def get_pman(self,df,seed1=-1,seed2=-5,length=4):
probm=self.get_probm(df,seed1,seed2,length)
if pd.isnull(probm):
return None
pman=stats.norm.cdf(probm)
return float(pman)
def get_mta(self,df,pricesdf,seed=-1):
#market cap + book value of liabilities
marketcap=self.get_market_cap(df,pricesdf,seed)
totalliabilities=self.get_value(df,'totalliabilities',seed)
if pd.isnull([marketcap,totalliabilities]).any():
return None
s=pd.Series([marketcap,totalliabilities])
m=pd.Series([1,1])
r=s.multiply(m).sum()
if pd.isnull(r):
return None
mta=float(r)
return mta
def get_nimta(self,df,prices,seed=-1):
values=[]
mtas=[]
for i in range(seed,seed-4,-1):
values.append(self.get_value(df,'netincome',i))
mtas.append(self.get_mta(df,prices,i))
values=pd.Series(values)
mtas=pd.Series(mtas)
values=values/mtas
m=pd.Series([.5333,.2666,.1333,.0666])
nimta=values.multiply(m).sum()
if pd.isnull(nimta):
return None
else:
return float(nimta)
def get_tlmta(self,df,pricesdf,seed=-1):
totalliabilities=self.get_value(df,'totalliabilities',seed)
mta=self.get_mta(df,pricesdf,seed)
if pd.isnull([mta,totalliabilities]).any() or mta==0:
return None
tlmta=totalliabilities/mta
return float(tlmta)
def get_cashmta(self,df,pricesdf,seed=-1):
mta=self.get_mta(df,pricesdf,seed)
cashandequivalents=self.get_value(df,'cashandequivalents',seed)
if pd.isnull([mta,cashandequivalents]).any() or mta==0:
return None
cashmta=cashandequivalents/mta
return float(cashmta)
def get_sigma(self,prices,seed=-1,days=90):
prices=prices.sort_values('date')
pctchange=prices['adj_close'].pct_change(periods=252)
if seed==-1:
pctchange=pctchange[pd.to_datetime(prices['date']).dt.date>=datetime.today().date()-relativedelta(days=days)]
sigma=pctchange.std()
return float(sigma)
else:
exit()
def get_mb(self,df,pricesdf,seed=-1):
mta=self.get_mta(df,pricesdf,seed)
totalequityandnoncontrollinginterests=self.get_value(df,'totalequityandnoncontrollinginterests',seed)
marketcap=self.get_market_cap(df,pricesdf,seed)
if pd.isnull([marketcap,totalequityandnoncontrollinginterests,mta]).any():
return None
den=(totalequityandnoncontrollinginterests+.1*marketcap)
if pd.isnull(den) or den==0:
return None
mb=mta/den
return float(mb)
def get_pfd_price(self,pricesdf,seed=-1):
pricesdf=pricesdf.sort_values('date')
if seed==-1:
price=pricesdf['adj_close'].iloc[-1]
else:
exit()
if pd.isnull(price) or price==0:
return None
price=float(price)
if price>15:
price=float(15)
price=math.log(price,10)
return price
def get_exretavg(self,pricesdf,snpfd,seed=-1):
pricesdf=pricesdf.sort_values('date')
pricesdf['adj_close']=pricesdf['adj_close'].astype('float')
snpfd=snpfd.sort_values('date')
snpfd['adj_close']=snpfd['adj_close'].astype('float')
exrets=[]
if seed==-1:
end_date=datetime.now().date()
else:
exit()
        for i in range(4):#do this four times, one pass per quarter of the trailing year
start_date=end_date-relativedelta(months=3)
sp1=snpfd[pd.to_datetime(snpfd['date']).dt.date<=start_date]['adj_close'].iloc[-1] #self.get_price('$SPX',start_date.strftime('%Y-%m-%d'))
sp2=snpfd[pd.to_datetime(snpfd['date']).dt.date<=end_date]['adj_close'].iloc[-1]
c1=pricesdf[
|
pd.to_datetime(pricesdf['date'])
|
pandas.to_datetime
|
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
)
tm.assert_frame_equal(frame, expected)
def test_frame_from_records_utc(self):
rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index="begin_time")
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
|
tm.assert_index_equal(indexed_frame.index, index)
|
pandas._testing.assert_index_equal
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, series._set_index, None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, series._set_index,
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series.fromValue(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
strings = Series.fromValue('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
d = datetime.now()
dates = Series.fromValue(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
def test_contains(self):
common.assert_contains_all(self.ts.index, self.ts)
def test_save_load(self):
self.series.save('tmp1')
self.ts.save('tmp3')
unp_series = Series.load('tmp1')
unp_ts = Series.load('tmp3')
os.remove('tmp1')
os.remove('tmp3')
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(Exception, self.ts.__getitem__, d)
def test_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(common.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(common.makeIntIndex(20).astype(float),
index=
|
common.makeIntIndex(20)
|
pandas.util.testing.makeIntIndex
|
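# Editor's note: common.makeIntIndex(20) from the legacy pandas.util.testing module simply
# builds an Index of 20 consecutive integers, so the completed constructor above is roughly
# equivalent to this modern, self-contained sketch (assumption: the helper's exact dtype is
# not important to the test):
import numpy as np
import pandas as pd
series = pd.Series(np.arange(20, dtype=float), index=pd.Index(np.arange(20)))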
import pandas as pd
import difflib as dfl
import click
import random
import datetime
import moment
common_companies = ['Self Employed', 'Amazon Web Services']
common_positions = {
'Chief Executive Officer': 'CEO',
'CEO': 'CEO',
'Co-Founder & CEO': 'CEO',
'CEO & Founder': 'CEO',
'Vice President': 'VP'
}
def getPositionMatch(position):
#for p in common_positions.keys:
matches = dfl.get_close_matches(position, common_positions.keys(), 1)
if len(matches) > 0:
return common_positions[matches[0]]
return position
def getCompanyMatch(company):
#for c in common_companies:
matches = dfl.get_close_matches(company, common_companies, 1)
if len(matches) > 0:
return matches[0]
return company
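# Editor's note: difflib.get_close_matches(word, possibilities, n, cutoff=0.6) returns up
# to n candidates ranked by SequenceMatcher similarity, which is what lets loose variants
# of a title or company name snap back onto a canonical key above. For example:
#   dfl.get_close_matches('Chief Exec Officer', ['Chief Executive Officer', 'Vice President'], 1)
#   -> ['Chief Executive Officer']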
def closeMatches(df, row, fieldName, matchFunction):
#print("\n\n====")
#print(row)
# if not df.iloc[row]: return "No Company"
c1 = str(df.iloc[row][fieldName])
if not c1: return "None"
#print(c1)
return matchFunction(c1)
def summarizeByField(df, fieldName, matchFunction):
print(matchFunction("self-employed"))
g = df.groupby(lambda row: closeMatches(df, row, fieldName, matchFunction))
gSorted = g.size().sort_values(ascending=False)
print("\n==== SUMMARY ===")
print(gSorted.head(50))
#return
for i in range(0,50):
fieldValue = gSorted.index[i]
size = gSorted[i]
peopleList = g.indices[fieldValue]
print (fieldValue, " :: ", size)
#print (peopleList)
randomPeople = random.sample(list(peopleList), min(5, size))
for j in randomPeople:
randomPerson = df.iloc[j]
print(" ", randomPerson['First Name'], randomPerson['Last Name'], " (", \
randomPerson['Position'], ", ", randomPerson['Company'], ")")
def messagesOld(mdf):
#print(mdf)
mdf['OTHER'] = mdf.apply(lambda x: x['TO'] if x['FROM'] == '<NAME>' else x['FROM'], axis=1)
filteredStart = mdf[mdf['DATETIME'] < pd.to_datetime(moment.date("1 year ago").date)]
filteredByDate = filteredStart[filteredStart['DATETIME'] > pd.to_datetime(moment.date("2 years ago").date)]
filteredByFolder = filteredByDate[filteredByDate['FOLDER'] == 'INBOX']
groupedConversations = filteredByDate.groupby('OTHER')
#multipleConversations = groupedConversations.filter(lambda x: len(x) > 1)
#print(multipleConversations)
#sampleConversations = multipleConversations.sample(frac=0.1)
for key, conversations in groupedConversations.groups.items():
if len(conversations) <2:
continue
sent = 0
for c in conversations:
if mdf.iloc[c]['FROM'] == '<NAME>':
sent = sent + 1
if sent == 0:
continue
if random.random() > 0.1:
continue
print("\n===\n{}\n===".format(key))
for c in conversations:
print(" [{}] {}".format(mdf.iloc[c]['DATETIME'], mdf.iloc[c]['CONTENT']))
return
@click.command()
@click.option('--linkedindir', default="exported", help='Folder where the LinkedIn Data is unzipped')
@click.option('--company/--no-company', default=True, help="Print Company Analysis")
@click.option('--position/--no-position', default=True, help="Print Position Analysis")
def linkedinAnalysis(linkedindir, company, position):
"""Analyzes your LinkedIn Data Export to find people you can get in touch with"""
# execute only if run as a script
connectionscsv = linkedindir + "/Connections.csv"
messagescsv = linkedindir + "/Messages.csv"
print("Reading file... ", connectionscsv)
df = pd.read_csv(connectionscsv)
print("done")
print(df.head())
print("Reading messages from: ", messagescsv)
mdf =
|
pd.read_csv(messagescsv)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statistics import mean
def InputsSolarMarket():
# ------------------------ Inputs ------------------------------#
# Day Ahead Energy Market 2018
dataset_E = pd.read_csv('in/all_ercot_profiles_hourly_2018.csv')
E_price = dataset_E.iloc[0:8762, 4].values # Creating the price Vector $/MWh, start at 1/1/2018 00:00 CST
# Day Ahead Market - AS Down Regulation ERCOT hourly prices 2018
dataset_AS = pd.read_csv('in/AS_price.csv')
ASM_price = dataset_AS.iloc[70080:78840, 9].values # Creating the price Vector $/MWh, start at 1/1/2018 00:00 CST
# Solar CF
dataset_solar = pd.read_csv('in/all_ercot_profiles_hourly_2018.csv') # Reading the dataset of solar gen CF
solar_cf = dataset_solar.iloc[0:8762, 1].values # Creating the solar generation Vector, start 1/1/2018 00:00 (CST)
return E_price, solar_cf, ASM_price
def InputsSolarUncertainMul(eta):
# ---------Imports ------#
data = pd.DataFrame(pd.read_csv('in/all_ercot_profiles_hourly.csv', sep=';')) # Reading the dataset of solar gen CF
dataset_solar = data.loc[:, ['year', 'CF_model_solar']]
data_raw2015 = pd.DataFrame(pd.read_csv('in/solar_TX.csv', sep=';')) # Reading the dataset of solar gen CF
data2015 = data_raw2015.iloc[0:8760, 5].values.tolist()
df_years = pd.DataFrame({'CF_2015': data2015, 'CF_2016': dataset_solar.loc[
dataset_solar['year'] == 2016, 'CF_model_solar'].values.tolist(), 'CF_2017': dataset_solar.loc[
dataset_solar['year'] == 2017, 'CF_model_solar'].values.tolist(), 'CF_2018': dataset_solar.loc[
dataset_solar['year'] == 2018, 'CF_model_solar'].values.tolist()})
df = df_years.stack()
# --------Summary statistics - annual average day repeated ---#
df_years['Av_CF'] = df_years.mean(axis=1)
df_years['Std_CF'] = df_years.std(axis=1)
mean_cf = np.array(df_years['Av_CF'])
std_cf = np.array(df_years['Std_CF'])
# Inverse cdf for average year
#inv_cdf = stat.mean([np.percentile(df_years['CF_2015'], eta), np.percentile(df_years['CF_2016'], eta), np.percentile(df_years['CF_2017'], eta), np.percentile(df_years['CF_2018'], eta)])
#Above is for the stacked version - no!
#inv_cdf = np.percentile(df_years['Av_CF'], eta)
inv_cdf_raw = np.percentile(df_years['Av_CF'], eta)
inv_cdf = np.array([inv_cdf_raw for i in range(8760)])
# --------Create plots of cdf --------------#
num_bins = int(np.ceil(np.sqrt(8760)))
data = df_years['CF_2015']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='darkcyan', label='2015')
data = df_years['CF_2016']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='powderblue', label='2016')
data = df_years['CF_2017']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='darkturquoise', label='2017')
data = df_years['CF_2018']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='yellowgreen', label='2018')
data = df_years['Av_CF']
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='black', label='Av')
data = df
counts, bin_edges = np.histogram(data, bins=num_bins)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1], color='red', label='Stack')
plt.xlabel('Solar Capacity Factor', fontsize=10)
plt.ylabel('CDF', fontsize=10)
plt.title('Multi-year and average solar capacity factor', fontsize=12)
plt.legend()
plt.show()
return mean_cf, std_cf, inv_cdf
def InputsSolarUncertainHourly(eta, seasonal): #not used right now
# ---------Imports ------#
data = pd.DataFrame(pd.read_csv('in/all_ercot_profiles_hourly.csv', sep=';'))
dataset_solar_raw = data.loc[:, ['local_time_hb', 'CF_model_solar']]
data_raw2015 = pd.DataFrame(pd.read_csv('in/solar_TX.csv', sep=';')) # Reading the dataset of solar gen CF
data2015 =
|
pd.DataFrame(data_raw2015.iloc[0:8760, 5])
|
pandas.DataFrame
|
# run_all_Tiger.py: version of script to run on tiger with many runs and Gurobi
# authors: <NAME>, <NAME>
# email: <EMAIL>, <EMAIL>
# created: June 8, 2021
# first, we'll use the built-in function to download the RTS-GMLC system to Prescicent/downloads/rts_gmlc
import prescient.downloaders.rts_gmlc as rts_downloader
import prescient.scripts.runner as runner
import os
import pandas as pd
import shutil
import numpy as np
import time
os.chdir("..")
os.chdir("..")
# the download function has the path Prescient/downloads/rts_gmlc hard-coded.
# We don't need the code below as long as we've already downloaded the RTS data into the repo (or run rts_gmlc.py)
# All it does is a 'git clone' of the RTS-GMLC repo
# rts_downloader.download()
# did_download = rts_downloader.download()
# if did_download:
# rts_downloader.copy_templates()
# rts_downloader.populate_input_data()
# variables to adjust:
runs = 750
directory_out = "--output-directory=output"
dir_path = "./rts_gmlc"
path_template = "./scenario_"
# all zone 1 file paths
file_paths_combined = ['./timeseries_data_files/101_PV_1_forecasts_actuals.csv','./timeseries_data_files/101_PV_2_forecasts_actuals.csv',
'./timeseries_data_files/101_PV_3_forecasts_actuals.csv','./timeseries_data_files/101_PV_4_forecasts_actuals.csv',
'./timeseries_data_files/102_PV_1_forecasts_actuals.csv','./timeseries_data_files/102_PV_2_forecasts_actuals.csv',
'./timeseries_data_files/103_PV_1_forecasts_actuals.csv','./timeseries_data_files/104_PV_1_forecasts_actuals.csv',
'./timeseries_data_files/113_PV_1_forecasts_actuals.csv','./timeseries_data_files/118_RTPV_1_forecasts_actuals.csv',
'./timeseries_data_files/118_RTPV_2_forecasts_actuals.csv','./timeseries_data_files/118_RTPV_3_forecasts_actuals.csv',
'./timeseries_data_files/118_RTPV_4_forecasts_actuals.csv','./timeseries_data_files/118_RTPV_5_forecasts_actuals.csv',
'./timeseries_data_files/118_RTPV_6_forecasts_actuals.csv','./timeseries_data_files/118_RTPV_7_forecasts_actuals.csv',
'./timeseries_data_files/118_RTPV_8_forecasts_actuals.csv','./timeseries_data_files/118_RTPV_9_forecasts_actuals.csv',
'./timeseries_data_files/118_RTPV_10_forecasts_actuals.csv','./timeseries_data_files/119_PV_1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_101_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_102_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_103_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_104_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_105_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_106_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_107_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_108_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_109_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_110_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_111_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_112_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_113_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_114_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_115_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_116_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_117_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_118_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_119_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_120_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_121_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_122_Load_zone1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_123_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_124_Load_zone1_forecasts_actuals.csv','./timeseries_data_files/Bus_214_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_223_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/215_PV_1_forecasts_actuals.csv', './timeseries_data_files/Bus_210_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/213_RTPV_1_forecasts_actuals.csv', './timeseries_data_files/Bus_218_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/222_HYDRO_2_forecasts_actuals.csv', './timeseries_data_files/Bus_207_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/201_HYDRO_4_forecasts_actuals.csv', './timeseries_data_files/Bus_203_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_204_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/RTPV_zone2_forecasts_actuals.csv',
'./timeseries_data_files/215_HYDRO_3_forecasts_actuals.csv', './timeseries_data_files/Hydro_zone2_forecasts_actuals.csv',
'./timeseries_data_files/222_HYDRO_4_forecasts_actuals.csv', './timeseries_data_files/215_HYDRO_1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_217_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_220_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_208_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/222_HYDRO_6_forecasts_actuals.csv',
'./timeseries_data_files/Bus_213_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_224_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_202_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_219_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_206_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/222_HYDRO_1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_211_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/222_HYDRO_3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_222_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_215_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/222_HYDRO_5_forecasts_actuals.csv', './timeseries_data_files/Bus_212_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_221_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_216_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/PV_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_209_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/215_HYDRO_2_forecasts_actuals.csv', './timeseries_data_files/Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_201_Load_zone2_forecasts_actuals.csv', './timeseries_data_files/Bus_205_Load_zone2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_309_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/320_RTPV_2_forecasts_actuals.csv',
'./timeseries_data_files/Bus_316_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_321_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/313_PV_2_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_7_forecasts_actuals.csv',
'./timeseries_data_files/313_RTPV_10_forecasts_actuals.csv', './timeseries_data_files/310_PV_1_forecasts_actuals.csv',
'./timeseries_data_files/Bus_312_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_325_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_305_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/309_WIND_1_forecasts_actuals.csv',
'./timeseries_data_files/313_RTPV_5_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_12_forecasts_actuals.csv',
'./timeseries_data_files/314_PV_2_forecasts_actuals.csv', './timeseries_data_files/Bus_301_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/314_PV_4_forecasts_actuals.csv', './timeseries_data_files/PV_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_306_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_319_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/322_HYDRO_1_forecasts_actuals.csv',
'./timeseries_data_files/320_RTPV_6_forecasts_actuals.csv', './timeseries_data_files/324_PV_3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_302_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_315_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_322_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_1_forecasts_actuals.csv',
'./timeseries_data_files/308_RTPV_1_forecasts_actuals.csv', './timeseries_data_files/322_HYDRO_3_forecasts_actuals.csv',
'./timeseries_data_files/324_PV_1_forecasts_actuals.csv', './timeseries_data_files/317_WIND_1_forecasts_actuals.csv',
'./timeseries_data_files/313_RTPV_9_forecasts_actuals.csv', './timeseries_data_files/Bus_311_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/320_RTPV_4_forecasts_actuals.csv', './timeseries_data_files/Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/322_HYDRO_4_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_6_forecasts_actuals.csv',
'./timeseries_data_files/314_PV_1_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_11_forecasts_actuals.csv',
'./timeseries_data_files/303_WIND_1_forecasts_actuals.csv', './timeseries_data_files/320_RTPV_3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_304_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_324_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/WIND_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_313_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/310_PV_2_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_4_forecasts_actuals.csv',
'./timeseries_data_files/313_RTPV_13_forecasts_actuals.csv', './timeseries_data_files/314_PV_3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_308_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_320_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_317_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/320_RTPV_1_forecasts_actuals.csv',
'./timeseries_data_files/313_PV_1_forecasts_actuals.csv', './timeseries_data_files/324_PV_2_forecasts_actuals.csv',
'./timeseries_data_files/Hydro_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_310_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_323_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/Bus_314_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/313_RTPV_2_forecasts_actuals.csv', './timeseries_data_files/RTPV_zone3_forecasts_actuals.csv',
'./timeseries_data_files/312_PV_1_forecasts_actuals.csv', './timeseries_data_files/319_PV_1_forecasts_actuals.csv',
'./timeseries_data_files/320_PV_1_forecasts_actuals.csv', './timeseries_data_files/313_RTPV_8_forecasts_actuals.csv',
'./timeseries_data_files/320_RTPV_5_forecasts_actuals.csv', './timeseries_data_files/Bus_303_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_307_Load_zone3_forecasts_actuals.csv',
'./timeseries_data_files/Bus_318_Load_zone3_forecasts_actuals.csv', './timeseries_data_files/322_HYDRO_2_forecasts_actuals.csv']
# smaller set for testing
file_paths_test = ['./timeseries_data_files/101_PV_1_forecasts_actuals.csv','./timeseries_data_files/101_PV_2_forecasts_actuals.csv']
def read_files(file_paths):
# file_paths: list of strings indicating file paths that are to be read in
# output: data_lst - list of data frames containing all the information in each file
# Note: we add to a list and then concatenate as this is faster and takes less memory than growing the dataframe
# each time
data_lst = []
i = 0
bus_names = []
# iterate across file paths
for path in file_paths:
data = pd.read_csv(path) # read in the file
# rename the columns to be useful
# the numbers below are hard coded for this particular case - they will have to change if the file structure
# changes too
data.columns = ['Time', path[24:-22]+'_forecasts', path[24:-22]+'_actuals']
bus_names.append(path[24:-22]) # gives us a list of bus_names which we can use later on
# if this is our first one, append all columns (including date/time), otherwise, just append forecasts/actuals
# note: this assumes that all files have the exact same dates and times, which is supported in this case, but
# may not be true generally
if i == 0:
data_lst.append(data)
else:
data_lst.append(data[[path[24:-22]+'_forecasts', path[24:-22]+'_actuals']])
i += 1
return data_lst, bus_names
def filter_no_solar(combined_data, determining_solar_plant):
# combined_data: data frame of all forecasts and actuals for a list of buses
# output: two data frames called s_data and ns_data.
# This function filters all data into two parts - one where solars are active and one where solars are inactive
# we will do this in a pretty naive way, simply based on one of the solar plants, which we are going to hard code
# this is not ideal, but it should do for now
ns_data = combined_data[combined_data[determining_solar_plant + '_forecasts'] == 0]
#ns_data.to_csv('zz_no_solar_data.csv') # print out results as a test
s_data = combined_data[combined_data[determining_solar_plant + '_forecasts'] != 0]
#s_data.to_csv("zz_solar_data.csv")
return ns_data, s_data
def compute_actual_forecast_quotient(data, bus_names):
# data: data frame of forecasts and actuals, in the pattern of: forecast, actual
# output: modified version of data containing additional columns with the quotient of actual / forecasts
# iterate across bus names and take the relevant quotients
for name in bus_names:
temp_nm = name + '_quotient'
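# Editor's comment: assign() with a keyword argument literally creates a column named
# 'temp_nm'; the rename() call just below is what moves it onto the <bus>_quotient name.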
data = data.assign(temp_nm=np.minimum(data[name+'_actuals'] / data[name+'_forecasts'], 1.5))
data.rename(columns={'temp_nm':temp_nm}, inplace=True)
# get rid of NaNs and Infs
# NaNs arise when we have 0/0, Infs arise when we have x / 0, where x > 0
data.fillna(0, inplace=True)
data.replace(np.inf, 0, inplace=True)
return data
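# Editor's sketch (comment-form, not executed): the quotient logic above on a tiny
# hand-made frame with a single hypothetical bus 'X':
#   toy = pd.DataFrame({'X_forecasts': [0.0, 2.0, 4.0], 'X_actuals': [0.0, 3.0, 2.0]})
#   toy = compute_actual_forecast_quotient(toy, ['X'])
#   toy['X_quotient'] -> [0.0, 1.5, 0.5]  (0/0 gives NaN which is filled with 0,
#                                          3/2 = 1.5 sits exactly at the np.minimum cap,
#                                          2/4 = 0.5)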
def sample_quotients(pre_sunrise_hrs, post_sunset_hrs, s_data, ns_data):
# pre_sunrise_hrs: number of hours before sunrise for the day we want to sample
# post_sunset_hrs: number of hours after sunset for the day we want to sample
# s_data: data frame of the active solar hours
# ns_data: data frame of the inactive solar hours
ns_quotients = ns_data.filter(regex='quotient$', axis=1)
s_quotients = s_data.filter(regex='quotient$', axis=1)
pre_sunrise_sample = ns_quotients.sample(pre_sunrise_hrs, replace=True) # samples quotients for pre sunrise hours
post_sunset_sample = ns_quotients.sample(post_sunset_hrs, replace=True) # samples quotients for post sunset hours
# samples quotients for daylight hours
daylight_sample = s_quotients.sample(24 - pre_sunrise_hrs - post_sunset_hrs, replace=True)
frames = [pre_sunrise_sample, daylight_sample, post_sunset_sample]
day_sample = pd.concat(frames)
return day_sample
def apply_day_quotients(quotients, day, file_paths):
# quotients: dataframe with all the quotients to apply
# day: string version of what day to modify with the quotients in form YYYY-MM-DD
# output: None - directly modify the time series files to apply the quotients and writes to file
# if (day == "2020-07-09"):
# beg = 4561
# end = 4585
# elif (day == "2020-07-10"):
# beg = 4585
# end = 4609
# elif (day == "2020-07-11"):
# beg = 4609
# end = 4633
for path in file_paths:
file_data = pd.read_csv(path)
count = 0
file_data = file_data.set_index('datetime')
dts = pd.Series(pd.date_range(day, periods=24, freq='H'))
t = dts.dt.strftime('%Y-%m-%d %H:%M:%S')
file_data.loc[t, 'actuals'] = file_data.loc[t, 'forecasts'] * quotients[path[24:-22] + "_quotient"].tolist()
file_data = file_data.truncate(before = '2020-07-09', after = '2020-07-12')
# for index, row in file_data.iterrows():
# if(row['datetime'].startswith(day)):
# row['actuals'] = row['forecasts'] * quotients.iloc[count, : ].loc[path[24:-22] + "_quotient"]
# count += 1
# file_data.iloc[index,:] = row
# for index in range(beg, end):
# file_data["actuals"].iat[index] = file_data['forecasts'].iat[index] * quotients.iloc[count, : ].loc[path[24:-22] + "_quotient"]
# count += 1
# file_data.to_csv(path, index=False)
file_data.to_csv(path, index=True)
# run all the data perturbation functions as a function call -> should be in working directory when called and will remain.
def perturb_data(file_paths, solar_path, no_solar_path):
# file_paths: list of strings that tell us where the timeseries data files are located
# solar_path: file path of the forecast, actuals, and quotients for the active solar hours for the year
# no_solar_path: file path of the the forecast, actuals, and quotients for the non-active solar hours for the year
# output: None - modifies the timeseries data files in place via apply_day_quotients
path = os.getcwd()
os.chdir("..")
solar_data_1 =
|
pd.read_csv(solar_path)
|
pandas.read_csv
|
import timeit
from typing import List, Union
import pandas as pd
import numpy as np
import ray
from ray_shuffling_data_loader.stats import (TrialStatsCollector, TrialStats)
class BatchConsumer:
"""
Interface for consumers of the shuffle outputs.
"""
def consume(self, rank, epoch, batches):
"""
Consume the provided batches for the given trainer and epoch.
"""
raise NotImplementedError(
"Derived classes must implement consume method.")
def producer_done(self, rank, epoch):
"""
Signals to the consumer that we're done producing batches for the
given trainer and epoch.
"""
raise NotImplementedError(
"Derived classes must implement producer_done method.")
def wait_until_ready(self, epoch):
"""
Returns once the consumer is ready for this epoch to start.
"""
raise NotImplementedError(
"Derived classes must implement wait_until_ready method.")
def wait_until_all_epochs_done(self):
"""
Returns once all batches for all epochs have been consumed.
"""
raise NotImplementedError(
"Derived classes must implement wait_until_done method.")
#
# In-memory shuffling, loads data from disk once per epoch.
#
def shuffle(filenames: List[str],
batch_consumer: BatchConsumer,
num_epochs: int,
num_reducers: int,
num_trainers: int,
stats_collector: Union[TrialStatsCollector, None] = None,
) -> Union[TrialStats, float]:
"""
Shuffle the provided dataset every epoch.
Args:
filenames (str): Paths to input Parquet files.
batch_consumer (BatchConsumer): Consumer of shuffle outputs.
num_epochs (int): Number of training epochs.
num_reducers (int): The number of shuffler reducers.
num_trainers (int): Number of trainer workers.
stats_collector(Optional[TrialStatsCollector]): Shuffle stats
collector.
"""
start = timeit.default_timer()
for epoch_idx in range(num_epochs):
# Wait until consumer is ready for another epoch shuffle to start.
batch_consumer.wait_until_ready(epoch_idx)
shuffle_epoch(
epoch_idx, filenames, batch_consumer, num_reducers, num_trainers,
stats_collector)
batch_consumer.wait_until_all_epochs_done()
end = timeit.default_timer()
duration = end - start
if stats_collector is not None:
stats_collector.trial_done.remote(duration)
return duration
def shuffle_epoch(
epoch: int, filenames: List[str],
batch_consumer: BatchConsumer,
num_reducers: int, num_trainers: int,
stats_collector: Union[TrialStatsCollector, None] = None) -> None:
"""
Shuffle the provided dataset for the specified epoch.
Args:
epoch (int): Epoch for which we are shuffling.
filenames (str): Paths to input Parquet files.
batch_consumer (BatchConsumer): Consumer of shuffle outputs.
num_reducers (int): The number of shuffler reducers.
num_trainers (int): Number of trainer workers.
stats_collector(Optional[TrialStatsCollector]): Shuffle stats
collector.
"""
if stats_collector is not None:
stats_collector.epoch_start.remote(epoch)
reducers_partitions = []
for filename in filenames:
file_reducer_parts = shuffle_map.options(
num_returns=num_reducers).remote(
filename, num_reducers, stats_collector, epoch)
if not isinstance(file_reducer_parts, list):
file_reducer_parts = [file_reducer_parts]
reducers_partitions.append(file_reducer_parts)
shuffled = []
for reducer_idx, reducer_partitions in enumerate(
zip(*reducers_partitions)):
consumer_batches = shuffle_reduce.remote(
reducer_idx, stats_collector, epoch, *reducer_partitions)
shuffled.append(consumer_batches)
for rank, batches in enumerate(
np.array_split(shuffled, num_trainers)):
consume(rank, batch_consumer, epoch, list(batches))
@ray.remote
def shuffle_map(filename: str, num_reducers: int,
stats_collector: Union[TrialStatsCollector, None],
epoch: int) -> List[List[ray.ObjectRef]]:
"""
Map (data loading and row selection) stage of the shuffle.
Args:
filename (str): Path to input Parquet file.
num_reducers (int): The number of shuffler reducers.
stats_collector(Optional[TrialStatsCollector]): Shuffle stats
collector.
epoch (int): Epoch for which we are shuffling.
Returns:
num_reducers partitions, each randomly sampled (without replacement)
from rows in provided Parquet file.
"""
if stats_collector is not None:
stats_collector.map_start.remote(epoch)
start = timeit.default_timer()
# Load file.
rows = pd.read_parquet(filename)
assert len(rows) > num_reducers
end_read = timeit.default_timer()
# Create random reducer assignment.
reducer_assignment = np.random.randint(num_reducers, size=len(rows))
# Partition the rows into a partition per reducer.
reducer_parts = []
for reducer_idx in range(num_reducers):
reducer_part = rows[reducer_assignment == reducer_idx]
reducer_parts.append(reducer_part)
if len(reducer_parts) == 1:
reducer_parts = reducer_parts[0]
duration = timeit.default_timer() - start
read_duration = end_read - start
if stats_collector is not None:
stats_collector.map_done.remote(epoch, duration, read_duration)
return reducer_parts
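# Editor's note (illustrative): the heart of the map step is the random reducer
# assignment above; on a toy frame with num_reducers=2 it behaves like
#   rows = pd.DataFrame({"x": range(6)})
#   assignment = np.random.randint(2, size=len(rows))      # e.g. [0, 1, 1, 0, 0, 1]
#   parts = [rows[assignment == r] for r in range(2)]      # disjoint row subsets
# so every row lands in exactly one reducer partition and no row is duplicated.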
@ray.remote
def shuffle_reduce(reduce_index: int,
stats_collector: Union[TrialStatsCollector, None],
epoch: int, *chunks: pd.DataFrame) -> List[pd.DataFrame]:
"""
Reduce (combine and shuffle) stage of the shuffle.
Args:
reduce_index (int): The index (ID) of this reducer.
stats_collector(Optional[TrialStatsCollector]): Shuffle stats
collector.
epoch (int): Epoch for which we are shuffling.
*chunks (pd.DataFrame): DataFrame partition, one from each mapper.
Returns:
A concatenation and full shuffle of all provided mapper partitions.
"""
if stats_collector is not None:
stats_collector.reduce_start.remote(epoch)
start = timeit.default_timer()
# Concatenate chunks from all mapper partitions.
batch =
|
pd.concat(chunks)
|
pandas.concat
|
import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import re
date_now = datetime.now()
#
# data = pd.read_csv('/home/haishuowang/spider_data/2019-07-17/兰格钢铁网', sep='|', header=None)
# data.columns = ['Title', 'w_time', 'n_time', 'Link', 'Info']
# data = data[~data['Title'].duplicated(keep='first')]
#
# # x =
# data['deal_title'] = data.apply(lambda x: x['Title'].replace(f'{int(date_now.month)}月', '')
# .replace(f'{int(date_now.day)}日', '')
# , axis=1)
#
mid_word = ['稳', '→', '震荡', '平', ]
buy_word = ['涨', '上调', '↑', '上行', '强势', '走高']
sell_word = ['跌', '降', '下调', '探低', '↓', '下行', '弱势', '走低']
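# Editor's gloss (the titles being scanned are Chinese steel-market headlines):
# mid_word  ~ "steady / sideways / flat"; buy_word ~ "rise / revised up / moving up /
# strong / trending higher"; sell_word ~ "fall / cut / revised down / probing lows /
# moving down / weak / trending lower".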
# # 方大特钢
#
#
file_name_list = ['全球金属网', '兰格钢铁网', '大宗内参', '海鑫钢网', '瑞达期货', '生意社', '西本新干线']
def contain(x, key_word, label=1):
for key in key_word:
if key in x:
return label
else:
return np.nan
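# Editor's sketch of how contain() labels a headline against the keyword lists above
# (comment-form; example strings are invented):
#   contain('钢价上调', buy_word, label=1)    -> 1       ('上调', "revised up", matches)
#   contain('市场持稳', mid_word, label=0)    -> 0       ('稳', "steady", matches)
#   contain('无关新闻', sell_word, label=-1)  -> np.nan  (no sell keyword present)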
def load_spot_data(read_path):
file_data = pd.read_csv(read_path, sep='|', header=None)
file_data.columns = ['Title', 'w_time', 'n_time', 'Link', 'Info']
file_data.index = pd.to_datetime(file_data['n_time'])
return file_data
def filer_target_word(raw_df):
target_df = raw_df[raw_df['Title'].str.contains('钢')]
return target_df
def get_file_pos(file_name):
root_path = '/home/haishuowang/temp'
date_list = sorted(os.listdir(root_path))
# file_name = '兰格钢铁网'
data_list = []
for target_date in date_list:
read_path = f'{root_path}/{target_date}/{file_name}'
if os.path.exists(f'{root_path}/{target_date}/{file_name}'):
file_data = pd.read_csv(read_path, sep='|', header=None)
file_data.columns = ['Title', 'w_time', 'n_time', 'Link', 'Info']
file_data.index = pd.to_datetime(file_data['n_time']) + timedelta(minutes=10)
file_data = file_data.sort_index()
mid = file_data['Title'].apply(lambda x: contain(x, mid_word, label=0))
mid.name = 'mid'
buy = file_data['Title'].apply(lambda x: contain(x, buy_word, label=1))
buy.name = 'buy'
sell = file_data['Title'].apply(lambda x: contain(x, sell_word, label=-1))
sell.name = 'sell'
mid_info = file_data['Info'].apply(lambda x: contain(x, mid_word, label=0))
mid_info.name = 'mid_info'
buy_info = file_data['Info'].apply(lambda x: contain(x, buy_word, label=1))
buy_info.name = 'buy_info'
sell_info = file_data['Info'].apply(lambda x: contain(x, sell_word, label=-1))
sell_info.name = 'sell_info'
# no_info = mid_info.isna() & buy_info.isna() & sell_info.isna()
part_info = pd.concat([file_data['Title'], mid, buy, sell, mid_info, buy_info, sell_info], axis=1)
data_list.append(part_info)
else:
print(target_date)
pass
all_info = pd.concat(data_list, axis=0)
all_info.to_csv(f'/home/haishuowang/PycharmProjects/{file_name}.csv')
return all_info
def get_spider_file_pos(file_name='生意社'):
root_path = '/home/haishuowang/spider_data'
date_list = sorted([x for x in os.listdir(root_path) if len(x) == 10 and '-' in x and x > '2019-07-18'])
data_list = []
for target_date in date_list:
read_path = f'/home/haishuowang/spider_data/{target_date}/{file_name}'
if os.path.exists(f'{root_path}/{target_date}/{file_name}'):
file_data = load_spot_data(read_path)
file_data = filer_target_word(file_data)
file_data.index =
|
pd.to_datetime(file_data['n_time'])
|
pandas.to_datetime
|
"""Ingest eBird data."""
from pathlib import Path
import pandas as pd
from . import db, util
from .util import log
DATASET_ID = 'ebird'
RAW_DIR = Path('data') / 'raw' / DATASET_ID
RAW_CSV = 'ebd_relMay-2020.txt.gz'
def ingest():
"""Ingest eBird data."""
db.delete_dataset_records(DATASET_ID)
to_taxon_id = get_taxa()
db.insert_dataset({
'dataset_id': DATASET_ID,
'title': RAW_CSV,
'version': 'relMay-2020',
'url': 'https://ebird.org/home'})
chunk = 1_000_000
reader = pd.read_csv(
RAW_DIR / RAW_CSV,
delimiter='\t',
quoting=3,
chunksize=chunk,
dtype='unicode')
to_place_id = {}
to_event_id = {}
for i, raw_data in enumerate(reader, 1):
log(f'Processing {DATASET_ID} chunk {i * chunk:,}')
raw_data = filter_data(raw_data)
if raw_data.shape[0] == 0:
continue
to_place_id = insert_places(raw_data, to_place_id)
to_event_id = insert_events(raw_data, to_place_id, to_event_id)
insert_counts(raw_data, to_event_id, to_taxon_id)
def get_taxa():
"""Build a dictionary of scientific names and taxon_ids."""
sql = """SELECT taxon_id, sci_name
FROM taxa
WHERE target = 't'
AND "class"='aves'"""
taxa = pd.read_sql(sql, db.connect())
return taxa.set_index('sci_name')['taxon_id'].to_dict()
def filter_data(raw_data):
"""Limit the size & scope of the data."""
raw_data = raw_data.rename(columns={
'LONGITUDE': 'lng',
'LATITUDE': 'lat',
'EFFORT DISTANCE KM': 'radius',
'TIME OBSERVATIONS STARTED': 'started',
' LOCALITY TYPE': 'LOCALITY TYPE',
'OBSERVATION COUNT': 'count'})
util.normalize_columns_names(raw_data)
raw_data['date'] = pd.to_datetime(
raw_data['OBSERVATION_DATE'], errors='coerce')
raw_data.loc[raw_data['count'] == 'X', 'count'] = '-1'
raw_data['count'] = pd.to_numeric(raw_data['count'], errors='coerce')
has_date = raw_data['date'].notna()
is_approved = raw_data['APPROVED'] == '1'
is_complete = raw_data['ALL_SPECIES_REPORTED'] == '1'
raw_data = raw_data[has_date & is_approved & is_complete]
return util.filter_lng_lat(
raw_data, 'lng', 'lat', lng=(-95.0, -50.0), lat=(20.0, 90.0))
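# Editor's note: the 'X' -> '-1' substitution above keeps "present but uncounted"
# records, and pd.to_numeric(..., errors='coerce') turns any other non-numeric value
# into NaN instead of raising, e.g.
#   pd.to_numeric(pd.Series(['3', '-1', 'oops']), errors='coerce') -> [3.0, -1.0, NaN]
# The three boolean masks are then combined row-wise before the bounding-box filter.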
def insert_places(raw_data, to_place_id):
"""Insert places."""
log(f'Inserting {DATASET_ID} places')
raw_data['place_key'] = tuple(zip(raw_data.lng, raw_data.lat))
places = raw_data.drop_duplicates('place_key')
old_places = places.place_key.isin(to_place_id)
places = places[~old_places]
places['place_id'] = db.create_ids(places, 'places')
places['dataset_id'] = DATASET_ID
is_na = places.radius.isna()
places.radius = pd.to_numeric(places.radius, errors='coerce').fillna(0.0)
places.radius *= 1000.0
places.loc[is_na, 'radius'] = None
fields = """COUNTRY_CODE STATE_CODE COUNTY_CODE IBA_CODE BCR_CODE
USFWS_CODE ATLAS_BLOCK LOCALITY_ID LOCALITY_TYPE
EFFORT_AREA_HA""".split()
places['place_json'] = util.json_object(places, fields)
places.loc[:, db.PLACE_FIELDS].to_sql(
'places', db.connect(), if_exists='append', index=False)
places['place_key'] = tuple(zip(places.lng, places.lat))
new_place_ids = places.set_index('place_key').place_id.to_dict()
return {**to_place_id, **new_place_ids}
def insert_events(raw_data, to_place_id, to_event_id):
"""Insert events."""
log(f'Inserting {DATASET_ID} events')
events = raw_data.drop_duplicates('SAMPLING_EVENT_IDENTIFIER')
old_events = events.SAMPLING_EVENT_IDENTIFIER.isin(to_event_id)
events = events[~old_events]
events['event_id'] = db.create_ids(events, 'events')
events['place_key'] = tuple(zip(events.lng, events.lat))
events['place_id'] = events.place_key.map(to_place_id)
events['year'] = events.date.dt.strftime('%Y')
events['day'] = events.date.dt.strftime('%j')
events['started'] = pd.to_datetime(events['started'], format='%H:%M:%S')
events['delta'] =
|
pd.to_numeric(events.DURATION_MINUTES, errors='coerce')
|
pandas.to_numeric
|
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not
|
isna(result.iloc[-6])
|
pandas.isna
|
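# Standalone sketch (editor's addition, outside the pytest harness): the min_periods
# behaviour the tests above rely on, shown with a plain rolling mean.
import numpy as np
import pandas as pd
obj = pd.Series(np.random.randn(50))
obj[:10] = np.nan
result = obj.rolling(20, min_periods=15).mean()
assert pd.isna(result.iloc[23])    # the window ending at 23 holds only 14 valid points
assert pd.notna(result.iloc[24])   # the window ending at 24 holds 15 valid points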
import itertools
import re
import os
import time
import copy
import json
import Amplo
import joblib
import shutil
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Union
from pathlib import Path
from datetime import datetime
from shap import TreeExplainer
from shap import KernelExplainer
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from Amplo import Utils
from Amplo.AutoML.Sequencer import Sequencer
from Amplo.AutoML.Modeller import Modeller
from Amplo.AutoML.DataSampler import DataSampler
from Amplo.AutoML.DataExplorer import DataExplorer
from Amplo.AutoML.DataProcessor import DataProcessor
from Amplo.AutoML.DriftDetector import DriftDetector
from Amplo.AutoML.FeatureProcessor import FeatureProcessor
from Amplo.AutoML.IntervalAnalyser import IntervalAnalyser
from Amplo.Classifiers.StackingClassifier import StackingClassifier
from Amplo.Documenting import BinaryDocumenting
from Amplo.Documenting import MultiDocumenting
from Amplo.Documenting import RegressionDocumenting
from Amplo.GridSearch import BaseGridSearch
from Amplo.GridSearch import HalvingGridSearch
from Amplo.GridSearch import OptunaGridSearch
from Amplo.Observation import DataObserver
from Amplo.Observation import ProductionObserver
from Amplo.Regressors.StackingRegressor import StackingRegressor
class Pipeline:
def __init__(self, **kwargs):
"""
Automated Machine Learning Pipeline for tabular data.
Designed for predictive maintenance applications, failure identification, failure prediction, condition
monitoring, etc.
Parameters
----------
Main Parameters:
main_dir [str]: Main directory of Pipeline (for documentation)
target [str]: Column name of the output/dependent/regressand variable.
name [str]: Name of the project (for documentation)
version [int]: Pipeline version (set automatically)
mode [str]: 'classification' or 'regression'
objective [str]: Objective metric; any key of sklearn.metrics.SCORERS
Data Processor:
int_cols [list[str]]: Column names of integer columns
float_cols [list[str]]: Column names of float columns
date_cols [list[str]]: Column names of datetime columns
cat_cols [list[str]]: Column names of categorical columns
missing_values [str]: [DataProcessing] - 'remove', 'interpolate', 'mean' or 'zero'
outlier_removal [str]: [DataProcessing] - 'clip', 'boxplot', 'z-score' or 'none'
z_score_threshold [int]: [DataProcessing] z-score threshold used when outlier_removal = 'z-score'
include_output [bool]: Whether to include output in the training data (sensible only with sequencing)
Feature Processor:
extract_features [bool]: Whether to use FeatureProcessing module
information_threshold : [FeatureProcessing] Threshold for removing co-linear features
feature_timeout [int]: [FeatureProcessing] Time budget for feature processing
max_lags [int]: [FeatureProcessing] Maximum lags for lagged features to analyse
max_diff [int]: [FeatureProcessing] Maximum differencing order for differencing features
Interval Analyser:
interval_analyse [bool]: Whether to use IntervalAnalyser module
Note that this has no effect when data from ``self._read_data`` is not multi-indexed
Sequencing:
sequence [bool]: [Sequencing] Whether to use Sequence module
seq_back [int or list[int]]: Input time indices
If list -> includes all integers within the list
If int -> includes that many samples back
seq_forward [int or list[int]]: Output time indices
If list -> includes all integers within the list.
If int -> includes that many samples forward.
seq_shift [int]: Shift input / output samples in time
seq_diff [int]: Difference the input & output, 'none', 'diff' or 'log_diff'
seq_flat [bool]: Whether to return a matrix (True) or a tensor (False)
Modelling:
standardize [bool]: Whether to standardize input/output data
shuffle [bool]: Whether to shuffle the samples during cross-validation
cv_splits [int]: How many cross-validation splits to make
store_models [bool]: Whether to store all trained model files
Grid Search:
grid_search_type [Optional[str]]: Which method to use 'optuna', 'halving', 'base' or None
grid_search_time_budget [int]: Time budget for grid search
grid_search_candidates [int]: Parameter evaluation budget for grid search
grid_search_iterations [int]: Model evaluation budget for grid search
Stacking:
stacking [bool]: Whether to create a stacking model at the end
Production:
preprocess_function [str]: Add custom code for the prediction function, useful for production. Will be executed
with exec, can be multiline. Uses data as input.
Flags:
logging_level [Optional[Union[int, str]]]: Logging level for warnings, info, etc.
plot_eda [bool]: Whether to run Exploratory Data Analysis
process_data [bool]: Whether to force data processing
document_results [bool]: Whether to force documenting
no_dirs [bool]: Whether to create files or not
verbose [int]: Level of verbosity
"""
# Copy arguments
##################
# Main Settings
self.mainDir = kwargs.get('main_dir', 'AutoML/')
self.target = re.sub('[^a-z0-9]', '_', kwargs.get('target', '').lower())
self.name = kwargs.get('name', 'AutoML')
self.version = kwargs.get('version', None)
self.mode = kwargs.get('mode', None)
self.objective = kwargs.get('objective', None)
# Data Processor
self.intCols = kwargs.get('int_cols', None)
self.floatCols = kwargs.get('float_cols', None)
self.dateCols = kwargs.get('date_cols', None)
self.catCols = kwargs.get('cat_cols', None)
self.missingValues = kwargs.get('missing_values', 'zero')
self.outlierRemoval = kwargs.get('outlier_removal', 'clip')
self.zScoreThreshold = kwargs.get('z_score_threshold', 4)
self.includeOutput = kwargs.get('include_output', False)
# Balancer
self.balance = kwargs.get('balance', True)
# Feature Processor
self.extractFeatures = kwargs.get('extract_features', True)
self.informationThreshold = kwargs.get('information_threshold', 0.999)
self.featureTimeout = kwargs.get('feature_timeout', 3600)
self.maxLags = kwargs.get('max_lags', 0)
self.maxDiff = kwargs.get('max_diff', 0)
# Interval Analyser
self.useIntervalAnalyser = kwargs.get('interval_analyse', True)
# Sequencer
self.sequence = kwargs.get('sequence', False)
self.sequenceBack = kwargs.get('seq_back', 1)
self.sequenceForward = kwargs.get('seq_forward', 1)
self.sequenceShift = kwargs.get('seq_shift', 0)
self.sequenceDiff = kwargs.get('seq_diff', 'none')
self.sequenceFlat = kwargs.get('seq_flat', True)
# Modelling
self.standardize = kwargs.get('standardize', False)
self.shuffle = kwargs.get('shuffle', True)
self.cvSplits = kwargs.get('cv_splits', 10)
self.storeModels = kwargs.get('store_models', False)
# Grid Search Parameters
self.gridSearchType = kwargs.get('grid_search_type', 'optuna')
self.gridSearchTimeout = kwargs.get('grid_search_time_budget', 3600)
self.gridSearchCandidates = kwargs.get('grid_search_candidates', 250)
self.gridSearchIterations = kwargs.get('grid_search_iterations', 3)
# Stacking
self.stacking = kwargs.get('stacking', False)
# Production
self.preprocessFunction = kwargs.get('preprocess_function', None)
# Flags
self.plotEDA = kwargs.get('plot_eda', False)
self.processData = kwargs.get('process_data', True)
self.documentResults = kwargs.get('document_results', True)
self.verbose = kwargs.get('verbose', 0)
self.noDirs = kwargs.get('no_dirs', False)
# Checks
assert self.mode in [None, 'regression', 'classification'], 'Supported modes: regression, classification.'
assert 0 < self.informationThreshold < 1, 'Information threshold needs to be within [0, 1]'
assert self.maxLags < 50, 'Max_lags too big. Max 50.'
assert self.maxDiff < 5, 'Max diff too big. Max 5.'
assert self.gridSearchType is None \
or self.gridSearchType.lower() in ['base', 'halving', 'optuna'], \
'Grid Search Type must be Base, Halving, Optuna or None'
# Advice
if self.includeOutput and not self.sequence:
warnings.warn('[AutoML] IMPORTANT: it is strongly advised not to include the output without sequencing.')
# Create dirs
if not self.noDirs:
self._create_dirs()
self._load_version()
# Store Pipeline Settings
self.settings = {'pipeline': kwargs, 'validation': {}, 'feature_set': ''}
# Objective & Scorer
self.scorer = None
if self.objective is not None:
assert isinstance(self.objective, str), 'Objective needs to be a string'
assert self.objective in metrics.SCORERS.keys(), 'Metric not supported, look at sklearn.metrics'
# Required sub-classes
self.dataSampler = DataSampler()
self.dataProcessor = DataProcessor()
self.dataSequencer = Sequencer()
self.featureProcessor = FeatureProcessor()
self.intervalAnalyser = IntervalAnalyser()
self.driftDetector = DriftDetector()
# Instance initiating
self.bestModel = None
self._data = None
self.featureSets = None
self.results = None
self.n_classes = None
self.is_fitted = False
# Monitoring
logging_level = kwargs.get('logging_level', 'INFO')
logging_dir = Path(self.mainDir) / 'app_logs.log' if not self.noDirs else None
self.logger = Utils.logging.get_logger('AutoML', logging_dir, logging_level, capture_warnings=True)
self._prediction_time = None
self._main_predictors = None
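# Editor's sketch of typical end-to-end usage (kept as a comment because we are inside
# the class body; argument values are illustrative only):
#   pipeline = Pipeline(target='label', mode='classification')
#   pipeline.fit(data)                  # data: a pd.DataFrame that contains 'label'
#   settings = pipeline.get_settings()  # JSON-serialisable snapshot for production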
# User Pointing Functions
def get_settings(self, version: int = None) -> dict:
"""
Get settings to recreate fitted object.
Parameters
----------
version : int, optional
Production version, defaults to current version
"""
if version is None or version == self.version:
assert self.is_fitted, "Pipeline not yet fitted."
return self.settings
else:
settings_path = self.mainDir + f'Production/v{self.version}/Settings.json'
assert Path(settings_path).exists(), 'Cannot load settings from nonexistent version'
return json.load(open(settings_path, 'r'))
def load_settings(self, settings: dict):
"""
Restores a pipeline from settings.
Parameters
----------
settings [dict]: Pipeline settings
"""
# Set parameters
settings['pipeline']['no_dirs'] = True
self.__init__(**settings['pipeline'])
self.settings = settings
self.dataProcessor.load_settings(settings['data_processing'])
self.featureProcessor.load_settings(settings['feature_processing'])
# TODO: load_settings for IntervalAnalyser (not yet implemented)
if 'drift_detector' in settings:
self.driftDetector = DriftDetector(
num_cols=self.dataProcessor.float_cols + self.dataProcessor.int_cols,
cat_cols=self.dataProcessor.cat_cols,
date_cols=self.dataProcessor.date_cols
).load_weights(settings['drift_detector'])
def load_model(self, model: object):
"""
Restores a trained model
"""
assert type(model).__name__ == self.settings['model']
self.bestModel = model
self.is_fitted = True
def fit(self, *args, **kwargs):
"""
Fit the full AutoML pipeline.
1. Prepare data for training
2. Train / optimize models
3. Prepare Production Files
Nicely organises all required scripts / files to make a prediction
Parameters
----------
args
For data reading - Propagated to `self.data_preparation`
kwargs
For data reading (propagated to `self.data_preparation`) AND
for production filing (propagated to `self.conclude_fitting`)
"""
# Starting
print('\n\n*** Starting Amplo AutoML - {} ***\n\n'.format(self.name))
# Prepare data for training
self.data_preparation(*args, **kwargs)
# Train / optimize models
self.model_training(**kwargs)
# Conclude fitting
self.conclude_fitting(**kwargs)
def data_preparation(self, *args, **kwargs):
"""
Prepare data for modelling
1. Data Processing
Cleans all the data. See @DataProcessing
2. (optional) Exploratory Data Analysis
Creates a ton of plots which are helpful to improve predictions manually
3. Feature Processing
Extracts & Selects. See @FeatureProcessing
Parameters
----------
args
For data reading - Propagated to `self._read_data`
kwargs
For data reading - Propagated to `self._read_data`
"""
# Reading data
self._read_data(*args, **kwargs)
# Check data
obs = DataObserver(pipeline=self)
obs.observe()
# Detect mode (classification / regression)
self._mode_detector()
# Preprocess Data
self._data_processing()
# Run Exploratory Data Analysis
self._eda()
# Balance data
self._data_sampling()
# Sequence
self._sequencing()
# Extract and select features
self._feature_processing()
# Interval-analyze data
self._interval_analysis()
# Standardize
# Standardizing assures equal scales, equal gradients and no clipping.
# Therefore, it needs to be after sequencing & feature processing, as this alters scales
self._standardizing()
def model_training(self, **kwargs):
"""Train models
1. Initial Modelling
Runs various off the shelf models with default parameters for all feature sets
If Sequencing is enabled, this is where it happens, as here, the feature set is generated.
2. Grid Search
Optimizes the hyperparameters of the best performing models
3. (optional) Create Stacking model
4. (optional) Create documentation
Parameters
----------
kwargs : optional
Keyword arguments that will be passed to `self.grid_search`.
"""
# Run initial models
self._initial_modelling()
# Optimize Hyper parameters
self.grid_search(**kwargs)
# Create stacking model
self._create_stacking()
def conclude_fitting(self, *, model=None, feature_set=None, params=None, **kwargs):
"""
Prepare production files that are necessary to deploy a specific
model / feature set combination
Creates or modifies the following files
- ``Model.joblib`` (production model)
- ``Settings.json`` (model settings)
- ``Report.pdf`` (training report)
Parameters
----------
model : str or list of str, optional
Model file for which to prepare production files. If multiple, selects the best.
feature_set : str or list of str, optional
Feature set for which to prepare production files. If multiple, selects the best.
params : dict, optional
Model parameters for which to prepare production files.
Default: takes the best parameters
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
"""
# Set up production path
prod_dir = self.mainDir + f'Production/v{self.version}/'
Path(prod_dir).mkdir(exist_ok=True)
# Parse arguments
model, feature_set, params = self._parse_production_args(model, feature_set, params)
# Verbose printing
if self.verbose > 0:
print(f'[AutoML] Preparing Production files for {model}, {feature_set}, {params}')
# Set best model (`self.bestModel`)
self._prepare_production_model(prod_dir + 'Model.joblib', model, feature_set, params)
# Set and store production settings
self._prepare_production_settings(prod_dir + 'Settings.json', model, feature_set, params)
# Observe production
# TODO[TS, 25.05.2022]: Currently, we are observing the data also here.
# However, in a future version we probably will only observe the data
# directly after :func:`_read_data()`. For now we wait...
obs = ProductionObserver(pipeline=self)
obs.observe()
self.settings['production_observation'] = obs.observations
# Report
report_path = self.mainDir + f'Documentation/v{self.version}/{model}_{feature_set}.pdf'
if not Path(report_path).exists():
self.document(self.bestModel, feature_set)
shutil.copy(report_path, prod_dir + 'Report.pdf')
# Finish
self.is_fitted = True
print('[AutoML] All done :)')
def convert_data(self, x: pd.DataFrame, preprocess: bool = True) -> [pd.DataFrame, pd.Series]:
"""
Function that uses the same process as the pipeline to clean data.
Useful if pipeline is pickled for production
Parameters
----------
        x [pd.DataFrame]: Input features
        preprocess [bool]: (optional) Whether to apply the custom preprocessing function
"""
# Convert to Pandas
if isinstance(x, np.ndarray):
x = pd.DataFrame(x, columns=[f"Feature_{i}" for i in range(x.shape[1])])
# Custom code
if self.preprocessFunction is not None and preprocess:
ex_globals = {'data': x}
exec(self.preprocessFunction, ex_globals)
x = ex_globals['data']
# Process data
x = self.dataProcessor.transform(x)
# Drift Check
self.driftDetector.check(x)
# Split output
y = None
if self.target in x.keys():
y = x[self.target]
if not self.includeOutput:
x = x.drop(self.target, axis=1)
# Sequence
if self.sequence:
x, y = self.dataSequencer.convert(x, y)
# Convert Features
x = self.featureProcessor.transform(x, self.settings['feature_set'])
# Standardize
if self.standardize:
x, y = self._transform_standardize(x, y)
# NaN test -- datetime should be taken care of by now
if x.astype('float32').replace([np.inf, -np.inf], np.nan).isna().sum().sum() != 0:
raise ValueError(f"Column(s) with NaN: {list(x.keys()[x.isna().sum() > 0])}")
# Return
return x, y
def predict(self, data: pd.DataFrame) -> np.ndarray:
"""
Full script to make predictions. Uses 'Production' folder with defined or latest version.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert
x, y = self.convert_data(data)
# Predict
if self.mode == 'regression' and self.standardize:
predictions = self._inverse_standardize(self.bestModel.predict(x))
else:
predictions = self.bestModel.predict(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return predictions
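    # --- Editor's illustrative sketch (not part of the original pipeline) ---
    # Predicting on fresh data; `new_data` is an assumed DataFrame with the same raw columns
    # used during training. The pipeline re-applies its own cleaning, feature extraction and
    # standardization through `convert_data` before calling the model:
    #
    #   y_hat = pipeline.predict(new_data)
    #   y_proba = pipeline.predict_proba(new_data)   # classification only, see below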
def predict_proba(self, data: pd.DataFrame) -> np.ndarray:
"""
Returns probabilistic prediction, only for classification.
Parameters
----------
data [pd.DataFrame]: data to do prediction on
"""
start_time = time.time()
assert self.is_fitted, "Pipeline not yet fitted."
assert self.mode == 'classification', 'Predict_proba only available for classification'
assert hasattr(self.bestModel, 'predict_proba'), '{} has no attribute predict_proba'.format(
type(self.bestModel).__name__)
# Print
if self.verbose > 0:
print('[AutoML] Predicting with {}, v{}'.format(type(self.bestModel).__name__, self.version))
# Convert data
x, y = self.convert_data(data)
# Predict
prediction = self.bestModel.predict_proba(x)
# Stop timer
self._prediction_time = (time.time() - start_time) / len(x) * 1000
# Calculate main predictors
self._get_main_predictors(x)
return prediction
# Fit functions
def _read_data(self, x=None, y=None, *, data=None, **kwargs):
"""
Reads and loads data into desired format.
Expects to receive:
1. Both, ``x`` and ``y`` (-> features and target), or
2. Either ``x`` or ``data`` (-> dataframe or path to folder)
Parameters
----------
x : np.ndarray or pd.Series or pd.DataFrame or str or Path, optional
x-data (input) OR acts as ``data`` parameter when param ``y`` is empty
y : np.ndarray or pd.Series, optional
y-data (target)
data : pd.DataFrame or str or Path, optional
Contains both, x and y, OR provides a path to folder structure
kwargs
Collecting container for keyword arguments that are passed through `self.fit()`.
Returns
-------
Pipeline
"""
assert x is not None or data is not None, 'No data provided'
assert (x is not None) ^ (data is not None), 'Setting both, `x` and `data`, is ambiguous'
# Labels are provided separately
if y is not None:
# Check data
x = x if x is not None else data
assert x is not None, 'Parameter ``x`` is not set'
assert isinstance(x, (np.ndarray, pd.Series, pd.DataFrame)), 'Unsupported data type for parameter ``x``'
assert isinstance(y, (np.ndarray, pd.Series)), 'Unsupported data type for parameter ``y``'
# Set target manually if not defined
if self.target == '':
self.target = 'target'
# Parse x-data
if isinstance(x, np.ndarray):
x = pd.DataFrame(x)
elif isinstance(x, pd.Series):
x = pd.DataFrame(x)
# Parse y-data
if isinstance(y, np.ndarray):
y = pd.Series(y, index=x.index)
y.name = self.target
# Check data
assert all(x.index == y.index), '``x`` and ``y`` indices do not match'
if self.target in x.columns:
assert all(x[self.target] == y), 'Target column co-exists in both, ``x`` and ``y`` data, ' \
                                                 f'but their content is not equal. Rename the column ``{self.target}`` ' \
'in ``x`` or set a (different) target in initialization.'
# Concatenate x and y
data = pd.concat([x, y], axis=1)
# Set data parameter in case it is provided through parameter ``x``
data = data if data is not None else x
metadata = None
# A path was provided to read out (multi-indexed) data
if isinstance(data, (str, Path)):
# Set target manually if not defined
if self.target == '':
self.target = 'target'
# Parse data
data, metadata = Utils.io.merge_logs(data, self.target)
# Test data
assert self.target != '', 'No target string provided'
assert self.target in data.columns, 'Target column missing'
assert len(data.columns) == data.columns.nunique(), 'Columns have no unique names'
# Parse data
y = data[self.target]
x = data.drop(self.target, axis=1)
if isinstance(x.columns, pd.RangeIndex):
x.columns = [f'Feature_{i}' for i in range(x.shape[1])]
# Concatenate x and y
data = pd.concat([x, y], axis=1)
# Save data
self.set_data(data)
# Store metadata in settings
self.settings['file_metadata'] = metadata or dict()
return self
def has_new_training_data(self):
# Return True if no previous version exists
if self.version == 1:
return True
# Get previous and current file metadata
curr_metadata = self.settings['file_metadata']
last_metadata = self.get_settings(self.version - 1)['file_metadata']
# Check each settings file
for file_id in curr_metadata:
# Get file specific metadata
curr = curr_metadata[file_id]
last = last_metadata.get(file_id, dict())
# Compare metadata
same_folder = curr['folder'] == last.get('folder')
same_file = curr['file'] == last.get('file')
same_mtime = curr['last_modified'] == last.get('last_modified')
if not all([same_folder, same_file, same_mtime]):
return False
return True
def _mode_detector(self):
"""
Detects the mode (Regression / Classification)
"""
# Only run if mode is not provided
if self.mode is None:
# Classification if string
if self.y.dtype == str or self.y.nunique() < 0.1 * len(self.data):
self.mode = 'classification'
self.objective = self.objective or 'neg_log_loss'
# Else regression
else:
self.mode = 'regression'
self.objective = self.objective or 'neg_mean_absolute_error'
# Set scorer
self.scorer = metrics.SCORERS[self.objective]
# Copy to settings
self.settings['pipeline']['mode'] = self.mode
self.settings['pipeline']['objective'] = self.objective
# Print
if self.verbose > 0:
print(f"[AutoML] Setting mode to {self.mode} & objective to {self.objective}.")
return
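    # --- Editor's note: worked example of the detection rule above (illustrative only) ---
    # With 1000 rows and a target taking 7 distinct values, nunique() = 7 < 0.1 * 1000, so the
    # mode becomes 'classification' and the objective defaults to 'neg_log_loss'. A continuous
    # target (say 980 distinct values in 1000 rows) fails the check and yields 'regression'
    # with 'neg_mean_absolute_error'.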
def _data_processing(self):
"""
Organises the data cleaning. Heavy lifting is done in self.dataProcessor, but settings etc. needs
to be organised.
"""
self.dataProcessor = DataProcessor(target=self.target, int_cols=self.intCols, float_cols=self.floatCols,
date_cols=self.dateCols, cat_cols=self.catCols,
missing_values=self.missingValues,
outlier_removal=self.outlierRemoval, z_score_threshold=self.zScoreThreshold)
# Set paths
data_path = self.mainDir + f'Data/Cleaned_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Cleaning_v{self.version}.json'
if Path(data_path).exists() and Path(settings_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
# Load settings
self.settings['data_processing'] = json.load(open(settings_path, 'r'))
self.dataProcessor.load_settings(self.settings['data_processing'])
if self.verbose > 0:
print('[AutoML] Loaded Cleaned Data')
else:
# Cleaning
data = self.dataProcessor.fit_transform(self.data)
self.set_data(data)
# Store data
self._write_csv(self.data, data_path)
# Save settings
self.settings['data_processing'] = self.dataProcessor.get_settings()
json.dump(self.settings['data_processing'], open(settings_path, 'w'))
# If no columns were provided, load them from data processor
if self.dateCols is None:
self.dateCols = self.settings['data_processing']['date_cols']
if self.intCols is None:
            self.intCols = self.settings['data_processing']['int_cols']
if self.floatCols is None:
self.floatCols = self.settings['data_processing']['float_cols']
if self.catCols is None:
self.catCols = self.settings['data_processing']['cat_cols']
# Assert classes in case of classification
self.n_classes = self.y.nunique()
if self.mode == 'classification':
if self.n_classes >= 50:
                warnings.warn('More than 50 classes, you may want to reconsider classification mode')
if set(self.y) != set([i for i in range(len(set(self.y)))]):
raise ValueError('Classes should be [0, 1, ...]')
def _eda(self):
if self.plotEDA:
print('[AutoML] Starting Exploratory Data Analysis')
eda = DataExplorer(self.x, y=self.y,
mode=self.mode,
folder=self.mainDir,
version=self.version)
eda.run()
def _data_sampling(self):
"""
Only run for classification problems. Balances the data using imblearn.
Does not guarantee to return balanced classes. (Methods are data dependent)
"""
self.dataSampler = DataSampler(method='both', margin=0.1, cv_splits=self.cvSplits, shuffle=self.shuffle,
fast_run=False, objective=self.objective)
# Set paths
data_path = self.mainDir + f'Data/Balanced_v{self.version}.csv'
# Only necessary for classification
if self.mode == 'classification' and self.balance:
if Path(data_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
if self.verbose > 0:
print('[AutoML] Loaded Balanced data')
else:
# Fit and resample
print('[AutoML] Resampling data')
x, y = self.dataSampler.fit_resample(self.x, self.y)
# Store
self._set_xy(x, y)
self._write_csv(self.data, data_path)
def _sequencing(self):
"""
Sequences the data. Useful mostly for problems where older samples play a role in future values.
The settings of this module are NOT AUTOMATIC
"""
self.dataSequencer = Sequencer(back=self.sequenceBack, forward=self.sequenceForward,
shift=self.sequenceShift, diff=self.sequenceDiff)
# Set paths
data_path = self.mainDir + f'Data/Sequence_v{self.version}.csv'
if self.sequence:
if Path(data_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
if self.verbose > 0:
print('[AutoML] Loaded Extracted Features')
else:
# Sequencing
print('[AutoML] Sequencing data')
x, y = self.dataSequencer.convert(self.x, self.y)
# Store
self._set_xy(x, y)
self._write_csv(self.data, data_path)
def _feature_processing(self):
"""
Organises feature processing. Heavy lifting is done in self.featureProcessor, but settings, etc.
needs to be organised.
"""
self.featureProcessor = FeatureProcessor(mode=self.mode, max_lags=self.maxLags, max_diff=self.maxDiff,
extract_features=self.extractFeatures, timeout=self.featureTimeout,
information_threshold=self.informationThreshold)
# Set paths
data_path = self.mainDir + f'Data/Extracted_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Extracting_v{self.version}.json'
if Path(data_path).exists() and Path(settings_path).exists():
# Loading data
x = self._read_csv(data_path)
self._set_x(x)
# Loading settings
self.settings['feature_processing'] = json.load(open(settings_path, 'r'))
self.featureProcessor.load_settings(self.settings['feature_processing'])
self.featureSets = self.settings['feature_processing']['featureSets']
if self.verbose > 0:
print('[AutoML] Loaded Extracted Features')
else:
print('[AutoML] Starting Feature Processor')
# Transform data
x, self.featureSets = self.featureProcessor.fit_transform(self.x, self.y)
# Store data
self._set_x(x)
self._write_csv(self.x, data_path)
# Save settings
self.settings['feature_processing'] = self.featureProcessor.get_settings()
json.dump(self.settings['feature_processing'], open(settings_path, 'w'))
def _interval_analysis(self):
"""
Interval-analyzes the data using ``Amplo.AutoML.IntervalAnalyser``
or resorts to pre-computed data, if present.
"""
# Skip analysis when analysis is not possible and/or not desired
is_interval_analyzable = len(self.x.index.names) == 2
if not (self.useIntervalAnalyser and is_interval_analyzable):
return
self.intervalAnalyser = IntervalAnalyser(target=self.target)
# Set paths
data_path = self.mainDir + f'Data/Interval_Analyzed_v{self.version}.csv'
settings_path = self.mainDir + f'Settings/Interval_Analysis_v{self.version}.json'
if Path(data_path).exists(): # TODO: and Path(settings_path).exists():
# Load data
data = self._read_csv(data_path)
self.set_data(data)
# TODO implement `IntervalAnalyser.load_settings` and add to `self.load_settings`
# # Load settings
# self.settings['interval_analysis'] = json.load(open(settings_path, 'r'))
# self.intervalAnalyser.load_settings(self.settings['interval_analysis'])
if self.verbose > 0:
print('[AutoML] Loaded interval-analyzed data')
else:
print(f'[AutoML] Interval-analyzing data')
# Transform data
data = self.intervalAnalyser.fit_transform(self.x, self.y)
# Store data
self.set_data(data)
self._write_csv(self.data, data_path)
# TODO implement `IntervalAnalyser.get_settings` and add to `self.get_settings`
# # Save settings
# self.settings['interval_analysis'] = self.intervalAnalyser.get_settings()
# json.dump(self.settings['interval_analysis'], open(settings_path, 'w'))
def _standardizing(self):
"""
Wrapper function to determine whether to fit or load
"""
# Return if standardize is off
if not self.standardize:
return
# Set paths
settings_path = self.mainDir + f'Settings/Standardize_v{self.version}.json'
if Path(settings_path).exists():
# Load data
self.settings['standardize'] = json.load(open(settings_path, 'r'))
else:
# Fit data
self._fit_standardize(self.x, self.y)
# Store Settings
json.dump(self.settings['standardize'], open(settings_path, 'w'))
# Transform data
x, y = self._transform_standardize(self.x, self.y)
self._set_xy(x, y)
def _initial_modelling(self):
"""
Runs various models to see which work well.
"""
# Set paths
results_path = Path(self.mainDir) / 'Results.csv'
# Load existing results
if results_path.exists():
# Load results
self.results = pd.read_csv(results_path)
# Printing here as we load it
results = self.results[np.logical_and(
self.results['version'] == self.version,
self.results['type'] == 'Initial modelling'
)]
for fs in set(results['dataset']):
print(f'[AutoML] Initial Modelling for {fs} ({len(self.featureSets[fs])})')
fsr = results[results['dataset'] == fs]
for i in range(len(fsr)):
row = fsr.iloc[i]
print(f'[AutoML] {row["model"].ljust(40)} {self.objective}: '
f'{row["mean_objective"]:.4f} \u00B1 {row["std_objective"]:.4f}')
# Check if this version has been modelled
if self.results is None or self.version not in self.results['version'].values:
# Iterate through feature sets
for feature_set, cols in self.featureSets.items():
# Skip empty sets
if len(cols) == 0:
print(f'[AutoML] Skipping {feature_set} features, empty set')
continue
print(f'[AutoML] Initial Modelling for {feature_set} features ({len(cols)})')
# Do the modelling
modeller = Modeller(mode=self.mode, shuffle=self.shuffle, store_models=self.storeModels,
objective=self.objective, dataset=feature_set,
store_results=False, folder=self.mainDir + 'Models/')
results = modeller.fit(self.x[cols], self.y)
# Add results to memory
results['type'] = 'Initial modelling'
results['version'] = self.version
if self.results is None:
self.results = results
else:
self.results = pd.concat([self.results, results])
# Save results
self.results.to_csv(results_path, index=False)
def grid_search(self, model=None, feature_set=None, parameter_set=None, **kwargs):
"""Runs a grid search.
By default, takes ``self.results`` and runs for the top ``n=self.gridSearchIterations`` optimizations.
There is the option to provide ``model`` and ``feature_set``, but **both** have to be provided. In this
case, the model and dataset combination will be optimized.
Implemented types, Base, Halving, Optuna.
Parameters
----------
model : list of (str or object) or object or str, optional
Which model to run grid search for.
feature_set : list of str or str, optional
            Which feature set to run grid search for. Must be provided when `model` is not None.
Options: ``RFT``, ``RFI``, ``ShapThreshold`` or ``ShapIncrement``
parameter_set : dict, optional
Parameter grid to optimize over.
Notes
-----
When both parameters, ``model`` and ``feature_set``, are provided, the grid search behaves as follows:
- When both parameters are either of dtype ``str`` or have the same length, then grid search will
treat them as pairs.
- When one parameter is an iterable and the other parameter is either a string or an iterable
of different length, then grid search will happen for each unique combination of these parameters.
"""
# Skip grid search and set best initial model as best grid search parameters
if self.gridSearchType is None or self.gridSearchIterations == 0:
best_initial_model = self._sort_results(self.results[self.results['version'] == self.version]).iloc[:1]
best_initial_model['type'] = 'Hyper Parameter'
self.results = pd.concat([self.results, best_initial_model], ignore_index=True)
return self
# Define models
if model is None:
# Run through first best initial models (n=`self.gridSearchIterations`)
selected_results = self.sort_results(self.results[np.logical_and(
self.results['type'] == 'Initial modelling',
self.results['version'] == self.version,
)]).iloc[:self.gridSearchIterations]
models = [Utils.utils.get_model(model_name, mode=self.mode, samples=len(self.x))
for model_name in selected_results['model']]
feature_sets = selected_results['dataset']
elif feature_set is None:
raise AttributeError('When `model` is provided, `feature_set` cannot be None. '
'Provide either both params or neither of them.')
else:
models = [Utils.utils.get_model(model, mode=self.mode, samples=len(self.x))] \
if isinstance(model, str) else [model]
feature_sets = [feature_set] if isinstance(feature_set, str) else list(feature_set)
if len(models) != len(feature_sets):
# Create each combination
combinations = list(itertools.product(np.unique(models), np.unique(feature_sets)))
models = [elem[0] for elem in combinations]
feature_sets = [elem[1] for elem in combinations]
# Iterate and grid search over each pair of model and feature_set
for model, feature_set in zip(models, feature_sets):
# Organise existing model results
m_results = self.results[np.logical_and(
self.results['model'] == type(model).__name__,
self.results['version'] == self.version,
)]
m_results = self._sort_results(m_results[m_results['dataset'] == feature_set])
# Skip grid search if optimized model already exists
if ('Hyper Parameter' == m_results['type']).any():
print('[AutoML] Loading optimization results.')
grid_search_results = m_results[m_results['type'] == 'Hyper Parameter']
# Run grid search otherwise
else:
# Run grid search for model
grid_search_results = self._grid_search_iteration(model, parameter_set, feature_set)
grid_search_results = self.sort_results(grid_search_results)
# Store results
grid_search_results['version'] = self.version
grid_search_results['dataset'] = feature_set
grid_search_results['type'] = 'Hyper Parameter'
self.results = pd.concat([self.results, grid_search_results], ignore_index=True)
self.results.to_csv(self.mainDir + 'Results.csv', index=False)
# Validate
if self.documentResults:
params = Utils.io.parse_json(grid_search_results.iloc[0]['params'])
# TODO: What about other than our custom models? They don't have `set_params()` method
self.document(model.set_params(**params), feature_set)
return self
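    # --- Editor's illustrative sketch (not part of the original pipeline) ---
    # Per the docstring above, `model` and `feature_set` are zipped when their lengths match
    # and combined otherwise. Model names below are assumptions for the example:
    #
    #   pipeline.grid_search(model='XGBClassifier', feature_set='RFI')            # one pair
    #   pipeline.grid_search(model=['XGBClassifier', 'LGBMClassifier'],
    #                        feature_set=['RFI', 'RFT'])                          # two pairs
    #   pipeline.grid_search(model='XGBClassifier', feature_set=['RFI', 'RFT'])   # 1 x 2 combinations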
def _create_stacking(self):
"""
Based on the best performing models, in addition to cheap models based on very different assumptions,
        a stacking model is optimized to enhance/combine the performance of the models.
--> should contain a large variety of models
--> classifiers need predict_proba
--> level 1 needs to be ordinary least squares
"""
if self.stacking:
print('[AutoML] Creating Stacking Ensemble')
# Select feature set that has been picked most often for hyper parameter optimization
results = self._sort_results(self.results[np.logical_and(
self.results['type'] == 'Hyper Parameter',
self.results['version'] == self.version,
)])
feature_set = results['dataset'].value_counts().index[0]
results = results[results['dataset'] == feature_set]
print('[AutoML] Selected Stacking feature set: {}'.format(feature_set))
# Create Stacking Model Params
n_stacking_models = 3
stacking_models_str = results['model'].unique()[:n_stacking_models]
stacking_models_params = [Utils.io.parse_json(results.iloc[np.where(results['model'] == sms)[0][0]]['params'])
for sms in stacking_models_str]
stacking_models = dict([(sms, stacking_models_params[i]) for i, sms in enumerate(stacking_models_str)])
print('[AutoML] Stacked models: {}'.format(list(stacking_models.keys())))
# Add samples & Features
stacking_models['n_samples'], stacking_models['n_features'] = self.x.shape
# Prepare Stack
if self.mode == 'regression':
stack = StackingRegressor(**stacking_models)
cv = KFold(n_splits=self.cvSplits, shuffle=self.shuffle)
elif self.mode == 'classification':
stack = StackingClassifier(**stacking_models)
cv = StratifiedKFold(n_splits=self.cvSplits, shuffle=self.shuffle)
else:
raise NotImplementedError('Unknown mode')
# Cross Validate
x, y = self.x[self.featureSets[feature_set]].to_numpy(), self.y.to_numpy()
score = []
times = []
for (t, v) in tqdm(cv.split(x, y)):
start_time = time.time()
xt, xv, yt, yv = x[t], x[v], y[t].reshape((-1)), y[v].reshape((-1))
model = copy.deepcopy(stack)
model.fit(xt, yt)
score.append(self.scorer(model, xv, yv))
times.append(time.time() - start_time)
# Output Results
print('[AutoML] Stacking result:')
print('[AutoML] {}: {:.2f} \u00B1 {:.2f}'.format(self.objective, np.mean(score), np.std(score)))
self.results = self.results.append({
'date': datetime.today().strftime('%d %b %y'),
'model': type(stack).__name__,
'dataset': feature_set,
'params': json.dumps(stack.get_params()),
'mean_objective': np.mean(score),
'std_objective': np.std(score),
'mean_time': np.mean(times),
'std_time': np.std(times),
'version': self.version,
'type': 'Stacking',
}, ignore_index=True)
self.results.to_csv(self.mainDir + 'Results.csv', index=False)
# Document
if self.documentResults:
self.document(stack, feature_set)
def document(self, model, feature_set: str):
"""
Loads the model and features and initiates the outside Documenting class.
Parameters
----------
        model [Object or str]: The model to document.
        feature_set [str]: Feature set to document, e.g. 'RFT', 'RFI' or 'ShapThreshold'
"""
# Get model
if isinstance(model, str):
model = Utils.utils.get_model(model, mode=self.mode, samples=len(self.x))
# Checks
assert feature_set in self.featureSets.keys(), 'Feature Set not available.'
if os.path.exists(self.mainDir + 'Documentation/v{}/{}_{}.pdf'.format(
self.version, type(model).__name__, feature_set)):
print('[AutoML] Documentation existing for {} v{} - {} '.format(
type(model).__name__, self.version, feature_set))
return
if len(model.get_params()) == 0:
warnings.warn('[Documenting] Supplied model has no parameters!')
# Run validation
print('[AutoML] Creating Documentation for {} - {}'.format(type(model).__name__, feature_set))
if self.mode == 'classification' and self.n_classes == 2:
documenting = BinaryDocumenting(self)
elif self.mode == 'classification':
documenting = MultiDocumenting(self)
elif self.mode == 'regression':
documenting = RegressionDocumenting(self)
else:
raise ValueError('Unknown mode.')
documenting.create(model, feature_set)
# Append to settings
self.settings['validation']['{}_{}'.format(type(model).__name__, feature_set)] = documenting.outputMetrics
def _parse_production_args(self, model=None, feature_set=None, params=None):
"""
Parse production arguments. Selects the best model, feature set and parameter combination.
Parameters
----------
model : str or list of str, optional
Model constraint(s)
feature_set : str or list of str, optional
Feature set constraint(s)
params : dict, optional
Parameter constraint(s)
Returns
-------
model : str
            Best model given the `model` constraint(s).
feature_set : str
            Best feature set given the `feature_set` constraint(s).
params : dict
            Best model parameters given the `params` constraint(s).
"""
        if model is not None and not isinstance(model, (str, list)):
# TODO: This issue is linked with AML-103 (in Jira)
# 1. Add to method docstring that it accepts a model instance, too
# 2. Change `if`-case to a single `isinstance(model, BasePredictor)`
# Get model name
model = type(model).__name__
# Get results of current version
results = self._sort_results(self.results[self.results['version'] == self.version])
if model is not None:
if isinstance(model, str):
model = [model]
# Filter results
results = self._sort_results(results[results['model'].isin(model)])
if feature_set is not None:
if isinstance(feature_set, str):
feature_set = [feature_set]
# Filter results
results = self._sort_results(results[results['dataset'].isin(feature_set)])
if params is None:
# Get best parameters
params = results.iloc[0]['params']
# Find the best allowed arguments
model = results.iloc[0]['model']
feature_set = results.iloc[0]['dataset']
params = Utils.io.parse_json(results.iloc[0]['params'])
return model, feature_set, params
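    # --- Editor's note (illustrative only) ---
    # Example of the constraint resolution above; the model name is an assumption:
    #   self._parse_production_args(model='XGBClassifier', feature_set=None)
    # keeps only the XGBClassifier rows of the current version in `self.results` and returns
    # the best-scoring (model, feature_set, params) triple among them.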
def _prepare_production_model(self, model_path, model, feature_set, params):
"""
Prepare and store `self.bestModel` for production
Parameters
----------
model_path : str or Path
Where to store model for production
model : str, optional
Model file for which to prepare production files
feature_set : str, optional
Feature set for which to prepare production files
params : dict, optional
Model parameters for which to prepare production files.
Default: takes best parameters
Returns
-------
model : str
Updated name of model
feature_set : str
Updated name of feature set
"""
model_path = Path(model_path)
# Try to load model from file
if model_path.exists():
# Load model
self.bestModel = joblib.load(Path(model_path))
# Reset if it's not the desired model
if type(self.bestModel).__name__ != model or self.bestModel.get_params() != params:
self.bestModel = None
else:
self.bestModel = None
# Set best model
if self.bestModel is not None:
if self.verbose > 0:
print('[AutoML] Loading existing model file')
else:
# Make model
if 'Stacking' in model:
# Create stacking
if self.mode == 'regression':
self.bestModel = StackingRegressor(n_samples=len(self.x), n_features=len(self.x.keys()))
elif self.mode == 'classification':
self.bestModel = StackingClassifier(n_samples=len(self.x), n_features=len(self.x.keys()))
else:
raise NotImplementedError("Mode not set")
else:
# Take model as is
self.bestModel = Utils.utils.get_model(model, mode=self.mode, samples=len(self.x))
# Set params, train
self.bestModel.set_params(**params)
self.bestModel.fit(self.x[self.featureSets[feature_set]], self.y)
# Save model
joblib.dump(self.bestModel, model_path)
if self.verbose > 0:
score = self.scorer(self.bestModel, self.x[self.featureSets[feature_set]], self.y)
                print(f'[AutoML] Model fully fitted, in-sample {self.objective}: {score:.4f}')
return model, feature_set
def _prepare_production_settings(self, settings_path, model=None, feature_set=None, params=None):
"""
Prepare `self.settings` for production and dump to file
Parameters
----------
settings_path : str or Path
Where to save settings for production
model : str, optional
Model file for which to prepare production files
feature_set : str, optional
Feature set for which to prepare production files
params : dict, optional
Model parameters for which to prepare production files.
Default: takes best parameters
"""
assert self.bestModel is not None, '`self.bestModel` is not yet prepared'
settings_path = Path(settings_path)
# Update pipeline settings
self.settings['version'] = self.version
self.settings['pipeline']['verbose'] = self.verbose
self.settings['model'] = model
self.settings['params'] = params
self.settings['feature_set'] = feature_set
self.settings['features'] = self.featureSets[feature_set]
self.settings['amplo_version'] = Amplo.__version__ if hasattr(Amplo, '__version__') else 'dev'
# Prune Data Processor
required_features = self.featureProcessor.get_required_features(self.featureSets[feature_set])
self.dataProcessor.prune_features(required_features)
self.settings['data_processing'] = self.dataProcessor.get_settings()
# Fit Drift Detector
self.driftDetector = DriftDetector(
num_cols=self.dataProcessor.float_cols + self.dataProcessor.int_cols,
cat_cols=self.dataProcessor.cat_cols,
date_cols=self.dataProcessor.date_cols
)
self.driftDetector.fit(self.x)
self.driftDetector.fit_output(self.bestModel, self.x[self.featureSets[feature_set]])
self.settings['drift_detector'] = self.driftDetector.get_weights()
# Save settings
json.dump(self.settings, open(settings_path, 'w'), indent=4)
# Getter Functions / Properties
@property
def data(self) -> Union[None, pd.DataFrame]:
return self._data
@property
def x(self) -> pd.DataFrame:
if self.data is None:
raise AttributeError('Data is None')
if self.includeOutput:
return self.data
return self.data.drop(self.target, axis=1)
@property
def y(self):
if self.data is None:
raise AssertionError('`self.data` is empty. Set a value with `set_data`')
return self.data[self.target]
@property
def y_orig(self):
enc_labels = self.y
dec_labels = self.dataProcessor.decode_labels(enc_labels, except_not_fitted=False)
return pd.Series(dec_labels, name=self.target, index=enc_labels.index)
# Setter Functions
def set_data(self, new_data: pd.DataFrame):
assert isinstance(new_data, pd.DataFrame), 'Invalid data type'
assert self.target in new_data, 'No target column present'
assert len(new_data.columns) > 1, 'No feature column present'
self._data = new_data
def _set_xy(self, new_x: Union[np.ndarray, pd.DataFrame], new_y: Union[np.ndarray, pd.Series]):
if not isinstance(new_y, pd.Series):
new_y =
|
pd.Series(new_y, name=self.target)
|
pandas.Series
|
# Import libraries
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# Load the train and test datasets to create two DataFrames
train = pd.read_csv('train.csv')
test=
|
pd.read_csv('test.csv')
|
pandas.read_csv
|
########################################################################
# Copyright 2020 Battelle Energy Alliance, LLC ALL RIGHTS RESERVED #
# Mobility Systems & Analytics Group, Idaho National Laboratory #
########################################################################
import pyodbc
import pandas as pd
import pickle
import datetime
import time
import math
import yaml
#import geopandas
#import shapely
from pathlib import Path
import csv
import numpy as np
from sklearn.cluster import DBSCAN
from shapely import geometry
from shapely.geometry import MultiPoint
from haversine import haversine, Unit
import pynput
class cfg():
with open('locationGeneralizer.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
odbcConnectionString=config['odbcConnectionString']
inputTableOrCSV= config['inputTableOrCSV']
vehiclesInChunk = config['vehiclesInChunk']
qryVehicleIDList =config['qryVehicleIDList']
qryVehicleInfo = config['qryVehicleInfo']
qryVehicleIDList = qryVehicleIDList.replace('{inputsrc}', inputTableOrCSV)
qryVehicleInfo = qryVehicleInfo.replace('{inputsrc}', inputTableOrCSV)
errorLogFileName = config['errorLogFileName']
heartbeatFileName = config['heartbeatFileName']
locationInfoFileName = config['locationInfoFileName']
homeInfoFileName = config['homeInfoFileName']
pklCensusDivisionsFileName = config['pklCensusDivisionsFileName']
evseLookupFileName = config['evseLookupFileName']
bboxes = config['boundingBoxes']
gpsOdoThreshold_mi = config['gpsOdoThreshold_mi']
minTrips = config['minTrips']
minLastTrips = config['minLastTrips']
minPctParks = config['minPctParks']
distancePlaces = config['distancePlaces']
dayEndHours = config['dayEndHours']
dayEndMinutes = config['dayEndMinutes']
dbscan_eps_ft = config['dbscan_eps_ft']
dbscan_min_spls = config['dbscan_min_spls']
evseDistRange_Miles = config['evseDistRange_Miles']
evseLatRange = config['evseLatRange']
evseLonRange = config['evseLonRange']
hdrErrorLogCSV = config['hdrErrorLogCSV']
hdrLocationInfoCSV = config['hdrLocationInfoCSV']
hdrHomeInfoCSV = config['hdrHomeInfoCSV']
colLocationInfo = config['colLocationInfo']
colHomeInfo = config['colHomeInfo']
verbose = 0
stopProcessing = False
errFilePath = Path(errorLogFileName)
if not errFilePath.exists():
        # ErrorLog output file
hdr = pd.DataFrame(hdrErrorLogCSV)
hdr.to_csv(errorLogFileName, index=False, header=False, mode='w')
# use one line buffering - every line written is flushed to disk
errorFile = open(errorLogFileName, mode='a', buffering=1, newline='')
errorWriter = csv.writer(errorFile)
def on_press(key):
if hasattr(key, 'char'):
if key.char == 'v':
cfg.verbose = (cfg.verbose + 1) % 3 # verbosity levels: 0, 1, 2
print('Verbosity: {}'.format(cfg.verbose))
if key.char == 'q':
cfg.stopProcessing = not cfg.stopProcessing
if cfg.stopProcessing:
print("Processing will stop after current vehicle.")
else:
print("Stop canceled, processing will continue.")
def main():
listener = pynput.keyboard.Listener(on_press=on_press, suppress=True)
listener.start()
    # for vehicle processing rate
vst = datetime.datetime.now()
# trust chained assignments (no warnings)
pd.set_option('mode.chained_assignment', None)
# LocationInfo output file
locationFilePath = Path(cfg.locationInfoFileName)
if not locationFilePath.exists():
hdr = pd.DataFrame(cfg.hdrLocationInfoCSV)
hdr.to_csv(cfg.locationInfoFileName, index=False, header=False, mode='w')
# HomeInfo output file
homeFilePath = Path(cfg.homeInfoFileName)
if not homeFilePath.exists():
hdr = pd.DataFrame(cfg.hdrHomeInfoCSV)
hdr.to_csv(cfg.homeInfoFileName, index=False, header=False, mode='w')
# get the census division polygons to use with finding home
# divisions = geopandas.read_file(cfg.censusDivisionsFileName)
# divisions = geopandas.GeoDataFrame.from_file(cfg.shpCensusDivisionsFileName)
# divisions.to_pickle(cfg.pklCensusDivisionsFileName)
## geopandas can read the shapefile directly, but we pickled it into one file
## a single pickle file simplifies distribution whereas,
## loading a shapefile requires several adjacent accompanying files
divisions = pd.read_pickle(cfg.pklCensusDivisionsFileName)
# get Public EVSE stations
EVSEs = pd.read_csv(cfg.evseLookupFileName)
# create empty LocationInfo data frame
# GPS coordinates are added here for convenience, but will not be carried into LocationInfo output file
locationInfo = pd.DataFrame(columns = cfg.colLocationInfo)
# create empty HomeInfo data frame
homeInfo = pd.DataFrame(columns = cfg.colHomeInfo)
# autocommit here is a workaround to prevent pyodbc from erroring when connecting to CSV files
pyodbc.autocommit = True
cnxn = pyodbc.connect(cfg.odbcConnectionString, autocommit=True)
lastVehicle = 0
hbFilePath = Path(cfg.heartbeatFileName)
if hbFilePath.exists():
with open(hbFilePath, 'r') as hb:
lastVehicle = hb.readline()
cfg.errorWriter.writerow([datetime.datetime.now(), lastVehicle, -1,'Restarting after vehicle {}'.format(lastVehicle)])
print('Restarting after vehicle {}'.format(lastVehicle))
# get sorted list of all vehicle IDs
qry = cfg.qryVehicleIDList.replace('{startVehicle}', str(lastVehicle))
df = pd.read_sql(qry, cnxn)
    numOfVehicles = cfg.vehiclesInChunk # number of vehicles to process at a time. We can't process all at once due to dataset size, so this is the "chunk size" to process
vehicleList = df['VehicleID'].tolist()
# divide up vehicle ID list into sections of <numOfVehicle> length chunks (we'll read data in one chunk at a time to avoid memory overrun)
chunks = [vehicleList[i * numOfVehicles:(i+1)*numOfVehicles] for i in range((len(vehicleList) + numOfVehicles -1) // numOfVehicles)]
i = 0
vcnt = 0
for chunk in chunks:
chunkList = ','.join(str(e) for e in chunk)
qry = cfg.qryVehicleInfo.format(chunkList) # insert vehicleIDs into "in" list
if cfg.verbose > 0: print('Fetching data')
chunkData = pd.read_sql(qry, cnxn, parse_dates=['TripStartLocalTime', 'TripEndLocalTime'])
# sqlQry = pd.read_sql_query(qry, cnxn)
# chunkData = pd.DataFrame(sqlQry)
# create new column for flag to exclude bad records
chunkData['Include'] = True
i += 1
print("chunk: {}, vehicle from {} through {}".format(i, chunk[0], chunk[-1]))
# iterate through one vehicle at a time
for v in chunk:
if cfg.stopProcessing: exit()
if cfg.verbose > 0: print('Vehicle: {}'.format(v))
vcnt += 1
# grab all records in vehicle v
vData = chunkData[chunkData['VehicleID'] == v]
            # create new column to check for Odometer gaps, i.e. missing trips
vData['resid_Miles'] = vData['TripStartOdometer_Miles'].shift(periods=-1) - vData['TripEndOdometer_Miles']
### Check validity of data, marking invalid records (Include = True/False)
if cfg.verbose > 1: print(' Check for valid values')
vData = DoValidityChecking(v, vData)
vData.resid_Miles = vData.resid_Miles.astype(object).where(vData.resid_Miles.notnull(), None) # set NaN to None (becomes Null for DB)
            # toss out rows that failed validity check
vData = vData[vData.Include == True]
numTrips = len(vData)
if numTrips < cfg.minTrips:
if cfg.verbose > 1: print(' Not enough trips, vehicle skipped.')
                cfg.errorWriter.writerow([datetime.datetime.now(), v, -1, 'Not enough trips, vehicle skipped ({}, need >= {}).'.format(numTrips, cfg.minTrips)])
else:
# create new column for identify first/last trip of day
vData['TripFlag'] = None
### Identify first and last of trip of day
if cfg.verbose > 1: print(' Defining first/last trip of day')
vData = flagTrips(v, vData)
### Find clusters of vehicle locations
if cfg.verbose > 1: print(' Clustering')
                vData = clusterData(v, vData)
# drop rows - remove previous vehicle info
homeInfo.drop(homeInfo.index, inplace=True)
locationInfo.drop(locationInfo.index, inplace=True)
# add row to LocationInfo data frame
liList = [vData[['VehicleID', 'TripStartLocalTime', 'TripEndLocalTime', 'TripStartLatitude', 'TripStartLongitude', 'TripEndLatitude','TripEndLongitude', 'TripStartClusterID', 'TripEndClusterID']]]
locationInfo = locationInfo.append(liList, ignore_index=True)
########################
                #### FIND HOME MODULE 1: must have at least 30 valid last trips of the day and 95% of those in one cluster
                #### (does not actually return more than one home, but must return an array to conform with other methods that may return more than one)
if cfg.verbose > 1: print(' Identifying home location')
topClusterIDs, homeCPs = findHome_Module1(v, vData)
########################
# process location and home info for home locations
if cfg.verbose > 1: print(' Calculating output metrics')
locationInfo, homeInfo = processHome(v, divisions, vData, locationInfo, homeInfo, topClusterIDs, homeCPs, EVSEs)
if not homeInfo.empty:
# write to output files
if cfg.verbose > 1: print(' Writing to output files')
locationInfo.to_csv(cfg.locationInfoFileName, index=False, header=False, mode='a')
homeInfo.to_csv(cfg.homeInfoFileName, index=False, header=False, mode='a')
# # use one line buffering - every line written is flushed to disk
with open(cfg.heartbeatFileName, mode='w', buffering=1, newline='') as hb:
hb.write(str(v))
if vcnt % 5 == 0:
ven = datetime.datetime.now()
secs = (ven - vst).seconds
if cfg.verbose > 0:
if secs > 0:
rate = 3600 * (vcnt/secs)
print('Processing rate (cumulative average): {:4.0f} / hour '.format(rate))
def findHome_Module1(v, vData):
CP = geometry.Point(0,0)
topClusterID = -1
# get the parks after known last trip of day
lastParks = vData[((vData['TripFlag'] == 'L') | (vData['TripFlag'] == 'FL')) & (vData['resid_Miles'] < 1) & (vData['resid_Miles'] > -1)]
if not lastParks.empty:
numValidLastTrips = lastParks['TripFlag'].count()
# get top cluster with last parks
#z = lastParks.groupby('TripEndClusterID')['TripEndClusterID'].count()
z = lastParks.groupby('TripEndClusterID')['TripEndClusterID'].count().sort_values(ascending=False)
topClusterCnt = z.iloc[0]
topClusterID = z.index[0]
if numValidLastTrips < cfg.minLastTrips:
cfg.errorWriter.writerow([datetime.datetime.now(), v, -1, 'Not enough last trips ({})'.format(numValidLastTrips)])
else:
#if topClusterCnt <= (numValidLastTrips * cfg.minPctParks):
# cfg.errorWriter.writerow([datetime.datetime.now(), v, -1, 'No home by percent trips. {} in clusterID {}, needed {:1.0f}% of {} ({:1.0f}).'.format(topClusterCnt, topClusterID, cfg.minPctParks*100, numValidLastTrips, (numValidLastTrips * cfg.minPctParks))])
#else:
if topClusterCnt > (numValidLastTrips * cfg.minPctParks):
# get centroid of top cluster (use only the home filtered points)
## get the home filtered points belonging to the topClusterID
dfPts = lastParks[['TripEndLatitude', 'TripEndLongitude']][lastParks['TripEndClusterID'] == topClusterID]
## convert to MultiPoint object
mpPts = MultiPoint(dfPts.to_numpy())
## get the center point of the topCLusterID
CP = mpPts.centroid
# need lat/long positions switched for "within" check
CP = geometry.Point(CP.y, CP.x)
# return as arrays to conform with other findHome modules
homeCPs = [CP, geometry.Point(0,0)]
topClusterIDs = [topClusterID, -1]
return topClusterIDs, homeCPs
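# --- Editor's note: worked example of the rule in findHome_Module1 (illustrative only) ---
# Assuming the thresholds referenced in the comment above (minLastTrips = 30, minPctParks = 0.95;
# the exact numbers come from locationGeneralizer.yml):
#   40 valid last-trips-of-day with 39 ending in cluster 7 -> 39 > 40 * 0.95 = 38, so cluster 7
#   becomes the home candidate and its centroid is returned as the home point.
#   40 valid last-trips-of-day with only 36 in the top cluster -> 36 <= 38, so no home is
#   recorded and the placeholder point (0, 0) is returned instead.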
# moving window module
def findHome_Module2(v, vData):
print(vData)
for i in range(0, len(vData), 30): print(i)
def getEVSEDistance(row, homeLat, homeLong):
dist = haversine((row.Latitude, row.Longitude), (homeLat, homeLong), unit=Unit.MILES)
return dist
def getStartLocationDistance(row, homeLat, homeLong, homeStart, homeEnd):
if (homeStart <= row['TripStartLocalTime'] <= homeEnd):
startDist = haversine((row['TripStartLatitude'], row['TripStartLongitude']), (homeLat, homeLong), unit=Unit.MILES)
else:
startDist = row['TripStartDistanceFromHome_Miles']
return startDist
def getEndLocationDistance(row, homeLat, homeLong, homeStart, homeEnd):
    if (homeStart <= row['TripEndLocalTime'] <= homeEnd):
        endDist = haversine((row['TripEndLatitude'], row['TripEndLongitude']), (homeLat, homeLong), unit=Unit.MILES)
    else:
        endDist = row['TripEndDistanceFromHome_Miles']
    return endDist
def processHome(v, divisions, vData, vLocationInfo, homeInfo, topClusterIDs, homeCPs, EVSEs):
# loop through home(s) from current vehicle
for cIdx, homeCPgeom in enumerate(homeCPs):
if homeCPgeom.x == 0.0: continue
#homeCPgeom = geometry.Point(CP[1], CP[0])
for i, division in divisions.iterrows():
if division.geometry.contains(homeCPgeom):
st = EVSEs[(EVSEs['Latitude'] > (homeCPgeom.y - cfg.evseLatRange)) &
(EVSEs['Latitude'] < (homeCPgeom.y + cfg.evseLatRange)) &
(EVSEs['Longitude'] > (homeCPgeom.x - cfg.evseLonRange)) &
(EVSEs['Longitude'] < (homeCPgeom.x + cfg.evseLonRange))]
if not st.empty:
st['hMiles'] = st.apply(getEVSEDistance, args=(homeCPgeom.y, homeCPgeom.x), axis=1)
st = st[st['hMiles'] <= cfg.evseDistRange_Miles]
l2Cnt = 0
dcCnt = 0
if not st.empty:
l2Cnt = st['L2'].sum()
dcCnt = st['DCFC'].sum()
# add info to homeInfo
#homeStart = vData['TripStartLocalTime'][vData['ClusterID'] == topClusterIDs[cIdx]].min()
#homeEnd = vData['TripEndLocalTime'][vData['ClusterID'] == topClusterIDs[cIdx]].max()
homeStart = vData['TripStartLocalTime'].min()
homeEnd = vData['TripEndLocalTime'].max()
newRow = {'VehicleID':v,
'HomeStartLocalTime':homeStart, 'HomeEndLocalTime':homeEnd,
'HomeRegion':division['NAME'], 'PublicChargingDensityL2':l2Cnt, 'PublicChargingDensityDCFC':dcCnt}
homeInfo = homeInfo.append(newRow, ignore_index=True)
# compute distance from home to trip start/end
vLocationInfo['TripStartDistanceFromHome_Miles'] = vLocationInfo.apply(getStartLocationDistance, args=(homeCPgeom.y, homeCPgeom.x, homeStart, homeEnd), axis = 1)
vLocationInfo['TripEndDistanceFromHome_Miles'] = vLocationInfo.apply(getEndLocationDistance, args=(homeCPgeom.y, homeCPgeom.x, homeStart, homeEnd), axis = 1)
vLocationInfo = vLocationInfo.round({'TripStartDistanceFromHome_Miles': cfg.distancePlaces, 'TripEndDistanceFromHome_Miles': cfg.distancePlaces})
# categorize locations
vLocationInfo.loc[vLocationInfo['TripStartClusterID'] == topClusterIDs[cIdx], 'TripStartLocationCategory'] = 'home'
vLocationInfo.loc[vLocationInfo['TripEndClusterID'] == topClusterIDs[cIdx], 'TripEndLocationCategory'] = 'home'
vLocationInfo.loc[vLocationInfo['TripStartClusterID'] != topClusterIDs[cIdx], 'TripStartLocationCategory'] = 'away'
vLocationInfo.loc[vLocationInfo['TripEndClusterID'] != topClusterIDs[cIdx], 'TripEndLocationCategory'] = 'away'
break
# remove GPS location info
vLocationInfo.drop(['TripStartLatitude', 'TripStartLongitude', 'TripEndLatitude', 'TripEndLongitude'], axis=1, inplace=True)
return vLocationInfo, homeInfo
def flagTrips(v, vData):
# use offset as end/start of day, e.g. 3:30 AM
vData['TripStartDateOffset'] = (vData['TripStartLocalTime'] - datetime.timedelta(hours=cfg.dayEndHours, minutes=cfg.dayEndMinutes)).dt.date
vData['TripEndDateOffset']= (vData['TripEndLocalTime'] - datetime.timedelta(hours=cfg.dayEndHours, minutes=cfg.dayEndMinutes)).dt.date
lastIdx = len(vData) - 1
curParkEndDate = vData['TripStartDateOffset'][0:1].item()
vData['TripFlag'][0:1] = 'F'
tripsCnt = 0
# find first and last trips in the day
for i in range(1, lastIdx):
tripsCnt += 1
# compare current (i) record to endDate
if vData['TripEndDateOffset'][i:i+1].item() != curParkEndDate:
vData['TripFlag'][i-1:i] = 'FL' if vData['TripFlag'][i-1:i].item() == 'F' else 'L' #
vData['TripFlag'][i:i+1] = 'F'
curParkEndDate = vData['TripEndDateOffset'][i:i+1].item()
tripsCnt = 0
vData['TripFlag'][-1:] = 'FL' if vData['TripFlag'][lastIdx-1:lastIdx].item() == 'L' else 'L'
return vData
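# --- Editor's note: worked example of the day-offset logic in flagTrips (illustrative only) ---
# With dayEndHours = 3 and dayEndMinutes = 30 (values come from the YAML config), a trip ending
# at 02:10 local time is shifted back by 3 h 30 min before taking the date, so it still counts
# toward the previous day; a trip ending at 04:00 starts a new offset-day. The first trip of
# each offset-day is flagged 'F', the last 'L', and a single-trip day gets 'FL'.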
def InBoundingBox(vd, colLat, colLon):
"""Check a value (latitude or longitude) to see if it is within the given range"""
if math.isnan(vd[colLat]) or math.isnan(vd[colLon]):
vd['Include'] = False
return vd
x = vd[colLat]
y = vd[colLon]
isFound = False
for k in cfg.bboxes.keys():
x1 = cfg.bboxes[k][0][0] # upper-
y1 = cfg.bboxes[k][0][1] # left coordinates
x2 = cfg.bboxes[k][1][0] # lower-
y2 = cfg.bboxes[k][1][1] # right coordinates
if x > x2 and x < x1 and y > y1 and y < y2: # note: x-axis decreases from bottom to top
isFound = True
break
# don't change any previously "falsed" flags
if not isFound:
vd['Include'] = False
return vd
def CheckDateTime(vd, colname):
try:
if
|
pd.isnull(vd[colname])
|
pandas.isnull
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for 1D regression data."""
import chex
from enn import base as enn_base
from enn import supervised
from enn import utils
import haiku as hk
import jax
import numpy as np
import pandas as pd
import plotnine as gg
def make_regression_df() -> pd.DataFrame:
"""Creates our regression dataset."""
seed = 0
n_data = 10
x = np.concatenate([np.linspace(0, 0.5, n_data), np.linspace(1, 1.5, n_data)])
w = np.random.RandomState(seed).randn(n_data * 2) * 0.1
y = x + np.sin(3 * x) + np.sin(12 * x) + w
return pd.DataFrame({'x': x, 'y': y}).reset_index()
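# Editor's note (illustrative, not part of the original module): the frame returned above has
# 2 * n_data = 20 rows and columns ['index', 'x', 'y'], with x split into two clusters
# ([0, 0.5] and [1, 1.5]) and y a noisy sum of sinusoids, so the spread of sampled predictions
# between the two clusters is easy to see in make_plot below.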
def make_dataset(extra_input_dim: int = 1) -> enn_base.BatchIterator:
"""Factory method to produce an iterator of Batches."""
df = make_regression_df()
data = enn_base.Batch(
x=np.vstack([df['x'].values, np.ones((extra_input_dim, len(df)))]).T,
y=df['y'].values[:, None],
)
chex.assert_shape(data.x, (None, 1 + extra_input_dim))
return utils.make_batch_iterator(data)
def make_plot(experiment: supervised.BaseExperiment,
num_sample: int = 20,
extra_input_dim: int = 1) -> gg.ggplot:
"""Generate a regression plot with sampled predictions."""
plot_df = make_plot_data(
experiment, num_sample=num_sample, extra_input_dim=extra_input_dim)
p = (gg.ggplot()
+ gg.aes('x', 'y')
+ gg.geom_point(data=make_regression_df(), size=3, colour='blue')
+ gg.geom_line(gg.aes(group='k'), data=plot_df, alpha=0.5)
)
return p
def make_plot_data(experiment: supervised.BaseExperiment,
num_sample: int = 20,
extra_input_dim: int = 1) -> pd.DataFrame:
"""Generate a panda dataframe with sampled predictions."""
preds_x = np.vstack([np.linspace(-1, 2), np.ones((extra_input_dim, 50))]).T
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for k in range(num_sample):
net_out = experiment.predict(preds_x, key=next(rng))
preds_y = utils.parse_net_output(net_out)
data.append(
|
pd.DataFrame({'x': preds_x[:, 0], 'y': preds_y[:, 0], 'k': k})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
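# --- Editor's note (illustrative only) ---
# The factories above build tiny test functions from source text. For instance,
# _make_func_use_binop1('+') compiles the equivalent of:
#
#   def test_impl(A, B):
#       return A + B
#
# which the test cases below then jit-compile via self.jit(test_impl).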
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
                    pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
    @skip_numba_jit('operator.neg for SeriesType is not implemented yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operators_comp_numeric_scalar(self):
"""Verifies using all various Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
        scalar_values = [2, 2.0, -3, np.inf, -np.inf, 0.0, -0.0]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
def test_series_operators_comp_str_scalar(self):
"""Verifies using all various Series comparison binary operators on an string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
                    or np.issubdtype(dtype_left, np.floating) and np.issubdtype(dtype_right, np.floating)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
    @skip_numba_jit("TODO: fix Series.sort_values to handle both None and '' in string series")
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify case when second operator is scalar
scalar = 3.0
with self.subTest(scalar=scalar):
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_add_str_scalar(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [' ', 'wq', '', '23']
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_add_str_unsupported(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
1,
3.0,
pd.Series(np.arange(n)),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator add(). Not supported for not-comparable operands.'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_mul_str_scalar(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', ' ', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [-1, 0, 2, 5]
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series([-1, 2, 0, 5, 3, -5, 4])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_mul_str_align_index_int1(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes containg same unique values (so alignment doesn't produce NaNs) """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
        shuffled_data = np.arange(n, dtype=int)
np.random.shuffle(shuffled_data)
index_A = shuffled_data
np.random.shuffle(shuffled_data)
index_B = shuffled_data
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
@unittest.expectedFailure # pandas can't calculate this due to adding NaNs to int series during alignment
def test_series_operator_mul_str_align_index_int2(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes that cannot be aligned without NaNs """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
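    # Note (illustrative sketch, not from the original module): the expectedFailure above
    # reflects plain pandas behaviour. Aligning on non-equal indexes inserts NaN for the
    # missing labels, which upcasts the integer operand to float64, roughly:
    #
    #     s = pd.Series([1, 2, 3], index=[0, 1, 2])
    #     s.reindex([0, 1, 5])      # label 5 is missing -> NaN -> dtype becomes float64
    #
    # and multiplying a string Series by a float/NaN-containing Series is not supported by
    # pandas, so the reference implementation itself fails for this case.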
def test_series_operator_mul_str_unsupported(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
'abc',
3.0,
pd.Series(series_data),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator mul(). Not supported between operands of types:'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_lt_index_mismatch1(self):
"""Verifies correct exception is raised when comparing Series with non equal integer indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index1 = np.arange(n)
index2 = np.copy(index1)
np.random.shuffle(index2)
A = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0], index=index1)
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1], index=index2)
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
def test_series_operator_lt_index_mismatch2(self):
"""Verifies correct exception is raised when comparing Series of different size with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series([1, 2, -1, 3, 4, 2])
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1])
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
@skip_numba_jit('Numba propagates different exception:\n'
'numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)\n'
'Internal error at <numba.core.typeinfer.IntrinsicCallConstraint ...\n'
'\'Signature\' object is not iterable')
def test_series_operator_lt_index_mismatch3(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for series with not-comparable indexes.'
self.assertIn(msg, str(raises.exception))
@skip_numba_jit("TODO: find out why pandas aligning series indexes produces Int64Index when common dtype is float\n"
"AssertionError: Series.index are different\n"
"Series.index classes are not equivalent\n"
"[left]: Float64Index([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype='float64')\n"
"[right]: Int64Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype='int64')\n")
def test_series_operator_lt_index_dtype_promotion(self):
"""Verifies implementation of Series.operator.lt between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
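    # Note (illustrative, not from the original module): the skip reason above describes a
    # difference observed between pandas and SDC for mixed-dtype indexes, e.g. (assumed):
    #
    #     A = pd.Series(np.arange(3), index=np.arange(3, dtype=np.int32))
    #     B = pd.Series(np.arange(3), index=np.arange(3, dtype=np.float32))
    #     (A < B).index   # pandas was observed to return an int64 index here,
    #                     # while the SDC implementation returns a float64 index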
def test_series_operator_lt_index_dtype_promotion_fixme(self):
""" Same as test_series_operator_lt_index_dtype_promotion but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
                    or np.issubdtype(dtype_left, np.floating) and np.issubdtype(dtype_right, np.floating)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_lt_unsupported_dtypes(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for not-comparable operands.'
self.assertIn(msg, str(raises.exception))
def test_series_operator_lt_str(self):
"""Verifies implementation of Series.operator.lt between two string Series with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
def test_series_binops_numeric(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
n = 11
cases_series = [
pd.Series(np.arange(1, n), name='A'),
pd.Series(np.ones(n - 1), name='B'),
pd.Series(np.arange(1, n) / 2, name='C'),
]
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for S1, S2 in combinations(cases_series, 2):
with self.subTest(S1=S1, S2=S2, method=method):
# check_dtype=False because SDC arithmetic methods return only float Series
pd.testing.assert_series_equal(
hpat_func(S1, S2),
test_impl(S1, S2),
check_dtype=False)
def test_series_binops_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
n = 11
cases_series = [
pd.Series(np.arange(1, n)),
pd.Series(np.ones(n - 1)),
pd.Series(np.arange(1, n) / 2),
]
cases_scalars = [0, 5, 0.5]
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for S1, scalar in product(cases_series, cases_scalars):
with self.subTest(S1=S1, scalar=scalar, method=method):
# check_dtype=False because SDC arithmetic methods return only float Series
pd.testing.assert_series_equal(
hpat_func(S1, scalar),
test_impl(S1, scalar),
check_dtype=False)
def test_series_binops_comp_numeric(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float64),
gen_frand_array(n),
np.ones(n, dtype=np.int32),
np.random.randint(-5, 5, n)]
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for data1, data2 in product(data_to_test, repeat=2):
A = pd.Series(data1)
B = pd.Series(data2)
with self.subTest(A=A, B=B):
pd.testing.assert_series_equal(
hpat_func(A, B),
test_impl(A, B),
check_names=False)
def test_series_binops_comp_numeric_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float64),
gen_frand_array(n),
np.ones(n, dtype=np.int32),
np.random.randint(-5, 5, n)]
scalar_values = [1, -1, 0, 3, 7, -5, 4.2]
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = self.jit(test_impl)
for data, scalar in product(data_to_test, scalar_values):
S = pd.Series(data)
with self.subTest(S=S, scalar=scalar, method=method):
pd.testing.assert_series_equal(
hpat_func(S, scalar),
test_impl(S, scalar),
check_names=False)
def test_series_binop_add_numeric(self):
"""Verifies implementation of Series.add method and fill_value param support on two float Series"""
def test_impl(S1, S2, value):
return S1.add(S2, fill_value=value)
sdc_func = self.jit(test_impl)
n = 100
np.random.seed(0)
cases_data = [
np.arange(n, dtype=np.float64),
gen_frand_array(n, nancount=25),
]
cases_index = [
None,
np.arange(n),
np.random.choice(np.arange(n), n, replace=False),
]
cases_value = [
None,
np.nan,
4,
5.5
]
for value, (arr1, arr2), (index1, index2) in product(
cases_value,
combinations_with_replacement(cases_data, 2),
combinations_with_replacement(cases_index, 2)):
S1 = pd.Series(arr1, index1)
S2 = pd.Series(arr2, index2)
with self.subTest(value=value, S1=S1, S2=S2):
result = sdc_func(S1, S2, value)
result_ref = test_impl(S1, S2, value)
pd.testing.assert_series_equal(result, result_ref)
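    # Note (illustrative, not from the original module): fill_value in Series.add substitutes
    # the given value for a missing element when the other operand has a value at that
    # position; if both operands are missing at a position the result stays NaN, e.g.:
    #
    #     pd.Series([1.0, np.nan]).add(pd.Series([np.nan, np.nan]), fill_value=0)
    #     # -> [1.0, NaN]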
def test_series_binop_add_scalar_numeric(self):
"""Verifies implementation of Series.add method and fill_value param support on float Series and a scalar"""
def test_impl(S1, S2, value):
return S1.add(S2, fill_value=value)
sdc_func = self.jit(test_impl)
S1 = pd.Series([1, np.nan, 3, np.nan, 5, 6, 7, np.nan, 9])
cases_value = [
None,
np.nan,
4,
5.5
]
cases_scalar = [
-2,
5.5,
np.nan
]
for fill_value, scalar in product(cases_value, cases_scalar):
with self.subTest(fill_value=fill_value, scalar=scalar):
result = sdc_func(S1, scalar, fill_value)
result_ref = test_impl(S1, scalar, fill_value)
pd.testing.assert_series_equal(result, result_ref)
def test_series_binop_add_numeric_diff_sizes(self):
"""Verifies implementation of Series.add method and fill_value param support
on two float Series with default indexes and different sizes"""
def test_impl(a, b, value):
return a.add(b, fill_value=value)
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, np.nan, 3, np.nan, 5, 6, 7, np.nan, 9])
S2 = pd.Series([1, np.nan, 3, 4, np.nan, 6])
values_to_test = [
None,
np.nan,
2,
2.1
]
for value in values_to_test:
with self.subTest(fill_value=value):
result = hpat_func(S1, S2, value)
result_ref = test_impl(S1, S2, value)
pd.testing.assert_series_equal(result, result_ref)
def test_series_binop_lt_numeric(self):
"""Verifies implementation of Series.lt method and fill_value param support on two float Series"""
def test_impl(S1, S2, value):
return S1.lt(S2, fill_value=value)
sdc_func = self.jit(test_impl)
n = 100
np.random.seed(0)
cases_data = [
np.arange(n, dtype=np.float64),
gen_frand_array(n, nancount=25),
]
cases_index = [
None,
np.arange(n),
pd.RangeIndex(n)
]
cases_value = [
None,
np.nan,
4,
5.5
]
for value, (arr1, arr2), (index1, index2) in product(
cases_value,
combinations_with_replacement(cases_data, 2),
combinations_with_replacement(cases_index, 2)):
S1 = pd.Series(arr1, index1)
S2 = pd.Series(arr2, index2)
with self.subTest(value=value, S1=S1, S2=S2):
result = sdc_func(S1, S2, value)
result_ref = test_impl(S1, S2, value)
pd.testing.assert_series_equal(result, result_ref)
def test_series_lt_scalar_numeric(self):
"""Verifies implementation of Series.lt method and fill_value param support on float Series and a scalar"""
def test_impl(S1, S2, value):
return S1.lt(S2, fill_value=value)
sdc_func = self.jit(test_impl)
S1 = pd.Series([1, np.nan, 3, np.nan, 5, 6, 7, np.nan, 9])
cases_value = [
None,
np.nan,
4,
5.5
]
cases_scalar = [
-2,
5.5,
np.nan
]
for fill_value, scalar in product(cases_value, cases_scalar):
with self.subTest(S1=S1, fill_value=fill_value, scalar=scalar):
result = sdc_func(S1, scalar, fill_value)
result_ref = test_impl(S1, scalar, fill_value)
pd.testing.assert_series_equal(result, result_ref)
# See SAT-4111 for more details
@skip_numba_jit("numpy + mkl_umath 1.0 // 0 gives nan, not inf as stock numpy>=1.20")
def test_series_binop_floordiv_numeric(self):
def test_impl(a, b, value):
return a.floordiv(b, fill_value=value)
hpat_func = self.jit(test_impl)
S1 = pd.Series([1., -5., 2., 2., np.nan, 2., 1.])
S2 = pd.Series([0., -2., 3., 2., 0., 2., 2.])
fill_values = [
None,
np.nan,
2,
2.1
]
for fill_value in fill_values:
with self.subTest(fill_value=fill_value):
result = hpat_func(S1, S2, fill_value)
result_ref = test_impl(S1, S2, fill_value)
pd.testing.assert_series_equal(result, result_ref)
def test_series_binop_add_same_non_unique_index(self):
"""Verifies addition of two Series with equal indexes with duplicate values that don't require alignment"""
def test_impl(a, b, value):
return a.add(b, fill_value=value)
hpat_func = self.jit(test_impl)
n = 1000
np.random.seed(0)
series_values = [-5, 5, 1/3, 2, -25.5, 1, 0, np.nan, np.inf]
index = np.random.choice(np.arange(n // 2), n)
S1 = pd.Series(np.random.choice(series_values, n), index)
S2 = pd.Series(np.random.choice(series_values, n), index)
fill_values = [
None,
np.nan,
2,
2.1
]
for fill_value in fill_values:
with self.subTest(fill_value=fill_value):
result = hpat_func(S1, S2, fill_value)
result_ref = test_impl(S1, S2, fill_value)
pd.testing.assert_series_equal(result, result_ref)
@skip_numba_jit("Arithmetic methods for string series not implemented yet")
def test_series_add_str(self):
def test_impl(a, b, value):
return a.add(b, fill_value=value)
hpat_func = self.jit(test_impl)
S1 = pd.Series(['a', 'bb', 'cc', None, 'd', 'ed'])
S2 = pd.Series(['aa', 'b', 'cc', 'a', None, 'de'])
fill_value = 'asd'
result = hpat_func(S1, S2, fill_value)
result_ref = test_impl(S1, S2, fill_value)
pd.testing.assert_series_equal(result, result_ref)
def test_series_lt_str(self):
"""Verifies implementation of Series.lt method and fill_value param support on two string Series"""
def test_impl(a, b, value):
return a.lt(b, fill_value=value)
hpat_func = self.jit(test_impl)
        S1 = pd.Series(['a', 'bb', 'cc', None, 'd', 'ed'])

api: pandas.Series
from time import perf_counter
from os import chdir, getcwd
import numpy as np
import pandas as pd
from plot import *
class del_then_inter:
def __init__(self, infile: str, has_imu: bool, conv_time: bool, plot_choice):
self.df, _ = setup(infile, has_imu, conv_time)
self.plot_choice = plot_choice
# Delete unimportant columns
#self.df.drop(self.df.loc[:,'IMU_AngVelX':'IMU_LinearAccZ'].columns, inplace=True, axis=1)
def delete_vals(self) -> None:
print('\n\tdeleting bad values...\n')
self.df = self.df.reset_index()
for i in range(len(self.df.GPS_Long)):
if self.df.SDn[i] > 0.005:
self.df.loc[i,'GPS_Long':'GPS_Alt'] = pd.NA
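    # Note (illustrative, not from the original file): the row-by-row loop above can be
    # replaced by a single vectorized assignment with boolean indexing, which is typically
    # much faster for large frames:
    #
    #     mask = self.df.SDn > 0.005
    #     self.df.loc[mask, 'GPS_Long':'GPS_Alt'] = pd.NA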
def interpolate(self) -> None:
print('\tinterpolating...\n')
# Force columns into numeric data types
self.df['GPS_Long'] = pd.to_numeric(self.df['GPS_Long'], errors='coerce')
        self.df['GPS_Lat'] = pd.to_numeric(self.df['GPS_Lat'], errors='coerce')

api: pandas.to_numeric