prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
# Originally created: 02nd October, 2021
# <NAME>
# Last amended: 09th Oct, 2021
# Myfolder: /home/ashok/Documents/churnapp
# VM: lubuntu_healthcare
# Ref: https://builtin.com/machine-learning/streamlit-tutorial
#
# Objective:
# Deploy an ML model on web
#
########################
# Notes:
# 1. Run this app in its folder, as:
# cd /home/ashok/Documents/churnapp
# streamlit run churn-app.py
# 2. Accompanying file to experiment with is
# expt.py
########################
# 1.0 Call libraries
# Install as: pip install streamlit
# Better create a separate conda environment for it
import streamlit as st
import pandas as pd
import numpy as np
import pickle
import base64
#import seaborn as sns
#import matplotlib.pyplot as plt
# 1.1 Set pandas options. None means no truncation
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Write some body-text on the Web-Page:
st.write("""
# Churn Prediction App
Customer churn is defined as the loss of customers after a certain period of time.
Companies are interested in targeting customers who are likely to churn. They can
target these customers with special deals and promotions to influence them to stay
with the company.
This app predicts the probability of a customer churning using Telco Customer data. Here
customer churn means the customer does not make another purchase after a period of time.
""")
# 2.0 Read data from current folder
# Default folder is where streamlit
# is being run. So this file
# should be in /home/ashok/Documents/churnapp
# Else, home folder is the default.
df_selected = pd.read_csv("telco_churn.csv")
# 2.1 We will select only a few columns
# for our model:
cols = ['gender', 'Partner', 'Dependents', 'PhoneService', 'tenure', 'MonthlyCharges', 'Churn']
df_selected_all = df_selected[cols].copy()
# 3.0 We will create a file download link
# in our webapp
# What is base64?
# See: https://levelup.gitconnected.com/what-is-base64-encoding-4b5ed1eb58a4
def filedownload(df):
csv = df.to_csv(index=False) # csv is now a string
csv = csv.encode() # csv is now a bytes object
b64 = base64.b64encode(csv) # b64 is the base64-encoded bytes
b64 = b64.decode() # b64 is decoded back to an ASCII string
# 3.1 Create an html link to download datafile
# See here: https://stackoverflow.com/a/14011075
href = f'<a href="data:file/csv;base64,{b64}" download="churn_data.csv">Download CSV File</a>'
# 3.2 Return href object
return href
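# A minimal sketch (not part of the app) of what the encoding above produces,
# assuming a tiny illustrative DataFrame:
# demo = pd.DataFrame({'a': [1, 2]})
# demo_b64 = base64.b64encode(demo.to_csv(index=False).encode()).decode()
# demo_b64 is plain ASCII, so it can be embedded directly in a
# "data:file/csv;base64,..." href like the one returned by filedownload().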
#st.set_option('deprecation.showPyplotGlobalUse', False)
# 3.3 Finally display the href link
href = filedownload(df_selected_all)
st.markdown(href, unsafe_allow_html=True)
# 4.0 Create a component to upload data file in the sidebar.
# 'uploaded_file' is a file-like object (None until a file is uploaded)
uploaded_file = st.sidebar.file_uploader(
"Upload your input CSV file",
type=["csv"]
)
# 4.1 Read data from file. Else, read from widgets
if uploaded_file is not None:
# 4.2 Read the uploaded file
input_df = pd.read_csv(uploaded_file)
else:
# 4.3 Define a function to create data from widgets
def user_input_features():
# 4.4 Create four widgets
gender = st.sidebar.selectbox('gender',('Male','Female'))
PaymentMethod = st.sidebar.selectbox('PaymentMethod',('Bank transfer (automatic)', 'Credit card (automatic)', 'Mailed check', 'Electronic check'))
MonthlyCharges = st.sidebar.slider('Monthly Charges', 18.0,118.0, 18.0)
tenure = st.sidebar.slider('tenure', 0.0,72.0, 0.0)
# 4.5 Collect widget output in a dictionary
data = {
'gender': [gender], # Should be a list data structure
'PaymentMethod': [PaymentMethod],
'MonthlyCharges':[MonthlyCharges],
'tenure': [tenure]
}
# 4.6 Transform data to DataFrame
features =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import os
import re
import sys
import math
import json
import bokeh
import geopandas
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from bokeh.io.doc import curdoc
from bokeh.layouts import layout
from bokeh.plotting import figure
from bokeh.models.glyphs import Text
from bokeh.application import Application
from bokeh.models.callbacks import CustomJS
from bokeh.plotting import show as plt_show
from bokeh.palettes import brewer,OrRd,YlGn
from bokeh.models.widgets import Button,Select
from bokeh.tile_providers import Vendors,get_provider
from bokeh.io import output_notebook,show,output_file
from bokeh.application.handlers import FunctionHandler
from bokeh.layouts import widgetbox,row,column,gridplot
from bokeh.models import ColumnDataSource,Slider,HoverTool,Select,Div,Range1d,WMTSTileSource,BoxZoomTool,TapTool,Panel,Tabs
from bokeh.models import GeoJSONDataSource,LinearColorMapper,ColorBar,NumeralTickFormatter,LinearAxis,Grid,Label,Band,Legend,LegendItem
verbose=False
enable_GeoJSON_saving=False
DATA_UPDATE_DATE='20-October-2021'
FORECASTS_UPDATE_DATE='19-October-2021'
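# Normalize state names: strip non-alphabetic characters and map common
# misspellings / older names (e.g. 'Karanataka', 'Telengana', 'Daman & Diu')
# to the current official state / union territory names.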
def apply_corrections(input_df):
for state in list(input_df['state'].values):
input_df.loc[input_df['state']==state,'state']=re.sub('[^A-Za-z ]+', '',str(state))
input_df.loc[input_df['state']=='Karanataka','state']='Karnataka'
input_df.loc[input_df['state']=='Himanchal Pradesh','state']='Himachal Pradesh'
input_df.loc[input_df['state']=='Telengana','state']='Telangana'
input_df.loc[input_df['state']=='Dadra and Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Dadar Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Dadra Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Daman & Diu','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Daman and Diu','state']='Dadra and Nagar Haveli and Daman and Diu'
return input_df
def os_style_formatter(input_str):
try:
os_env=os.environ['OS']
except KeyError:
os_env='unknown'
return str(input_str).replace('/', "\\") if os_env=='Windows_NT' else str(input_str)
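# Example: when os.environ['OS'] == 'Windows_NT', os_style_formatter('./data/x.csv')
# returns '.\data\x.csv'; on other systems the path is returned unchanged.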
try:
India_statewise=geopandas.read_file('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/GeoJSON_assets/India_statewise_minified.geojson')
India_stats=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/Population_stats_India_statewise.csv')
covid19_data=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/COVID19_India_statewise.csv')
preds_df=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/experimental/output_preds.csv')
except:
India_GeoJSON_repoFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/GeoJSON_assets/India_statewise_minified.geojson')
covid19_statewise_repoFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/COVID19_India_statewise.csv')
India_statewise_statsFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/Population_stats_India_statewise.csv')
saved_predsFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/experimental/output_preds.csv')
if os.path.exists(India_GeoJSON_repoFile):
India_statewise=geopandas.read_file(India_GeoJSON_repoFile)
print('Reading India GeoJSON file from saved repo ...')
else:
sys.exit('Failed to read GeoJSON file for India ...')
if os.path.exists(covid19_statewise_repoFile):
covid19_data=pd.read_csv(covid19_statewise_repoFile)
print('Reading India COVID19 file from saved repo ...')
else:
sys.exit('Failed to read India COVID19 file ...')
if os.path.exists(India_statewise_statsFile):
India_stats=
|
pd.read_csv(India_statewise_statsFile)
|
pandas.read_csv
|
## FROM KAGGLE KERNEL:
import os
import random
import gc
import tqdm
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import matplotlib.pyplot as plt
# import seaborn as sns
#
# from sklearn import preprocessing
# from sklearn.model_selection import KFold
# import lightgbm as lgb
# import xgboost as xgb
# import catboost as cb
# %% {"_kg_hide-input": true}
# Copy from https://www.kaggle.com/gemartin/load-data-reduce-memory-usage by @gemartin
# Modified to support timestamp type
# Modified to add option to use float16 or not. feather format does not support float16.
from pandas.api.types import is_datetime64_any_dtype as is_datetime
def reduce_mem_usage(df, use_float16=False):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
if is_datetime(df[col]):
# skip datetime type
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
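# Rough intuition for the savings above: a column of 1,000,000 small integers
# stored as int64 occupies ~7.6 MB, but only ~0.95 MB as int8 (8x smaller);
# float64 -> float32 halves memory while keeping ~7 significant digits.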
def import_data(file):
"""create a dataframe and optimize its memory usage"""
df = pd.read_csv(file, parse_dates=True, keep_date_col=True)
df = reduce_mem_usage(df)
return df
#%%
# %%
from pathlib import Path
import zipfile
DATA_PATH = '~/ashrae/data/raw'
DATA_PATH = Path(DATA_PATH)
DATA_PATH = DATA_PATH.expanduser()
assert DATA_PATH.exists(), DATA_PATH
DATA_FEATHER_PATH ='~/ashrae/data/feather'
DATA_FEATHER_PATH = Path(DATA_FEATHER_PATH)
DATA_FEATHER_PATH = DATA_FEATHER_PATH.expanduser()
assert DATA_FEATHER_PATH.exists()
# zipfile.ZipFile(DATA_PATH).infolist()
#%%
ZIPPED = False
# %%time
if ZIPPED:
with zipfile.ZipFile(DATA_PATH) as zf:
with zf.open('train.csv') as zcsv:
train_df = pd.read_csv(zcsv)
with zf.open('test.csv') as zcsv:
test_df = pd.read_csv(zcsv)
with zf.open('weather_train.csv') as zcsv:
weather_train_df = pd.read_csv(zcsv)
with zf.open('weather_test.csv') as zcsv:
weather_test_df = pd.read_csv(zcsv)
with zf.open('building_metadata.csv') as zcsv:
building_meta_df = pd.read_csv(zcsv)
with zf.open('sample_submission.csv') as zcsv:
sample_submission = pd.read_csv(zcsv)
#%%
train_df = pd.read_csv(DATA_PATH / 'train.zip')
test_df =
|
pd.read_csv(DATA_PATH / 'test.zip')
|
pandas.read_csv
|
import os
from src.utils.common.common_helper import read_config
from loguru import logger
from flask import session
from src.utils.databases.mysql_helper import MySqlHelper
from from_root import from_root
import pandas as pd
path = os.path.join(from_root(), 'config.yaml')
config_args = read_config(path)
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
class ProjectReports:
projectStatus = [{'key': 1, 'value': 'Initialized'}, {'key': 2, 'value': 'EDA'}, {'key': 3, 'value': 'Data Processing'}, {'key': 4, 'value': 'Feature Engineering'}, {'key': 5, 'value': 'Model Training'}]
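# Audit helper: each insert_record_* method below writes one row to
# tblProjectReports for its pipeline stage (ModuleId 2 = EDA, 3 = Data
# Processing, 4 = Feature Engineering, 5 = Model Training). Note the queries
# are built with f-strings, so inputs containing quotes will break the SQL;
# parameter binding would be safer if MySqlHelper supports it.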
@staticmethod
def insert_record_eda(actionName, input='', output='', isSuccessed=1, errorMessage=''):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""INSERT INTO
tblProjectReports(`Projectid`, `ModuleId`, `ActionName`, `Input`, `IsSuccessed`, `Output`, `ErrorMessage`)
VALUES ('{session.get('pid')}','2','{actionName}','{input}', {isSuccessed},'{output}','{errorMessage}')"""
logger.info(f"{session.get('id')} details uploaded successfully for EDA!")
rowcount = mysql.insert_record(query)
except Exception as e:
logger.error(f"{session.get('pid')} details upload failed for EDA!")
@staticmethod
def insert_record_dp(actionName, input='', output='', isSuccessed=1, errorMessage=''):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""INSERT INTO
tblProjectReports(`Projectid`, `ModuleId`, `ActionName`, `Input`, `IsSuccessed`, `Output`, `ErrorMessage`)
VALUES ('{session.get('pid')}','3','{actionName}','{input}',{isSuccessed},'{output}','{errorMessage}')"""
logger.info(f"{session.get('pid')} details uploaded successfully for Data Preprocessing!")
rowcount = mysql.insert_record(query)
except Exception as e:
logger.error(f"{session.get('pid')} details upload failed for Data Preprocessing!")
@staticmethod
def insert_record_fe(actionName, input='', output='', isSuccessed=1, errorMessage=''):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""INSERT INTO
tblProjectReports(`Projectid`, `ModuleId`, `ActionName`, `Input`, `IsSuccessed`, `Output`, `ErrorMessage`)
VALUES ('{session.get('pid')}','4','{actionName}','{input}',{isSuccessed},'{output}','{errorMessage}')"""
logger.info(f"{session.get('pid')} details uploaded successfully for Feature Engineering!")
rowcount = mysql.insert_record(query)
except Exception as e:
logger.error(f"{session.get('pid')} details upload failed for Feature Engineering!")
@staticmethod
def insert_record_ml(actionName, input='', output='', isSuccessed=1, errorMessage=''):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""INSERT INTO
tblProjectReports(`Projectid`, `ModuleId`, `ActionName`, `Input`, `IsSuccessed`, `Output`, `ErrorMessage`)
VALUES ('{session.get('pid')}','5','{actionName}','{input}',{isSuccessed},'{output}','{errorMessage}')"""
logger.info(f"{session.get('pid')} details uploaded successfully for Machine Learning!")
rowcount = mysql.insert_record(query)
except Exception as e:
logger.error(f"{session.get('pid')} details upload failed for Machine Learning!")
"""[summary]
Method To Add Project Actions Report
"""
@staticmethod
def insert_project_action_report(projectActionId, input='', output=''):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""INSERT INTO
tblProject_Actions_Reports(ProjectId, ProjectActionId, Input, Output)
VALUES ({session.get('pid')},{projectActionId},"{input}",'{output}')"""
rowcount = mysql.insert_record(query)
return rowcount
except Exception as e:
print(e)
@staticmethod
def add_active_module(moduleId):
# if 'mysql' not in st.session_state:
# ProjectReports.make_mysql_connection()
# print("called")
# mysql=st.session_state['mysql']
# query=f"""Update tblProjects SET Status={moduleId} Where Id={session.get('id')}"""
# rowcount = mysql.insert_record(query)
pass
@staticmethod
def get_record_by_pid(pid, moduleId):
try:
mysql = MySqlHelper.get_connection_obj()
query = f"""SHOW COLUMNS FROM tblProjectReports"""
columns = mysql.fetch_all(query)
columns = pd.DataFrame(columns)[0].tolist()
columns.insert(3, 'Module Name')
columns = columns[3:]
# query = f"""select tblProjectStatus.Name, tblProjectReports.ActionName, tblProjectReports.Input, tblProjectReports.IsSuccessed, tblProjectReports.Output, tblProjectReports.ErrorMessage, tblProjectReports.CreateDate from tblProjectReports join tblProjectStatus on (tblProjectReports.ModuleId=tblProjectStatus.Id) where tblProjectReports.Projectid = {pid}"""
# if moduleId != 0:
# query = query + f""" and tblProjectReports.ModuleId = {moduleId}"""
query = f'''select tblProjectStatus.Name, tblProjectReports.ActionName, tblProjectReports.Input,
tblProjectReports.IsSuccessed, tblProjectReports.Output, tblProjectReports.ErrorMessage,
tblProjectReports.CreateDate from tblProjectReports
join tblProjectStatus on (tblProjectReports.ModuleId=tblProjectStatus.Id)
join tblProjects on (tblProjectReports.Projectid=tblProjects.Id)
where tblProjects.Pid = '{pid}' '''
if moduleId is not None:
query += f" and tblProjectReports.ModuleId = {moduleId}"
records = mysql.fetch_all(query)
records =
|
pd.DataFrame(records, columns=columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import random
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from pandasqt.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from pandasqt.models.DataSearch import DataSearch
from pandasqt.models.SupportedDtypes import SupportedDtypes
def test_initDataFrame():
model = DataFrameModel()
assert model.dataFrame().empty
def test_initDataFrameWithDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
def test_setDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel()
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
with pytest.raises(TypeError) as excinfo:
model.setDataFrame(None)
assert "pandas.core.frame.DataFrame" in unicode(excinfo.value)
@pytest.mark.parametrize(
"copy, operator",
[
(True, numpy.not_equal),
(False, numpy.equal)
]
)
def test_copyDataFrame(copy, operator):
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
model.setDataFrame(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
def test_TimestampFormat():
model = DataFrameModel()
assert model.timestampFormat == Qt.ISODate
newFormat = u"yy-MM-dd hh:mm"
model.timestampFormat = newFormat
assert model.timestampFormat == newFormat
with pytest.raises(TypeError) as excinfo:
model.timestampFormat = "yy-MM-dd hh:mm"
assert "unicode" in unicode(excinfo.value)
#def test_signalUpdate(qtbot):
#model = DataFrameModel()
#with qtbot.waitSignal(model.layoutAboutToBeChanged) as layoutAboutToBeChanged:
#model.signalUpdate()
#assert layoutAboutToBeChanged.signal_triggered
#with qtbot.waitSignal(model.layoutChanged) as blocker:
#model.signalUpdate()
#assert blocker.signal_triggered
@pytest.mark.parametrize(
"orientation, role, index, expectedHeader",
[
(Qt.Horizontal, Qt.EditRole, 0, None),
(Qt.Vertical, Qt.EditRole, 0, None),
(Qt.Horizontal, Qt.DisplayRole, 0, 'A'),
(Qt.Horizontal, Qt.DisplayRole, 1, None), # run into IndexError
(Qt.Vertical, Qt.DisplayRole, 0, 0),
(Qt.Vertical, Qt.DisplayRole, 1, 1)
]
)
def test_headerData(orientation, role, index, expectedHeader):
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.headerData(index, orientation, role) == expectedHeader
def test_flags():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
index = model.index(0, 0)
assert index.isValid()
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled
model.enableEditing(True)
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
model.setDataFrame(pandas.DataFrame([True], columns=['A']))
index = model.index(0, 0)
model.enableEditing(True)
assert model.flags(index) != Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
def test_rowCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.rowCount() == 1
model = DataFrameModel(pandas.DataFrame(numpy.arange(100), columns=['A']))
assert model.rowCount() == 100
def test_columnCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.columnCount() == 1
model = DataFrameModel( pandas.DataFrame(numpy.arange(100).reshape(1, 100), columns=numpy.arange(100)) )
assert model.columnCount() == 100
class TestSort(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.mark.parametrize(
"signal",
[
"layoutAboutToBeChanged",
"layoutChanged",
"sortingAboutToStart",
"sortingFinished",
]
)
def test_signals(self, model, qtbot, signal):
with qtbot.waitSignal(getattr(model, signal)) as blocker:
model.sort(0)
assert blocker.signal_triggered
def test_returnValues(self, model):
model.sort(0)
@pytest.mark.parametrize(
"testAscending, modelAscending, isIdentic",
[
(True, Qt.AscendingOrder, True),
(False, Qt.DescendingOrder, True),
(True, Qt.DescendingOrder, False),
]
)
def test_sort(self, model, dataFrame, testAscending, modelAscending, isIdentic):
temp = dataFrame.sort('A', ascending=testAscending)
model.sort(0, order=modelAscending)
assert (dataFrame['A'] == temp['A']).all() == isIdentic
class TestData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
index = model.index(0, 0)
assert index.isValid()
return index
def test_invalidIndex(self, model):
assert model.data(QtCore.QModelIndex()) is None
def test_unknownRole(self, model, index):
assert index.isValid()
assert model.data(index, role="unknownRole") == None
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == None
# with pytest.raises(TypeError) as excinfo:
# model.data(index)
# assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
(u"äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.1111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
assert model.data(index) == round(value, precision)
assert model.data(index, role=Qt.DisplayRole) == round(value, precision)
assert model.data(index, role=Qt.EditRole) == round(value, precision)
else:
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
#@pytest.mark.parametrize(
#"border1, modifier, border2, dtype", [
#("min", -1, "max", numpy.uint8),
#("max", +1, "min", numpy.uint8),
#("min", -1, "max", numpy.uint16),
#("max", +1, "min", numpy.uint16),
#("min", -1, "max", numpy.uint32),
#("max", +1, "min", numpy.uint32),
#("min", -1, "max", numpy.uint64),
##("max", +1, "min", numpy.uint64), # will raise OverFlowError caused by astype function,
## uneffects models data method
#("min", -1, "max", numpy.int8),
#("max", +1, "min", numpy.int8),
#("min", -1, "max", numpy.int16),
#("max", +1, "min", numpy.int16),
#("min", -1, "max", numpy.int32),
#("max", +1, "min", numpy.int32),
##("min", -1, "max", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
##("max", +1, "min", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
#]
#)
#def test_integerBorderValues(self, model, index, border1, modifier, border2, dtype):
#ii = numpy.iinfo(dtype)
#dataFrame = pandas.DataFrame([getattr(ii, border1) + modifier], columns=['A'])
#dataFrame['A'] = dataFrame['A'].astype(dtype)
#model.setDataFrame(dataFrame)
#assert not model.dataFrame().empty
#assert model.dataFrame() is dataFrame
#assert index.isValid()
#assert model.data(index) == getattr(ii, border2)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
pandasDate = pandas.Timestamp("1990-10-08T10:15:45")
qDate = QtCore.QDateTime.fromString(str(pandasDate), Qt.ISODate)
dataFrame = pandas.DataFrame([pandasDate], columns=['A'])
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == qDate
assert model.data(index, role=Qt.EditRole) == qDate
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == pandasDate
assert isinstance(model.data(index, role=DATAFRAME_ROLE), pandas.Timestamp)
class TestSetData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame([10], columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
return model.index(0, 0)
def test_invalidIndex(self, model):
assert model.setData(QtCore.QModelIndex(), None) == False
def test_nothingHasChanged(self, model, index):
assert model.setData(index, 10) == False
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
with pytest.raises(TypeError) as excinfo:
model.setData(index, numpy.complex64(92+151j))
assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
(u"äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame =
|
pandas.DataFrame([value], columns=['A'])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from scipy.stats import norm
from data_unprocessed import X, y, mean_team_stats
from train import rf, xgb, et, sv, gb
from calibration import rf_isotonic, xgb_sigmoid, gb_sigmoid, sv_sigmoid, et_isotonic
from schedule import games, num_of_games
import warnings
warnings.filterwarnings("ignore")
today_df = pd.DataFrame()
# Similar to the code in predict.py, but used here to compare uncalibrated vs. calibrated probabilities
for game in games:
road_team, home_team = game[0],game[1]
team_a = mean_team_stats.loc[mean_team_stats['TEAM_NAME'] == road_team]
team_a.drop(['TEAM_ID','TEAM_NAME'],axis=1,inplace=True)
team_b = mean_team_stats.loc[mean_team_stats['TEAM_NAME'] == home_team]
team_b.drop(['TEAM_ID','TEAM_NAME'],axis=1,inplace=True)
cols_a = [col + 'A' for col in team_a.columns]
cols_b = [col + 'B' for col in team_b.columns]
team_a.columns = cols_a
team_a.reset_index(drop=True,inplace=True)
team_b.columns = cols_b
team_b.reset_index(drop=True,inplace=True)
game = pd.concat([team_a,team_b],axis=1)
today_df = today_df.append(game)
today_df.reset_index(drop=True,inplace=True)
# Add home court adv. columns
today_df['TEAMA_HCA'] = pd.Series([0]*num_of_games, index=today_df.index)
today_df['TEAMB_HCA'] = pd.Series([1]*num_of_games, index=today_df.index)
today_df = today_df.reindex(sorted(today_df.columns), axis=1)  # sort columns alphabetically (reindex_axis was removed in newer pandas)
# Model prediction
rf_prob = rf.predict_proba(today_df)
rf_prob = rf_prob[:,1]
xgb_prob = xgb.predict_proba(today_df)
xgb_prob = xgb_prob[:,1]
gb_prob = gb.predict_proba(today_df)
gb_prob = gb_prob[:,1]
et_prob = et.predict_proba(today_df)
et_prob = et_prob[:,1]
sv_prob = sv.predict_proba(today_df)
sv_prob = sv_prob[:,1]
# Process predictions for neat display in dataframe
games_data = np.empty((len(games),10))
games_data[:,0] = np.around(rf_prob*100,2)
games_data[:,1] = np.around(norm(0,10.5).ppf(rf_prob),1) # Convert win probability into point spread
games_data[:,2] = np.around(xgb_prob*100,2)
games_data[:,3] = np.around(norm(0,10.5).ppf(xgb_prob),1)
games_data[:,4] = np.around(gb_prob*100,2)
games_data[:,5] = np.around(norm(0,10.5).ppf(gb_prob),1)
games_data[:,6] = np.around(sv_prob*100,2)
games_data[:,7] = np.around(norm(0,10.5).ppf(sv_prob),1)
games_data[:,8] = np.around(et_prob*100,2)
games_data[:,9] = np.around(norm(0,10.5).ppf(et_prob),1)
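# How the probability-to-spread conversion above works: game margins are treated
# as roughly normal with a standard deviation of 10.5 points, so the inverse CDF
# maps a win probability to an implied point spread. For example,
# norm(0, 10.5).ppf(0.50) = 0.0 (a pick'em game) and norm(0, 10.5).ppf(0.75)
# is roughly +7.1 points in favor of the road team.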
games_str = []
for game in games:
t1 = game[0]
t2 = game[1]
teams = t1 + ' at ' + t2
games_str.append(teams)
# Before calibration:
print("*******************BEFORE CALIBRATION*******************")
games_df =
|
pd.DataFrame(data=games_data,index=games_str,columns=['RF Road Team Win Prob','RF Road Team Point Spread','XGB Road Team Win Prob','XGB Road Team Point Spread','GB Road Team Win Prob','GB Road Team Point Spread','SV Road Team Win Prob','SV Road Team Point Spread','ET Road Team Win Prob','ET Road Team Point Spread'])
print(games_df)
|
pandas.DataFrame
|
"""
vocmaxlib
This python package calculates the maximum string size for a photovoltaic
installation. The method is consistent with the NEC 2017 690.7 standard.
toddkarin
"""
import numpy as np
import pvlib
import pvlib.bifacial
# import nsrdbtools
# import socket
# import matplotlib
# matplotlib.use('TkAgg')
# import matplotlib.pyplot as plt
import pandas as pd
import datetime
import glob
import pytz
from vocmax import nsrdb
import tqdm
import os
import urllib
import pytz
import sys
import os
import warnings
from pvlib.iotools import get_psm3
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from vocmax.bifacial import pvfactors_timeseries
import glob
import vocmax
# from pvlib.bifacial import PVFactorsReportBuilder as PVFactorsReportBuilder
# Parameters entering into Voc calculation:
cec_modules = pvlib.pvsystem.retrieve_sam('CeCMod')
# Descriptions of the various parameters used in the calculation.
explain = {
'Voco': 'Open circuit voltage at reference conditions, in V',
'Bvoco': 'Temperature dependence of open circuit voltage, in V/C',
'Mbvoc': """Coefficient providing the irradiance dependence of the
temperature coefficient of open circuit voltage, typically assumed to be
zero, in V/C
""",
'n_diode': 'Diode ideality factor, unitless',
'cells_in_series': 'Number of cells in series in each module, dimensionless',
'FD': """Fraction of diffuse irradiance arriving at the cell, typically
assumed to be 1, dimensionless
""",
'alpha_sc': """The short-circuit current temperature coefficient of the
module, in A/C
""",
'a_ref': """The product of the usual diode ideality factor (n_diode,
unitless), number of cells in series (cells_in_series), and cell thermal
voltage at reference conditions, in units of V.
""",
'I_L_ref': """The light-generated current (or photocurrent) at reference
conditions, in amperes.
""",
'I_o_ref': """The dark or diode reverse saturation current at reference
conditions, in amperes.
""",
'R_sh_ref': """The shunt resistance at reference conditions, in ohms.""",
'R_s': """The series resistance at reference conditions, in ohms.""",
'Isco': """Short circuit current at reference conditions, in amperes.""",
'Impo': """Maximum-power current at reference conditions, in amperes.""",
'Vmpo': """Maximum-power voltage at reference conditions, in volts.""",
'Pmpo': """Maximum-power power at reference conditions, in watts.""",
'Bisco': """Temperature coefficient of short circuit current, in A/C"""
}
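# A minimal sketch of how cec_modules (retrieved above) is typically used;
# the column names are assumptions based on pvlib's CEC module table:
# first_module = cec_modules.iloc[:, 0]                # each column is one module record
# print(first_module[['V_oc_ref', 'beta_oc', 'N_s']])  # Voc, its temperature coefficient, cells in series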
def get_weather_data(lat, lon,
api_key,
cache_directory='cached_weather_data',
attributes='ghi,dhi,dni,wind_speed,air_temperature',
force_download=False,
full_name='<NAME>',
email='<EMAIL>',
affiliation='vocmax',
years=np.arange(1998, 2018.5),
interval=30,
):
"""
Retrieve weather data from the national solar radiation database (NSRDB).
Description
-----------
df, info = get_weather_data(lat,lon,api_key) gets weather data from the
NSRDB using the NSRDB api. Data download for a single location takes
around 3 minutes. Once weather data is downloaded, it is stored in a
local cache so it can be retrieved quickly. One sample point (lat=37.876,
lon=-122.247) is provided with the function so sample data can be easily
loaded without an api key.
Api keys are available free of charge at https://developer.nrel.gov/signup/
Note: data can only be downloaded from the NSRDB sequentially (it is not
possible to download data using multiple scripts in parallel).
Examples
--------
lat, lon = 37.876, -122.247
# Note: Replace with your api key
api_key = '<KEY>'
df, info = vocmax.get_weather_data(lat,lon,api_key)
Parameters
----------
lat : float or int
latitude in decimal degrees, between -90 and 90, north is positive
lon : float or int
longitude in decimal degrees, between -180 and 180, east is positive
api_key : str
NREL Developer Network API key
email : str
NREL API uses this to automatically communicate messages back
to the user only if necessary
names : str, default 'tmy'
PSM3 API parameter specifying year or TMY variant to download, see notes
below for options
interval : int, default 60
interval size in minutes, can only be either 30 or 60. Only used for
single-year requests (i.e., it is ignored for tmy/tgy/tdy requests).
leap_day : boolean, default False
include leap day in the results. Only used for single-year requests
(i.e., it is ignored for tmy/tgy/tdy requests).
full_name : str, default 'pvlib python'
optional
affiliation : str, default 'pvlib python'
optional
timeout : int, default 30
time in seconds to wait for server response before timeout
force_download : bool
If True, force downloading of weather data regardless of whether
that particular location has already been downloaded. Default is False.
tz_localize : bool
Whether to localize the time zone.
Returns
-------
df : pandas dataframe
Dataframe containing weather data with fields
'year' - year of row.
'month', 'day', 'hour', 'minute', 'dni', 'ghi', 'dhi',
'temp_air', 'wind_speed'.
info : dictionary
Dictionary containing information on the weather dataset.
"""
# First check if data exists in the cache directory.
if not force_download:
search_str = os.path.join(cache_directory,
'*_{:3.3f}_{:3.3f}.npz'.format(lat, lon))
print(search_str)
# One sample data point is provided with the package so that users don't
# have to get an api key to try it out.
if '{:3.3f}_{:3.3f}'.format(lat, lon) == '37.876_-122.247':
print('getting sample data point')
dir_path = os.path.dirname(os.path.realpath(__file__))
df, info = nsrdb.get_local_weather_data(
os.path.join(dir_path,
'123796_37.89_-122.26_search-point_37.876_-122.247.npz')
)
return df, info
# Otherwise search the cache for a weather data file that has already
# been downloaded.
filename = glob.glob(search_str)
if len(filename) > 0:
# Cached weather data found, load it
df, info = nsrdb.get_local_weather_data(filename[0])
# TODO: Add checks that the loaded file has the same options as in the function call.
return df, info
else:
# No cached weather data found.
pass
# Pull data from NSRDB because either force_download=True or no cached datafile found.
print('Downloading weather data and saving to "cached_weather_data" ...')
for j in tqdm.tqdm(range(len(years))):
year = '{:.0f}'.format(years[j])
info_iter, df_iter = get_psm3(
latitude=lat,
longitude=lon,
api_key=api_key,
email=email,
names=year,
interval=30,
leap_day=False,
full_name=full_name,
affiliation=affiliation,
timeout=30)
#
# # Declare url string
# url = 'http://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
# year = year, lat = lat, lon = lon, leap = leap_year, interval = interval,
# utc = utc, name = your_name, email = your_email,
# mailing_list = mailing_list, affiliation = your_affiliation,
# reason = reason_for_use, api = api_key, attr = attributes)
#
# # file_name, urllib.request.urlretrieve(url, "testfile.txt")
# with urllib.request.urlopen(url) as f:
# # Get the data as a string.
# response = f.read().decode('utf-8')
#
# # Read the first few lines to get info on datafile
# info_df = pd.read_csv(StringIO(response), nrows=1)
#
# # Create a dictionary for the info file.
# info_iter = {}
# for p in info_df:
# info_iter[p] = info_df[p].iloc[0]
#
# df_iter = pd.read_csv(StringIO(response), skiprows=2)
#
# if np.diff(df_iter[0:2].Minute) == 30:
# interval = '30'
# info_iter['interval_in_hours'] = 0.5
# elif np.diff(df_iter[0:2].Minute) == 0:
# interval = '60'
# info_iter['interval_in_hours'] = 1
# else:
# print('Interval not understood!')
info_iter['interval_in_hours'] = interval / 60
# Set the time index in the pandas dataframe:
year_iter = str(df_iter['Year'][0])
df_iter = df_iter.set_index(
pd.date_range('1/1/{yr}'.format(yr=year_iter),
freq='{}Min'.format(interval),
periods=len(df_iter)))
df_iter.index = df_iter.index.tz_localize(
pytz.FixedOffset(float(info_iter['Time Zone'] * 60)))
if j == 0:
info = info_iter
df = df_iter
else:
df = df.append(df_iter)
# Process/compress the downloaded dfs.
info['timedelta_in_years'] = (df.index[-1] - df.index[0]).days / 365
# Convert to int for lowering file size.
dni = np.array(df['DNI'].astype(np.int16))
dhi = np.array(df['DHI'].astype(np.int16))
ghi = np.array(df['GHI'].astype(np.int16))
temp_air = np.array(df['Temperature'].astype(np.float32))
wind_speed = np.array(df['Wind Speed'].astype(np.float16))
year = np.array(df['Year'].astype(np.int16))
month = np.array(df['Month'].astype(np.int8))
day = np.array(df['Day'].astype(np.int8))
hour = np.array(df['Hour'].astype(np.int8))
minute = np.array(df['Minute'].astype(np.int8))
cache_directory = 'cached_weather_data'
if not os.path.exists(cache_directory):
print('Creating cache directory')
os.mkdir(cache_directory)
save_filename = os.path.join(cache_directory,
'{}_{:3.2f}_{:3.2f}_search-point_{:3.3f}_{:3.3f}.npz'.format(
info['Location ID'], info['Latitude'],
info['Longitude'], lat, lon)
)
# Write to file.
np.savez_compressed(save_filename,
Source=info['Source'],
Location_ID=info['Location ID'],
Latitude=info['Latitude'],
Longitude=info['Longitude'],
Elevation=info['Elevation'],
local_time_zone=info['Local Time Zone'],
interval_in_hours=info['interval_in_hours'],
timedelta_in_years=info['timedelta_in_years'],
Version=info['Version'],
dni=dni,
dhi=dhi,
ghi=ghi,
temp_air=temp_air,
wind_speed=wind_speed,
year=year,
month=month,
day=day,
hour=hour,
minute=minute)
# Reload from file.
df, info = nsrdb.get_local_weather_data(save_filename)
return df, info
# def ashrae_get_data():
# dir_path = os.path.dirname(os.path.realpath(__file__))
#
# # Load temperature difference data.
# ashrae = pd.read_csv(
# os.path.join(dir_path, 'ASHRAE2017_temperature_data.csv')
# )
# return ashrae
def ashrae_get_design_conditions_at_loc(lat, lon, ashrae):
"""
Get the ASHRAE design conditions data closest to the lat/lon of interest.
Parameters
----------
lat
lon
ashrae : dataframe
Returns
-------
dataframe
fields are
'Latitude'
'Longitude'
'Extreme Annual Mean Minimum Dry Bulb Temperature' - ASHRAE
extreme minimum dry bulb temperature, in C
"""
# df = ashrae_get_design_conditions()
# Calculate distance to search point.
distance = nsrdb.haversine_distance(lat, lon, ashrae['Lat'], ashrae['Lon'])
closest_idx = distance.idxmin()
return ashrae.iloc[closest_idx]
def nec_correction_factor(temperature):
"""
NEC 690.7(A)(2) correction factor from NEC2017.
Parameters
----------
temperature : numeric
Temperature in C.
Returns
-------
correction_factor : flat
"""
is_array = isinstance(temperature, np.ndarray)
temperature = np.array([temperature])
f = np.zeros_like(temperature, dtype='float') + 1
f[temperature < 25] = 1.02
f[temperature < 20] = 1.04
f[temperature < 15] = 1.06
f[temperature < 10] = 1.08
f[temperature < 5] = 1.10
f[temperature < 0] = 1.12
f[temperature < -5] = 1.14
f[temperature < -10] = 1.16
f[temperature < -15] = 1.18
f[temperature < -20] = 1.20
f[temperature < -25] = 1.21
f[temperature < -30] = 1.23
f[temperature < -35] = 1.25
f[np.isnan(temperature)] = np.nan
if not is_array:
f = f[0]
return f
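# Example of intended use (a minimal sketch): 25 C and above maps to a factor
# of 1.0, and a design low temperature of -12 C falls in the 1.16 bin, so a
# module with Voc = 40 V at STC is treated as 40 * 1.16 = 46.4 V for NEC 690.7
# string sizing.
# nec_correction_factor(-12.0)   # -> 1.16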
def get_nsrdb_temperature_error(lat, lon,
number_of_closest_points=5):
"""
Find the temperature error for a particular location.
The NSRDB database provides temperature data for many locations. However,
these data are taken from the MERRA-2 dataset, and have some error
compared to ground measurements. The temperature error depends on location.
As a comparison, we calculated the extreme annual mean minimum dry bulb
temperature using NSRDB data and compared it to ASHRAE data. The temperature
difference determines the safety factor necessary for string length
calculations.
This function finds the closest points to a particular lat,lon coordinate
in the ASHRAE dataset and returns the maximum temperature difference (
NSRDB - ASHRAE) for these locations. A higher temperature difference
means that the NSRDB is overestimating the true temperature that is
measured at a ground station. Higher positive temperature differences
mean that a larger safety factor should be used when calculating string
length. The safety factor can be calculated from this temperature difference.
Examples
--------
temperature_difference = vocmax.get_nsrdb_temperature_error(lat,lon)
Parameters
----------
lat : float
latitude of search point in fractional degrees
lon : float
longitude of search point in fractional degrees
number_of_closest_points : int
The number of closest datapoints to find. Default is 5.
Returns
-------
temperature_difference : float
max temperature difference between NSRDB point and closest ASHRAE
points. A positive number means that the NSRDB design temperature is
higher than the ASHRAE design temperature. If a positive temperature
difference is found, then an additional safety factor is suggested to
account for this error in the NSRDB dataset.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load temperature difference data.
df = pd.read_pickle(
os.path.join(dir_path, 'nsrdb_ashrae_comparison.pkl')
)
# Calculate distance to search point.
distance = vocmax.nsrdb.haversine_distance(lat, lon, df['lat'], df['lon'])
# Find the closest locations.
distance_sort = distance.sort_values()
closest_idx = distance_sort.index[:number_of_closest_points]
# Calculate temperature difference
temperature_difference = df['nsrdb-ashrae Extreme_Annual_Mean_Min_DB'].loc[
closest_idx]
return temperature_difference.max()
def ashrae_import_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Load the ASHRAE 2017 design conditions excel file. This file is NOT
provided in vocmax, it must be purchased directly from ASHRAE and added
to the current directory. The filename is '2017DesignConditions_s.xlsx'.
The '_s' at the end of the filename stands for 'SI'. There is also
another file '2017DesignConditions_p.xlsx' that contains measurements in
imperial units, do not use this file.
In order to use this function, purchase the weather data viewer DVD,
version 6.0, available at:
https://www.techstreet.com/ashrae/standards/weather-data-viewer-dvd-version-6-0?ashrae_auth_token=<PASSWORD>89-8065208f2e36&product_id=1949790
Importing the excel file takes around 1 minute, the data is then saved as
a csv file with name filename + '.csv' in the current directory. This
makes loading quick the second time.
Parameters
----------
filename : string
Filename to import.
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the weather data file.
"""
# filename = '2017DesignConditions_s.xlsx'
df = pd.read_excel(filename,
skiprows=0,
sheet_name=0,
header=[1, 2, 3],
verbose=False)
filename_out = filename + '.csv'
df_out = pd.DataFrame(
{'Lat': np.array(df['Lat']).flatten(),
'Lon': np.array(df['Lon']).flatten(),
'Country': np.array(df['Country']).flatten(),
'Station Name': np.array(df['Station Name']).flatten(),
'Extreme_Annual_Mean_Min_DB': np.array(
df['Extreme Annual DB']['Mean']['Min']).flatten(),
'Extreme_Annual_Standard Deviation_Min_DB': np.array(
df['Extreme Annual DB']['Standard Deviation']['Min']).flatten(),
'20-Year Return Period Extreme Min DB': np.array(
df['n-Year Return Period Values of Extreme DB']['n=20 years'][
'Min']).flatten(),
}
)
df_out.to_csv(filename_out, index=False)
return df_out
def ashrae_is_design_conditions_available(
filename='2017DesignConditions_s.xlsx'):
return os.path.exists(filename)
def ashrae_get_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Get the ASHRAE design conditions data.
Parameters
----------
filename
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the ASHARE design
conditions file
"""
if os.path.exists(filename + '.csv'):
df = pd.read_csv(filename + '.csv')
elif os.path.exists(filename):
print(
"""Importing and compressing ASHRAE design conditions excel file. Future calls will quickly call csv version. """)
print('Found file: {}'.format(filename))
print('Expected loading time: 1.0 minute')
df = ashrae_import_design_conditions(filename)
else:
raise Exception(
"Design conditions file '{}' not found. File must be purchased from ASHRAE and placed in current directory.".format(
filename))
return df
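# Minimal usage sketch (requires the purchased ASHRAE file, or its cached csv,
# in the working directory): load the design-conditions table once, then look
# up the record closest to a site of interest.
# ashrae = ashrae_get_design_conditions()
# site = ashrae_get_design_conditions_at_loc(37.876, -122.247, ashrae)
# t_min = site['Extreme_Annual_Mean_Min_DB']   # extreme annual mean min dry bulb, in C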
def simulate_system(weather, info, module_parameters,
racking_parameters, thermal_model,
irrad_model='perez',
nighttime_irradiance_addition=0
):
"""
Use the PVLIB SAPM model to calculate maximum Voc.
Parameters
----------
weather : Dataframe
Weather data dataframe containing the columns:
'dni': Direct Normal Irradiance (W/m^2)
'dhi': Diffuse Horizontal Irradiance (W/m^2)
'ghi' Global Horizontal Irradiance (W/m^2)
'temp_air': air temperature (C)
'wind_speed': 10 m wind speed in (m/s)
info : dict
Dictionary containing location information with fields:
'Latitude': float
latitude in degrees
'Longitude': float
longitude in degrees.
Other fields may be included in info as well and will not interfere
with operation.
module_parameters : dict
Dict or Series containing the below fields describing the module
'Voco' : float
Open circuit voltage at reference conditions.
'Bvoco' : float
Temperature coefficient of open circuit voltage, in Volts/C.
'cells_in_series' : int
Number of cells in series in the module.
'n_diode' : float
Diode ideality factor
'Mbvoc' : float
Irradiance dependence of the temperature coefficient of
open-circuit voltage, typically assumed to be zero.
'FD' : float
Fraction of diffuse irradiance used by the module.
'efficiency' : float
Module fractional efficiency.
'iv_model' : string
Model for calculating Voc. Can be 'sapm', 'cec' or 'desoto'.
TODO: Describe better.
'aoi_model' : string
Model for calculating the angle-of-incidence loss function. Can
be 'no_loss' or 'ashrae'. The 'no_loss' method assumes that no
extra reflection losses are accrued at non-normal angles of
incidence. The 'ashrae' option uses the model in
pvlib.pvsystem.ashraeiam
'is_bifacial' : bool
True if module is bifacial. Using False will force the use of
monofacial models even if 'bifacial_model' in the
racking_parameters input dict is set to a value.
bifaciality_factor : float
Number describing the efficiency of the backside of the module
relative to the frontside. A typical values is 0.7.
racking_parameters : dict
dictionary describing the racking setup. Contains fields:
'racking_type' : str
Can be 'fixed_tilt' for a stationary PV system or 'single_axis'
for a single axis tracker.
'surface_tilt' : float
If racking_type is 'fixed_tilt', specify the surface tilt in
degrees from horizontal.
'surface_azimuth' : float
If racking type is 'surface_azimuth', specify the racking azimuth
in degrees. A value of 180 degrees has the module face oriented
due South.
'axis_tilt' : float
If racking_type is 'single_axis', specify the the tilt of the
axis of rotation (i.e, the y-axis defined by axis_azimuth) with
respect to horizontal, in decimal degrees. Standard value is 0.
'axis_azimuth' : float
If racking_type is 'single_axis', specify a value denoting the
compass direction along which the axis of rotation lies. Measured
in decimal degrees East of North. Standard value is 0.
'backtrack' : bool
Controls whether the tracker has the capability to ''backtrack''
to avoid row-to-row shading. False denotes no backtrack
capability. True denotes backtrack capability.
'gcr' : float
A value denoting the ground coverage ratio of a tracker system
which utilizes backtracking; i.e. the ratio between the PV array
surface area to total ground area. A tracker system with modules
2 meters wide, centered on the tracking axis, with 6 meters
between the tracking axes has a gcr of 2/6=0.333. If gcr is not
provided, a gcr of 2/7 is default. gcr must be <=1
bifacial_model : string
Can be 'proportional' or 'pvfactors'. The 'proportional' bifacial
modeling method calculates the effective irradiance on the
frontside of the module and then assumes that the backside
irradiance is equal to the frontside irradiance times the
backside_irradiance_fraction times the bifaciality_factor. The
'pvfactors' method uses bifacial modeling found in the pvfactors
package.
backside_irradiance_fraction : float
For simple bifacial modeling, the backside irradiance is assumed
to be equal to the frontside irradiance times the
backside_irradiance_fraction. Required if using
bifacial_model 'proportional'. Typical value is 0.3.
pvrow_height : float.
Height of the pv rows, measured at their center (m). Required if
using bifacial_model 'pvfactors'.
pvrow_width : float
Width of the pv rows in the considered 2D plane (m). Required if
using bifacial_model 'pvfactors'.
albedo: float
Ground albedo. Required if using bifacial_model 'pvfactors'.
n_pvrows: int, default 3
Number of PV rows to consider in the PV array. Required if
using bifacial_model 'pvfactors'.
index_observed_pvrow: int, default 1
Index of the PV row whose incident irradiance will be returned.
Indices of PV rows go from 0 to n_pvrows-1. Required if using
bifacial_model 'pvfactors'.
rho_front_pvrow: float, default 0.03
Front surface reflectivity of PV rows. Required if using
bifacial_model 'pvfactors'.
rho_back_pvrow: float, default 0.05
Back surface reflectivity of PV rows. Required if using
bifacial_model 'pvfactors'.
horizon_band_angle: float, default 15
Elevation angle of the sky dome's diffuse horizon band (deg).
Required if using bifacial_model 'pvfactors'.
thermal_model : dict
named_model : string
If named_model is 'explicit', then use SAPM parameters defined by
a, b, and deltaT. Otherwise named_model can be one of the
following strings:
‘open_rack_cell_glassback’ (default)
‘roof_mount_cell_glassback’
‘open_rack_cell_polymerback’
‘insulated_back_polymerback’
‘open_rack_polymer_thinfilm_steel’
‘22x_concentrator_tracker’
a: float
SAPM module parameter for establishing the upper limit for
module temperature at low wind speeds and high solar
irradiance.
b :float
SAPM module parameter for establishing the rate at which the
module temperature drops as wind speed increases (see SAPM
eqn. 11).
deltaT :float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the reference
irradiance, E0.
open_circuit_rise : bool
The SAPM parameters are measured for modules at maximum power
point. At open-circuit voltage the module is warmer because less
energy is exported as electricity. If open_circuit_rise is True
then this temperature rise is taken into account, if False then
it is not.
thermal_mass : bool
Whether to take into account the thermal mass of the modules when
calculating temperature. Thermal mass is performed using an
exponentially weighted moving average [Bosco2016]
thermal_time_constant : float
Thermal time constant of the modules, in minutes.
irrad_model : str
Irradiance model for determining in-plane sky diffuse irradiance
component using the specified sky diffuse irradiance model. Default
is 'perez'
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Returns
-------
dataframe containing simulation results. Includes the fields present in
input 'weather' in addition to:
'v_oc': open circuit voltage in Volts
'aoi': angle of incidence in degrees.
'temp_cell': cell temperature in C.
References
----------
[Bosco2016] <NAME>, et al., Climate specific thermomechanical fatigue
of flat plate photovoltaic module solder joints, Microelectronics
Reliability (2016), http://dx.doi.org/10.1016/j.microrel.2016.03.024
"""
# Rename the weather data for input to PVLIB.
if np.all([c in weather.columns for c in ['dni', 'dhi', 'ghi', 'temp_air',
'wind_speed', 'year', 'month',
'day', 'hour', 'minute']]):
# All columns are properly labeled, skip any relabeling.
pass
else:
# Try renaming from NSRDB default values.
weather = weather.rename(
columns={'DNI': 'dni',
'DHI': 'dhi',
'GHI': 'ghi',
'Temperature': 'temp_air',
'Wind Speed': 'wind_speed',
'Year': 'year',
'Month': 'month',
'Day': 'day',
'Hour': 'hour',
'Minute': 'minute'})
df = weather.copy()
# Set location
location = pvlib.location.Location(latitude=info['Latitude'],
longitude=info['Longitude'])
# Add module parameters if some aren't specified.
module_parameters = add_default_module_params(module_parameters)
# #
# start_time = time.time()
# # This is the most time consuming step
# solar_position = location.get_solarposition(weather.index, method='nrel_numpy')
# print( time.time()-start_time)
#
# Ephemeris method is faster and gives very similar results.
solar_position = location.get_solarposition(weather.index,
method='ephemeris')
# Get surface tilt and azimuth
if racking_parameters['racking_type'] == 'fixed_tilt':
surface_tilt = racking_parameters['surface_tilt']
surface_azimuth = racking_parameters['surface_azimuth']
# idealized assumption
elif racking_parameters['racking_type'] == 'single_axis':
# Avoid nan warnings by presetting unphysical zenith angles.
solar_position['apparent_zenith'][
solar_position['apparent_zenith'] > 90] = 90
# Todo: Check apparent_zenith vs. zenith.
single_axis_vals = pvlib.tracking.singleaxis(
solar_position['apparent_zenith'],
solar_position['azimuth'],
axis_tilt=racking_parameters['axis_tilt'],
axis_azimuth=racking_parameters['axis_azimuth'],
max_angle=racking_parameters['max_angle'],
backtrack=racking_parameters['backtrack'],
gcr=racking_parameters['gcr']
)
surface_tilt = single_axis_vals['surface_tilt']
surface_azimuth = single_axis_vals['surface_azimuth']
else:
raise Exception('Racking system not recognized')
# Extraterrestrial radiation
dni_extra = pvlib.irradiance.get_extra_radiation(solar_position.index)
airmass = location.get_airmass(solar_position=solar_position)
# Perez is a good diffuse sky model
total_irrad = pvlib.irradiance.get_total_irradiance(
surface_tilt,
surface_azimuth,
solar_position['zenith'],
solar_position['azimuth'],
weather['dni'].astype('float'),
weather['ghi'].astype('float'),
weather['dhi'].astype('float'),
model='perez',
dni_extra=dni_extra,
airmass=airmass['airmass_relative'],
albedo=racking_parameters['albedo'])
# Add a small irradiance during night time
for k in total_irrad.keys():
total_irrad[k][np.isnan(total_irrad[k])] = 0
total_irrad[k] = total_irrad[k] + nighttime_irradiance_addition
if racking_parameters['racking_type'] == 'fixed_tilt':
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth,
solar_position['zenith'],
solar_position['azimuth'])
elif racking_parameters['racking_type'] == 'single_axis':
aoi = single_axis_vals['aoi']
else:
raise Exception('Racking type not understood')
# aoi = single_axis_vals['aoi']
if (not 'named_model' in thermal_model) or thermal_model[
'named_model'] == 'explicit':
thermal_model_params = {k: thermal_model[k] for k in
['a', 'b', 'deltaT']}
else:
temperature_model_parameters = \
pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']
thermal_model_params = temperature_model_parameters[
thermal_model['named_model']]
temperature_cell = pvlib.temperature.sapm_cell(
poa_global=total_irrad['poa_global'],
temp_air=weather['temp_air'],
wind_speed=weather['wind_speed'],
a=thermal_model_params['a'],
b=thermal_model_params['b'],
deltaT=thermal_model_params['deltaT'])
# temps = pvlib.temperature.sapm_cell(total_irrad['poa_global'],
# weather['wind_speed'],
# weather['temp_air'],
# thermal_model_params)
# if thermal_model['thermal_mass']:
# thermal_alpha = np.exp(-(info['interval_in_hours'] * 60) / 270)
#
if thermal_model['open_circuit_rise']:
temperature_cell = weather['temp_air'] + \
(temperature_cell - weather['temp_air']) / (
1 - module_parameters['efficiency'])
    # Spectral loss is typically very small, on the order of a few percent;
    # assume no spectral loss for simplicity.
spectral_loss = 1
if not 'aoi_model' in module_parameters:
module_parameters['aoi_model'] = 'no_loss'
if not 'FD' in module_parameters:
module_parameters['FD'] = 1
# AOI loss:
if module_parameters['aoi_model'] == 'no_loss':
aoi_loss = 1
elif module_parameters['aoi_model'] == 'ashrae':
aoi_loss = pvlib.iam.ashrae(aoi,
b=module_parameters['ashrae_iam_param'])
else:
raise Exception('aoi_model must be ashrae or no_loss')
# Calculate effective irradiance.
if ('is_bifacial' in module_parameters) and \
(module_parameters['is_bifacial'] == True):
if not 'bifacial_model' in racking_parameters:
            warnings.warn("""'bifacial_model' in racking_parameters is not
            specified, can be 'proportional' or 'pvfactors'. Defaulting to
            'proportional'.""")
            racking_parameters['bifacial_model'] = 'proportional'
if racking_parameters['bifacial_model'] == 'proportional':
effective_irradiance_front = calculate_effective_irradiance(
total_irrad['poa_direct'],
total_irrad['poa_diffuse'],
aoi_loss=aoi_loss,
FD=module_parameters['FD']
)
if not 'backside_irradiance_fraction' in racking_parameters:
raise Exception("""Must specify 'backside_irradiance_fraction' in
racking_parameters for bifacial modeling. """
)
effective_irradiance_back = effective_irradiance_front * \
racking_parameters[
'backside_irradiance_fraction'] * \
module_parameters['bifaciality_factor']
effective_irradiance = effective_irradiance_front + effective_irradiance_back
df['effective_irradiance_front'] = effective_irradiance_front
df['effective_irradiance_back'] = effective_irradiance_back
elif racking_parameters['bifacial_model'] == 'pvfactors':
total_inc_front, total_inc_back, poa_front_absorbed, poa_back_absorbed = pvfactors_timeseries(
solar_position['azimuth'], solar_position['zenith'],
surface_azimuth,
surface_tilt,
racking_parameters['axis_azimuth'],
weather.index, weather['dni'], weather['dhi'],
racking_parameters['gcr'],
racking_parameters['pvrow_height'],
racking_parameters['pvrow_width'],
racking_parameters['albedo'],
n_pvrows=racking_parameters['n_pvrows'],
# fast_mode_pvrow_index=racking_parameters['fast_mode_pvrow_index'],
index_observed_pvrow=racking_parameters['index_observed_pvrow'],
rho_front_pvrow=racking_parameters['rho_front_pvrow'],
rho_back_pvrow=racking_parameters['rho_back_pvrow'],
horizon_band_angle=racking_parameters['horizon_band_angle'],
# run_parallel_calculations=racking_parameters['run_parallel_calculations'],
# n_workers_for_parallel_calcs=racking_parameters['n_workers_for_parallel_calcs']
)
effective_irradiance_front = np.nan_to_num(poa_front_absorbed)
effective_irradiance_back = np.nan_to_num(poa_back_absorbed)
effective_irradiance = effective_irradiance_front + effective_irradiance_back
df['effective_irradiance_front'] = effective_irradiance_front
df['effective_irradiance_back'] = effective_irradiance_back
else:
raise Exception(
"racking_parameters['bifacial_model'] must be either 'proportional' or 'pvfactors'. ")
else:
# Not bifacial, i.e. monofacial module.
effective_irradiance = calculate_effective_irradiance(
total_irrad['poa_direct'],
total_irrad['poa_diffuse'],
aoi_loss=aoi_loss,
FD=module_parameters['FD']
)
v_oc = sapm_voc(effective_irradiance, temperature_cell,
module_parameters)
df['aoi'] = aoi
# df['aoi_loss'] = aoi_loss
df['temp_cell'] = temperature_cell
df['temp_air'] = weather['temp_air']
df['effective_irradiance'] = effective_irradiance
df['v_oc'] = v_oc
df['surface_tilt'] = surface_tilt
df['surface_azimuth'] = surface_azimuth
df['solar_zenith'] = solar_position['apparent_zenith']
df['solar_azimuth'] = solar_position['azimuth']
df['poa_direct'] = total_irrad['poa_direct']
df['poa_diffuse'] = total_irrad['poa_diffuse']
return df
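# Illustrative sketch (not part of the original source): the simulation routine
# above expects a weather dataframe plus 'info', 'racking_parameters' and
# 'thermal_model' dictionaries (and a SAPM-style module_parameters dict with
# fields such as 'Voco', 'Bvoco', 'cells_in_series', 'n_diode'). The keys below
# are taken from the accesses in the function body; the numeric values are
# placeholders only.
def _example_simulation_inputs():
    times = pd.date_range('2019-01-01', periods=24, freq='1H', tz='Etc/GMT+7')
    weather = pd.DataFrame({'dni': 0., 'dhi': 0., 'ghi': 0.,
                            'temp_air': 10., 'wind_speed': 2.,
                            'year': times.year, 'month': times.month,
                            'day': times.day, 'hour': times.hour,
                            'minute': times.minute}, index=times)
    info = {'Latitude': 37.9, 'Longitude': -122.3}
    racking_parameters = {'racking_type': 'fixed_tilt',
                          'surface_tilt': 30, 'surface_azimuth': 180,
                          'albedo': 0.25}
    thermal_model = {'named_model': 'open_rack_glass_polymer',
                     'open_circuit_rise': False}
    return weather, info, racking_parameters, thermal_model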
#
# def pvfactors_timeseries(
# solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
# axis_azimuth,
# timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
# n_pvrows=3,fast_mode_pvrow_index=2,index_observed_pvrow=1,
# rho_front_pvrow=0.03, rho_back_pvrow=0.05,
# horizon_band_angle=15.,
# run_parallel_calculations=True, n_workers_for_parallel_calcs=2):
# """
# Calculate front and back surface plane-of-array irradiance on
# a fixed tilt or single-axis tracker PV array configuration, and using
# the open-source "pvfactors" package.
# Please refer to pvfactors online documentation for more details:
# https://sunpower.github.io/pvfactors/
#
# Parameters
# ----------
# solar_azimuth: numeric
# Sun's azimuth angles using pvlib's azimuth convention (deg)
# solar_zenith: numeric
# Sun's zenith angles (deg)
# surface_azimuth: numeric
# Azimuth angle of the front surface of the PV modules, using pvlib's
# convention (deg)
# surface_tilt: numeric
# Tilt angle of the PV modules, going from 0 to 180 (deg)
# axis_azimuth: float
# Azimuth angle of the rotation axis of the PV modules, using pvlib's
# convention (deg). This is supposed to be fixed for all timestamps.
# timestamps: datetime or DatetimeIndex
# List of simulation timestamps
# dni: numeric
# Direct normal irradiance (W/m2)
# dhi: numeric
# Diffuse horizontal irradiance (W/m2)
# gcr: float
# Ground coverage ratio of the pv array
# pvrow_height: float
# Height of the pv rows, measured at their center (m)
# pvrow_width: float
# Width of the pv rows in the considered 2D plane (m)
# albedo: float
# Ground albedo
# n_pvrows: int, default 3
# Number of PV rows to consider in the PV array
# fast_mode_pvrow_index: int
# In fast mode, the user will be able to calculate rapidly (but with
# additional approximations) the incident irradiance on the back side
# of one PV row in the PV array, and the index of that PV row needs to
# be passed as a keyword argument to fast_mode_pvrow_index
# index_observed_pvrow: int, default 1
# Index of the PV row whose incident irradiance will be returned. Indices
# of PV rows go from 0 to n_pvrows-1.
# rho_front_pvrow: float, default 0.03
# Front surface reflectivity of PV rows
# rho_back_pvrow: float, default 0.05
# Back surface reflectivity of PV rows
# horizon_band_angle: float, default 15
# Elevation angle of the sky dome's diffuse horizon band (deg)
# run_parallel_calculations: bool, default True
# pvfactors is capable of using multiprocessing. Use this flag to decide
# to run calculations in parallel (recommended) or not.
# n_workers_for_parallel_calcs: int, default 2
# Number of workers to use in the case of parallel calculations. The
# '-1' value will lead to using a value equal to the number
# of CPU's on the machine running the model.
#
# Returns
# -------
# front_poa_irradiance: numeric
# Calculated incident irradiance on the front surface of the PV modules
# (W/m2)
# back_poa_irradiance: numeric
# Calculated incident irradiance on the back surface of the PV modules
# (W/m2)
# df_registries: pandas DataFrame
# DataFrame containing detailed outputs of the simulation; for
# instance the shapely geometries, the irradiance components incident on
# all surfaces of the PV array (for all timestamps), etc.
# In the pvfactors documentation, this is refered to as the "surface
# registry".
#
# References
# ----------
# .. [1] Anoma, <NAME>, et al. "View Factor Model and Validation for
# Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE
# Photovoltaic Specialist Conference. 2017.
# """
#
# # Convert pandas Series inputs (and some lists) to numpy arrays
# if isinstance(solar_azimuth, pd.Series):
# solar_azimuth = solar_azimuth.values
# elif isinstance(solar_azimuth, list):
# solar_azimuth = np.array(solar_azimuth)
# if isinstance(solar_zenith, pd.Series):
# solar_zenith = solar_zenith.values
# if isinstance(surface_azimuth, pd.Series):
# surface_azimuth = surface_azimuth.values
# elif isinstance(surface_azimuth, list):
# surface_azimuth = np.array(surface_azimuth)
# if isinstance(surface_tilt, pd.Series):
# surface_tilt = surface_tilt.values
# if isinstance(dni, pd.Series):
# dni = dni.values
# if isinstance(dhi, pd.Series):
# dhi = dhi.values
# if isinstance(solar_azimuth, list):
# solar_azimuth = np.array(solar_azimuth)
#
# # Import pvfactors functions for timeseries calculations.
# from pvfactors.run import (run_timeseries_engine,
# run_parallel_engine)
#
# # Build up pv array configuration parameters
# pvarray_parameters = {
# 'n_pvrows': n_pvrows,
# 'axis_azimuth': axis_azimuth,
# 'pvrow_height': pvrow_height,
# 'pvrow_width': pvrow_width,
# 'gcr': gcr,
# 'rho_front_pvrow': rho_front_pvrow,
# 'rho_back_pvrow': rho_back_pvrow,
# 'horizon_band_angle': horizon_band_angle
# }
#
# # Run pvfactors calculations: either in parallel or serially
# if run_parallel_calculations:
#
# # report_builder = ReportBuilder(fast_mode_pvrow_index)
#
# report = run_parallel_engine(
# ReportBuilder(fast_mode_pvrow_index), pvarray_parameters,
# timestamps, dni, dhi,
# solar_zenith, solar_azimuth,
# surface_tilt, surface_azimuth,
# albedo, n_processes=n_workers_for_parallel_calcs,
# fast_mode_pvrow_index=fast_mode_pvrow_index
# )
# else:
# report = run_timeseries_engine(
# PVFactorsReportBuilder.build, pvarray_parameters,
# timestamps, dni, dhi,
# solar_zenith, solar_azimuth,
# surface_tilt, surface_azimuth,
# albedo)
#
# print(report)
# # Turn report into dataframe
# df_report = pd.DataFrame(report, index=timestamps)
#
# return df_report.total_inc_front, df_report.total_inc_back
#
#
# class ReportBuilder(object):
# """A class is required to build reports when running calculations with
# multiprocessing because of python constraints"""
#
# def __init__(self, fast_mode_pvrow_index):
# """Create report builder object for fast mode simulation.
#
# Parameters
# ----------
# fast_mode_pvrow_index : int
# Index of PV row whose back side irradiance we want to report
# """
# self.fast_mode_pvrow_index = fast_mode_pvrow_index
#
# def build(self, report, pvarray):
# # Initialize the report as a dictionary
# if report is None:
# report = {'total_inc_back': []}
# # Add elements to the report
# if pvarray is not None:
# pvrow = pvarray.pvrows[self.fast_mode_pvrow_index]
# report['total_inc_back'].append(
# pvrow.back.get_param_weighted('qinc'))
# else:
# # No calculation was performed, because sun was down
# report['total_inc_back'].append(np.nan)
#
# return report
#
# @staticmethod
# def merge(reports):
# """Works for dictionary reports"""
# report = reports[0]
# # Merge other reports
# keys_report = list(reports[0].keys())
# for other_report in reports[1:]:
# for key in keys_report:
# report[key] += other_report[key]
# return report
#
# class PVFactorsReportBuilder(object):
# """In pvfactors, a class is required to build reports when running
# calculations with multiprocessing because of python constraints"""
#
# def __init__(self, fast_mode_pvrow_index):
# """Create report builder object for fast mode simulation.
#
# Parameters
# ----------
# fast_mode_pvrow_index : int
# Index of PV row whose back side irradiance we want to report
# """
# self.fast_mode_pvrow_index = fast_mode_pvrow_index
#
# # @staticmethod
# def build(self,report, pvarray):
# """Reports will have total incident irradiance on front and
# back surface of center pvrow (index=1)"""
# # Initialize the report as a dictionary
# if report is None:
# list_keys = ['total_inc_back', 'total_inc_front']
# report = {key: [] for key in list_keys}
# # Add elements to the report
# if pvarray is not None:
# # pvrow = pvarray.pvrows[1] # use center pvrow
# pvrow = pvarray.pvrows[self.fast_mode_pvrow_index]
# print(pvrow.front)
# report['total_inc_back'].append(
# pvrow.back.get_param_weighted('qinc'))
# report['total_inc_front'].append(
# pvrow.front.get_param_weighted('qinc'))
# else:
# # No calculation is performed when the sun is down
# report['total_inc_back'].append(np.nan)
# report['total_inc_front'].append(np.nan)
#
# return report
#
# @staticmethod
# def merge(reports):
# """Works for dictionary reports"""
# report = reports[0]
# # Merge only if more than 1 report
# if len(reports) > 1:
# keys_report = list(reports[0].keys())
# for other_report in reports[1:]:
# if other_report is not None:
# for key in keys_report:
# report[key] += other_report[key]
# return report
#
def add_default_module_params(module_parameters):
"""
Adds default fields to the module_parameters dictionary.
Parameters
----------
module_parameters : dict
Examples
--------
>> module = add_default_module_params(module)
Returns
-------
module_parameters : dict
Same as input, except default values are added for the following fields:
'Mbvoc' : 0
'FD' : 1
'iv_model' : 'sapm'
'aoi_model' : 'no_loss'
"""
if not 'Mbvoc' in module_parameters:
module_parameters['Mbvoc'] = 0
if not 'FD' in module_parameters:
module_parameters['FD'] = 1
if not 'iv_model' in module_parameters:
module_parameters['iv_model'] = 'sapm'
if not 'aoi_model' in module_parameters:
module_parameters['aoi_model'] = 'no_loss'
return module_parameters
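# Minimal usage sketch (illustrative, made-up module values): any of the four
# default fields already present in the dict is left untouched.
def _example_add_default_module_params():
    module = {'Voco': 60.2, 'Bvoco': -0.17, 'cells_in_series': 96,
              'n_diode': 1.2}
    module = add_default_module_params(module)
    # module now also contains 'Mbvoc': 0, 'FD': 1, 'iv_model': 'sapm',
    # 'aoi_model': 'no_loss'.
    return module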
def make_voc_summary(df, info, module_parameters,
string_design_voltage=1500,
safety_factor=0.023,
ashrae='local_load'):
"""
Calculate maximum Voc expected using four relevant standards. See
documentation for a description of the standards.
Parameters
----------
df : dataframe
Dataframe containing fields: 'v_oc', 'ghi', 'temp_air'
info : dict
Dictionary containing fields 'lat' and 'lon'. These are used to
calculate the ASHRAE standards.
module_parameters : dict
        Dictionary containing module parameters. The module parameters are
used in a direct call to the function calculate_voc.
string_design_voltage : float
Maximum allowable string voltage for the design, in V. Typically 600
V, 1200 V or 1500 V
safety_factor : float
        Safety factor for calculating string length as a fraction of max Voc.
        An example value would be 0.023, corresponding to a safety factor of
        2.3%. Safety factors are only applied to the 690.7(A)(3) standards.
Returns
-------
voc_summary : dataframe
Dataframe containing fields:
'max_module_voltage' - the maximum module voltage (not including
safety factor).
'string_design_voltage' - Maximum allowable string voltage for the
design, in V. Typically 600 V, 1200 V or 1500 V
        'safety_factor' - safety factor for calculating string length as a
        fraction of max Voc. An example value would be 0.023, corresponding
        to a safety factor of 2.3%. Safety factors are only applied to the
        690.7(A)(3) standards.
        'string_length' - Longest acceptable string length.
        'Cell Temperature' - Cell temperature used for the Voc calculation, in C.
        'POA Irradiance' - Plane-of-array irradiance used for the Voc
        calculation, in W/m^2.
"""
voc_summary = pd.DataFrame(
columns=['Conditions', 'max_module_voltage', 'string_design_voltage',
'safety_factor',
'string_length',
'Cell Temperature', 'POA Irradiance', 'long_note'],
index=['690.7(A)(3)-P99.5',
'690.7(A)(3)-P100',
'690.7(A)(1)-DAY',
'690.7(A)(1)-NSRDB',
'690.7(A)(1)-ASHRAE',
'690.7(A)(2)-ASHRAE'])
mean_yearly_min_temp = calculate_mean_yearly_min_temp(df.index,
df['temp_air'])
    if isinstance(ashrae, pd.DataFrame):
ashrae_loc = vocmax.ashrae_get_design_conditions_at_loc(
info['Latitude'], info['Longitude'], ashrae)
lowest_expected_temperature_ashrae = ashrae_loc[
'Extreme_Annual_Mean_Min_DB']
else:
ashrae_available = ashrae_is_design_conditions_available()
if ashrae_available:
ashrae = ashrae_get_design_conditions()
ashrae_loc = vocmax.ashrae_get_design_conditions_at_loc(
info['Latitude'], info['Longitude'], ashrae)
lowest_expected_temperature_ashrae = ashrae_loc[
'Extreme_Annual_Mean_Min_DB']
else:
lowest_expected_temperature_ashrae = np.nan
# mean_yearly_min_temp_ashrae =
mean_yearly_min_day_temp = calculate_mean_yearly_min_temp(
df.index[df['ghi'] > 150],
df['temp_air'][df['ghi'] > 150])
voc_summary['safety_factor'] = 0
for f in ['690.7(A)(3)-P99.5', '690.7(A)(3)-P100']:
voc_summary.loc[f, 'safety_factor'] = safety_factor
# Calculate some standard voc values.
voc_values = {
'690.7(A)(3)-P99.5': np.percentile(np.array(df['v_oc']), 99.5),
'690.7(A)(3)-P100': df['v_oc'].max(),
'690.7(A)(1)-DAY': calculate_voc(1000, mean_yearly_min_day_temp,
module_parameters),
'690.7(A)(1)-ASHRAE': calculate_voc(1000,
lowest_expected_temperature_ashrae,
module_parameters),
'690.7(A)(1)-NSRDB': calculate_voc(1000, mean_yearly_min_temp,
module_parameters),
'690.7(A)(2)-ASHRAE': module_parameters['Voco'] * nec_correction_factor(
lowest_expected_temperature_ashrae),
}
conditions = {
'690.7(A)(3)-P99.5': 'P99.5 Voc',
'690.7(A)(3)-P100': 'Historical Maximum Voc',
'690.7(A)(1)-NSRDB': 'Voc at 1 sun and mean yearly min ambient temperature from NSRDB',
'690.7(A)(1)-ASHRAE': 'Voc at 1 sun and mean yearly min ambient temperature from ASHRAE',
'690.7(A)(2)-ASHRAE': 'NEC 690.7a2 Voc, corrected by correction factor',
'690.7(A)(1)-DAY': 'Voc at 1 sun and mean yearly minimum daytime (GHI>150 W/m2) temperature',
# 'Norm_P99.5': 'P99.5 Voc assuming module normal to sun',
}
s_p99p5 = get_temp_irradiance_for_voc_percentile(df, percentile=99.5)
s_p100 = get_temp_irradiance_for_voc_percentile(df, percentile=100,
cushion=0.0001)
cell_temp = {
'690.7(A)(3)-P99.5': s_p99p5['temp_cell'],
'690.7(A)(3)-P100': s_p100['temp_cell'],
'690.7(A)(1)-DAY': mean_yearly_min_day_temp,
'690.7(A)(1)-NSRDB': mean_yearly_min_temp,
'690.7(A)(1)-ASHRAE': lowest_expected_temperature_ashrae,
'690.7(A)(2)-ASHRAE': lowest_expected_temperature_ashrae,
}
poa_irradiance = {
'690.7(A)(3)-P99.5': s_p99p5['effective_irradiance'],
'690.7(A)(3)-P100': s_p100['effective_irradiance'],
'690.7(A)(1)-DAY': 1000,
'690.7(A)(1)-NSRDB': 1000,
'690.7(A)(1)-ASHRAE': 1000,
'690.7(A)(2)-ASHRAE': 1000,
}
voc_summary['max_module_voltage'] = voc_summary.index.map(voc_values)
voc_summary['Conditions'] = voc_summary.index.map(conditions)
voc_summary['string_design_voltage'] = string_design_voltage
voc_summary['POA Irradiance'] = voc_summary.index.map(poa_irradiance)
voc_summary['Cell Temperature'] = voc_summary.index.map(cell_temp)
voc_summary['string_length'] = voc_summary['max_module_voltage'].map(
lambda x: voc_to_string_length(x, string_design_voltage, safety_factor))
max_module_voltage_with_safety_factor = voc_summary[
'max_module_voltage'] * (
1 + voc_summary[
'safety_factor'])
mean_yearly_min_temp = calculate_mean_yearly_min_temp(df.index,
df['temp_air'])
long_note = {
'690.7(A)(3)-P99.5': "99.5 Percentile Voc<br>" + \
"690.7(A)(3)-P99.5: {:.3f} V<br>".format(
voc_values['690.7(A)(3)-P99.5']) + \
"690.7(A)(3)-P99.5 + {:1.1%} SF: {:.3f} V<br>".format(
voc_summary['safety_factor'][
'690.7(A)(3)-P99.5'],
max_module_voltage_with_safety_factor[
'690.7(A)(3)-P99.5']) + \
"Maximum String Length: {:.0f}<br>".format(
voc_summary['string_length'][
'690.7(A)(3)-P99.5']) + \
"Recommended 690.7(A)(3) value for string length.",
'690.7(A)(3)-P100': 'Historical maximum Voc from {:.0f}-{:.0f}<br>'.format(
df['year'][0], df['year'][-1]) + \
'690.7(A)(3)-P100: {:.3f}<br>'.format(
voc_values['690.7(A)(3)-P100']) + \
"690.7(A)(3)-P100 + {:1.1%} SF: {:.3f} V<br>".format(
voc_summary['safety_factor'][
'690.7(A)(3)-P100'],
max_module_voltage_with_safety_factor[
'690.7(A)(3)-P100']) + \
'Maximum String Length: {:.0f}<br>'.format(
voc_summary['string_length'][
'690.7(A)(3)-P100']) + \
'Conservative 690.7(A)(3) value for string length.',
'690.7(A)(1)-DAY': 'Traditional daytime Voc, using 1 sun irradiance and<br>' + \
'mean yearly minimum daytime (GHI>150 W/m^2) dry bulb temperature of {:.1f} C.<br>'.format(
mean_yearly_min_day_temp) + \
'Day Voc: {:.3f} V<br>'.format(
voc_values['690.7(A)(1)-DAY']) + \
'Maximum String Length:{:.0f}<br>'.format(
voc_summary['string_length'][
'690.7(A)(1)-DAY']) + \
'Recommended 690.7(A)(1) Value',
'690.7(A)(1)-NSRDB': 'Traditional 690.7(A)(1) value, using 1 sun irradiance and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
mean_yearly_min_temp) + \
'690.7(A)(1)-NSRDB: {:.3f}<br>'.format(
voc_values['690.7(A)(1)-NSRDB']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(1)-NSRDB']),
'690.7(A)(1)-ASHRAE': 'Traditional 690.7(A)(1) value<br>' + \
'using 1 sun irradiance and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
lowest_expected_temperature_ashrae) + \
'Trad-ASHRAE-690.7a1 Voc: {:.3f}<br>'.format(
voc_values['690.7(A)(1)-ASHRAE']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(1)-ASHRAE']),
'690.7(A)(2)-ASHRAE': 'Traditional 690.7(A)(2) value<br>' + \
'using NEC derating table and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
lowest_expected_temperature_ashrae) + \
'Trad-ASHRAE-690.7(A)(2) Voc: {:.3f}<br>'.format(
voc_values['690.7(A)(2)-ASHRAE']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(2)-ASHRAE']),
# 'Norm_P99.5': "Normal Voc, 99.5 percentile Voc value<br>".format(voc_values['Norm_P99.5']) +\
# "assuming array always oriented normal to sun.<br>" +\
# "Norm_P99.5 Voc: {:.3f}<br>".format(voc_values['Norm_P99.5']) +\
# "Maximum String Length: {:.0f}".format(voc_summary['string_length']['Norm_P99.5'])
}
short_note = {
'690.7(A)(3)-P99.5': "Recommended 690.7(A)(3) value for string length.",
'690.7(A)(3)-P100': 'Conservative 690.7(A)(3) value for string length.',
'690.7(A)(1)-DAY': 'Traditional design using daytime temp (GHI>150 W/m^2)',
'690.7(A)(1)-ASHRAE': 'Traditional design using ASHRAE and temperature coefficient',
'690.7(A)(1)-NSRDB': 'Traditional design using NSRDB and temperature coefficient',
'690.7(A)(2)-ASHRAE': 'Traditional design using ASHRAE and standard derating.',
# 'Norm_P99.5': ""
}
voc_summary['long_note'] = voc_summary.index.map(long_note)
voc_summary['short_note'] = voc_summary.index.map(short_note)
return voc_summary
def get_s3_csv(filename):
    """
    Download a csv file from the project's S3 bucket ('pvtools-nsrdb-pickle')
    and return its contents as a dataframe.
    """
import boto3
# filename = '2017DesignConditions_s.xlsx.csv'
bucket = 'pvtools-nsrdb-pickle'
# connect to AWS S3
s3 = boto3.resource('s3')
obj = s3.Object(bucket, filename)
df = pd.read_csv(obj.get()['Body'])
return df
def scale_to_hours_per_year(y, info):
return y / info['timedelta_in_years'] * info['interval_in_hours']
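# Worked example (illustrative): with half-hour data covering one year,
# info['interval_in_hours'] is 0.5 and info['timedelta_in_years'] is 1, so a
# histogram count of 10 samples corresponds to 5 hours per year.
def _example_scale_to_hours_per_year():
    info = {'timedelta_in_years': 1, 'interval_in_hours': 0.5}
    return scale_to_hours_per_year(np.array([10, 4]), info)  # -> [5., 2.]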
def make_voc_histogram(df, info, number_bins=400):
# Voc histogram
voc_hist_y_raw, voc_hist_x_raw = np.histogram(df['v_oc'],
bins=np.linspace(
df['v_oc'].max() * 0.6,
df['v_oc'].max() + 1,
number_bins))
voc_hist_y = scale_to_hours_per_year(voc_hist_y_raw, info)[1:]
voc_hist_x = voc_hist_x_raw[1:-1]
return voc_hist_x, voc_hist_y
def make_simulation_summary(df, info, module_parameters, racking_parameters,
thermal_model, string_design_voltage, safety_factor,
ashrae='local_load'):
"""
Makes a text summary of the simulation.
Parameters
----------
    df
    info
    module_parameters
    racking_parameters
    thermal_model
    string_design_voltage
    safety_factor
    ashrae
    Returns
    -------
    summary : str
        csv-formatted text summary of the simulation.
"""
voc_summary = make_voc_summary(df, info, module_parameters,
string_design_voltage=string_design_voltage,
safety_factor=safety_factor,
ashrae=ashrae)
    if isinstance(thermal_model, str):
thermal_model = {'Model parameters': thermal_model}
if 'Location ID' in info:
info['Location_ID'] = info['Location ID']
if 'Time Zone' in info:
info['local_time_zone'] = info['Time Zone']
# extra_parameters = calculate_extra_module_parameters(module_parameters)
voc_hist_x, voc_hist_y = make_voc_histogram(df, info, number_bins=200)
pd.DataFrame({'Voc': voc_hist_x, 'hours per year': voc_hist_y}).to_csv(
index=False)
summary = \
'Simulation Run Date,' + str(datetime.datetime.now()) + '\n\n' + \
'Weather data,\n' + \
pd.Series(info)[
['Source', 'Latitude', 'Longitude', 'Location_ID',
'local_time_zone',
'Elevation', 'Version', 'interval_in_hours',
'timedelta_in_years']].to_csv(header=False) + '\n' + \
'Module Parameters\n' + \
pd.Series(module_parameters).to_csv(header=False) + '\n' + \
'Racking Parameters\n' + \
pd.Series(racking_parameters).to_csv(header=False) + '\n' + \
'Thermal model\n' + \
'model type, Sandia\n' + \
pd.Series(thermal_model).to_csv(header=False) + '\n' + \
'String Design Voltage,' + str(string_design_voltage) + '\n' + \
'vocmaxlib Version,' + vocmax.__version__ + '\n' + \
'\nKey Voc Values\n' + \
voc_summary.to_csv() + \
'\nVoc Histogram\n' + \
pd.DataFrame(
{'Voc': voc_hist_x,
'hours per year': voc_hist_y}
).to_csv(index=False)
return summary
def calculate_normal_voc(poa_direct, poa_diffuse, temp_cell, module_parameters,
spectral_loss=1, aoi_loss=1, FD=1):
"""
Parameters
----------
poa_direct
poa_diffuse
temp_cell
module_parameters
spectral_loss
aoi_loss
FD
    Returns
    -------
    v_oc : numeric
        Open circuit voltage in V.
"""
effective_irradiance = calculate_effective_irradiance(
poa_direct,
poa_diffuse,
spectral_loss=spectral_loss,
aoi_loss=aoi_loss,
FD=FD
)
v_oc = calculate_voc(effective_irradiance, temp_cell,
module_parameters)
return v_oc
# def calculate_effective_irradiance_bifacial(poa_direct_front,
# poa_diffuse_front,
# poa_direct_back,
# spectral_loss_front=1,
# spectral_loss_back=1,
# aoi_loss_front=1,
# FD_front=1):
# """
#
# Parameters
# ----------
# poa_direct
# poa_diffuse
# spectral_loss
# aoi_loss
# FD
#
# Returns
# -------
# effective_irradiance in W/m^2
#
# """
# # See pvlib.pvsystem.sapm_effective_irradiance for source of this line:
# effective_irradiance = spectral_loss_front * (
# poa_direct_front * aoi_loss_front + FD_front * poa_diffuse_front) + \
# spectral_loss_back*poa_back
#
# return effective_irradiance
def calculate_effective_irradiance(poa_direct, poa_diffuse, spectral_loss=1,
aoi_loss=1, FD=1):
"""
Parameters
----------
poa_direct
poa_diffuse
spectral_loss
aoi_loss
FD
Returns
-------
effective_irradiance in W/m^2
"""
# See pvlib.pvsystem.sapm_effective_irradiance for source of this line:
effective_irradiance = spectral_loss * (
poa_direct * aoi_loss + FD * poa_diffuse)
return effective_irradiance
def calculate_voc(effective_irradiance, temp_cell, module,
reference_temperature=25,
reference_irradiance=1000):
"""
Standard reference conditions are 1000 W/m2 and 25 C.
Parameters
----------
effective_irradiance
Irradiance in W/m^2
    temp_cell
        Cell temperature in C.
    module
Dict or Series containing the fields:
'alpha_sc': The short-circuit current temperature coefficient of the
module in units of A/C.
'a_ref': The product of the usual diode ideality factor (n,
unitless), number of cells in series (Ns), and cell thermal voltage
at reference conditions, in units of V
'I_L_ref': The light-generated current (or photocurrent) at reference
conditions, in amperes.
'I_o_ref': The dark or diode reverse saturation current at reference
conditions, in amperes.
'R_sh_ref': The shunt resistance at reference conditions, in ohms.
'R_s': The series resistance at reference conditions, in ohms.
'Adjust': The adjustment to the temperature coefficient for short
circuit current, in percent.
        'iv_model': (optional) IV model to use, one of 'sapm', 'cec' or
        'desoto'. Defaults to 'sapm'.
    Returns
    -------
    v_oc : numeric
        Open circuit voltage in V.
References
----------
[1] <NAME>, “An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model”, Journal of
Solar Energy Engineering, vol 134, 2012.
"""
if (not 'iv_model' in module) or module['iv_model'] == 'sapm':
v_oc = sapm_voc(effective_irradiance, temp_cell, module,
reference_temperature=reference_temperature,
reference_irradiance=reference_irradiance)
elif module['iv_model'] in ['cec', 'desoto']:
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temp_cell, module)
# out = pvlib.pvsystem.singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth,
# method='newton')
v_oc = pvlib.singlediode.bishop88_v_from_i(0,
photocurrent,
saturation_current,
resistance_series,
resistance_shunt,
nNsVth,
method='newton')
else:
raise Exception('iv_model not recognized')
return v_oc
def singlediode_voc(effective_irradiance, temp_cell, module_parameters):
"""
Calculate voc using the singlediode model.
Parameters
----------
effective_irradiance
temp_cell
module_parameters
    Returns
    -------
    v_oc : numeric
        Open circuit voltage in V.
"""
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temp_cell,
module_parameters)
# out = pvlib.pvsystem.singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth,
# method='newton')
v_oc = pvlib.singlediode.bishop88_v_from_i(0,
photocurrent,
saturation_current,
resistance_series,
resistance_shunt,
nNsVth,
method='newton')
return v_oc
def sapm_voc(effective_irradiance, temp_cell, module, reference_temperature=25,
reference_irradiance=1000):
"""
This function differs from the PVLIB version in that the effective
irradiance is in W/m2.
Parameters
----------
effective_irradiance : numeric
Effective irradiance in W/m^2
temp_cell : numeric
module : dict
parameters are:
'Voco'
'cells_in_series'
'Bvoco'
'Mbvoc'
reference_temperature : float
reference_irradiance : float
    Returns
    -------
    v_oc : numeric
        Open circuit voltage in V.
"""
T0 = reference_temperature
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64')
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
# Bvmpo = module['Bvmpo'] + module['Mbvmp'] * (1 - Ee)
if 'Mbvoc' in module:
Bvoco = module['Bvoco'] + module['Mbvoc'] * (1 - Ee)
else:
Bvoco = module['Bvoco']
delta = module['n_diode'] * kb * (temp_cell + 273.15) / q
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee / reference_irradiance, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
# avoid repeated __getitem__
cells_in_series = module['cells_in_series']
v_oc = np.maximum(0, (
module['Voco'] + cells_in_series * delta * logEe +
Bvoco * (temp_cell - T0)))
return v_oc
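# Quick check (illustrative module values): at reference conditions
# (1000 W/m^2, 25 C) the logarithmic and temperature terms vanish, so sapm_voc
# returns exactly 'Voco'. Lower irradiance or higher cell temperature reduce
# the result through the log term and the Bvoco term respectively.
def _example_sapm_voc():
    module = {'Voco': 60.0, 'Bvoco': -0.17, 'cells_in_series': 96,
              'n_diode': 1.1, 'Mbvoc': 0}
    return sapm_voc(1000.0, 25.0, module)  # -> 60.0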
def sapm_temperature_to_get_voc(effective_irradiance,
Voc,
Voco,
Bvoco,
diode_factor,
cells_in_series,
Mbvoc=0,
reference_temperature=25,
reference_irradiance=1000
):
"""
Calculate the cell temperature to achieve a certain Voc at a value of
effective irradiance.
Parameters
----------
effective_irradiance
Voc
Voco
Bvoco
diode_factor
cells_in_series
Mbvoc
reference_temperature
reference_irradiance
    Returns
    -------
    temperature_cell : numeric
        Cell temperature, in C, that produces the requested Voc at the given
        effective irradiance.
"""
T0 = reference_temperature
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64')
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee / reference_irradiance, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
Bvoco = Bvoco + Mbvoc * (1 - Ee)
delta_ref = diode_factor * kb * (reference_temperature + 273.15) / q
delta_prime = diode_factor * kb / q
temperature_cell = reference_temperature + (
Voc - Voco - cells_in_series * delta_ref * logEe) / (
cells_in_series * delta_prime * logEe + Bvoco)
return temperature_cell
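# Round-trip sketch (illustrative module values): recover the cell temperature
# that reproduces a Voc computed with sapm_voc at a known temperature. The
# algebra above inverts the sapm_voc expression exactly, so the input
# temperature comes back to within floating point.
def _example_sapm_temperature_to_get_voc():
    module = {'Voco': 60.0, 'Bvoco': -0.17, 'cells_in_series': 96,
              'n_diode': 1.1, 'Mbvoc': 0}
    v_oc = sapm_voc(800.0, 40.0, module)
    t_cell = sapm_temperature_to_get_voc(
        800.0, v_oc,
        Voco=module['Voco'],
        Bvoco=module['Bvoco'],
        diode_factor=module['n_diode'],
        cells_in_series=module['cells_in_series'])
    return v_oc, t_cell  # t_cell -> 40.0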
def sapm_mpp(effective_irradiance, temperature, module_parameters):
    """Calculate the single-diode maximum power point; returns (i_mp, v_mp, p_mp)."""
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temperature,
module_parameters)
i_mp, v_mp, p_mp = pvlib.singlediode.bishop88_mpp(photocurrent,
saturation_current,
resistance_series,
resistance_shunt, nNsVth,
method='newton')
return i_mp, v_mp, p_mp
def calcparams_singlediode(effective_irradiance, temperature,
                           module_parameters):
    """Compute single-diode model parameters using the 'desoto' or 'cec' scheme."""
# Default to desoto model.
if not 'iv_model' in module_parameters.keys():
module_parameters['iv_model'] = 'desoto'
if module_parameters['iv_model'] == 'sapm':
module_parameters['iv_model'] = 'desoto'
if module_parameters['iv_model'] == 'desoto':
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
pvlib.pvsystem.calcparams_desoto(effective_irradiance,
temperature,
module_parameters['alpha_sc'],
module_parameters['a_ref'],
module_parameters['I_L_ref'],
module_parameters['I_o_ref'],
module_parameters['R_sh_ref'],
module_parameters['R_s']
)
elif module_parameters['iv_model'] == 'cec':
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
pvlib.pvsystem.calcparams_cec(effective_irradiance,
temperature,
module_parameters['alpha_sc'],
module_parameters['a_ref'],
module_parameters['I_L_ref'],
module_parameters['I_o_ref'],
module_parameters['R_sh_ref'],
module_parameters['R_s'],
module_parameters['Adjust'],
)
else:
raise Exception(
"module_parameters['iv_model'] must be 'cec' or 'desoto'")
return photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth
def calculate_iv_curve(effective_irradiance, temperature, module_parameters,
ivcurve_pnts=200):
"""
:param effective_irradiance:
:param temperature:
:param module_parameters:
:param ivcurve_pnts:
:return:
"""
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temperature,
module_parameters)
iv_curve = pvlib.pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt,
nNsVth,
ivcurve_pnts=ivcurve_pnts,
method='lambertw')
return iv_curve
def calculate_sapm_module_parameters(module_parameters,
reference_irradiance=1000,
reference_temperature=25):
"""
Calculate standard parameters of modules from the single diode model.
module_parameters: dict
Returns
Dict of parameters including:
'Voco' - open circuit voltage at STC.
'Bvoco' - temperature coefficient of Voc near STC, in V/C
    'Isco' - short circuit current at STC, in A
    'alpha_sc' - temperature coefficient of Isc near STC, in A/C
    'Vmpo' - voltage at maximum power point at STC, in V
    'Pmpo' - power at maximum power point at STC, in W
    'Impo' - current at maximum power point at STC, in A
    'Bpmpo' - temperature coefficient of maximum power near STC, in W/C
"""
param = {}
param['cells_in_series'] = module_parameters['N_s']
kB = 1.381e-23
q = 1.602e-19
Vthref = kB * (273.15 + 25) / q
param['n_diode'] = module_parameters['a_ref'] / (
module_parameters['N_s'] * Vthref)
# Calculate Voc vs. temperature for finding coefficients
temp_cell_smooth = np.linspace(reference_temperature - 5,
reference_temperature + 5, 5)
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance=reference_irradiance,
temperature=temp_cell_smooth,
module_parameters=module_parameters)
iv_points = pvlib.pvsystem.singlediode(photocurrent,
saturation_current,
resistance_series, resistance_shunt,
nNsVth)
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(
effective_irradiance=reference_irradiance,
temperature=reference_temperature,
module_parameters=module_parameters)
iv_points_0 = pvlib.pvsystem.singlediode(photocurrent,
saturation_current,
resistance_series,
resistance_shunt, nNsVth)
param['Voco'] = iv_points_0['v_oc']
# param['Voco'] = module_parameters['V_oc_ref']
param['Isco'] = iv_points_0['i_sc']
# param['Isco'] = module_parameters['I_sc_ref']
param['Impo'] = iv_points_0['i_mp']
param['Vmpo'] = iv_points_0['v_mp']
param['Pmpo'] = iv_points_0['p_mp']
# param['Ixo'] = iv_points_0['i_x']
# param['Ixxo'] = iv_points_0['i_xx']
voc_fit_coeff = np.polyfit(temp_cell_smooth, iv_points['v_oc'], 1)
param['Bvoco'] = voc_fit_coeff[0]
pmp_fit_coeff = np.polyfit(temp_cell_smooth, iv_points['p_mp'], 1)
param['Bpmpo'] = pmp_fit_coeff[0]
isc_fit_coeff = np.polyfit(temp_cell_smooth, iv_points['i_sc'], 1)
param['alpha_sc'] = isc_fit_coeff[0]
param['Mbvoc'] = 0
param['FD'] = 1
param['iv_model'] = 'sapm'
# description = {
# 'Voco':'Open circuit voltage at STC (V)',
# 'Isco':'Short circuit current at STC (A)',
# 'Impo':'Max power current at STC (A)',
# 'Vmpo':'Max power voltage at STC (V)',
# 'Pmpo':'Max power power at STC (W)',
# 'Bvoco':'Temperature coeff. of open circuit voltage near STC (V/C)',
# 'Bpmpo':'Temperature coeff. of max power near STC (W/C)',
# 'Bisco':'Tempearture coeff. of short circuit current near STC (A/C)',
# 'cells_in_series': 'Number of cells in series',
# 'n_diode': 'diode ideality factor',
#
# }
#
# sapm_module = pd.DataFrame(
# index= list(param.keys()),
# columns=['Parameter','Value','Description'])
#
# sapm_module['Parameter'] = sapm_module.index
# sapm_module['Value'] = sapm_module.index.map(param)
# sapm_module['Description'] = sapm_module.index.map(description)
#
return param
def cec_to_sapm(module, reference_irradiance=1000, reference_temperature=25):
"""
Parameters
----------
module : dict or series
CEC module parameters.
'alpha_sc': temperature coefficient of short-circuit current near
reference conditions, in A/C.
reference_irradiance
reference_temperature
Returns
-------
sapm : dict or series
'alpha_sc': temperature coefficient of short-circuit current near
reference conditions, in A/C
"""
# Calculate sapm parameters.
sapm = calculate_sapm_module_parameters(module,
reference_irradiance=reference_irradiance,
reference_temperature=reference_temperature)
# Replace certain parameters with those explicitly specified.
if np.abs((sapm['Bvoco'] - module['beta_oc']) / sapm['Bvoco']) > 0.25:
        warnings.warn(
            'Inconsistency found in Bvoco; suggest checking the datasheet.')
sapm['Bvoco'] = module['beta_oc']
sapm['Voco'] = module['V_oc_ref']
sapm['Isco'] = module['I_sc_ref']
sapm['Impo'] = module['I_mp_ref']
sapm['Vmpo'] = module['V_mp_ref']
    # Note that the CEC database lists alpha_sc in units of A/C.
    alpha_sc_Amp_per_C = module['alpha_sc']
if np.abs(
(sapm['alpha_sc'] - alpha_sc_Amp_per_C) / sapm['alpha_sc']) > 0.25:
        warnings.warn(
            "The Desoto model gives alpha_sc = {:1.3f} A/C, but the value in "
            "the CEC database is {:1.3f} A/C".format(
                sapm['alpha_sc'], alpha_sc_Amp_per_C))
sapm['alpha_sc'] = alpha_sc_Amp_per_C
sapm['Pmpo'] = module['I_mp_ref'] * module['V_mp_ref']
sapm['efficiency'] = module['I_mp_ref'] * \
module['V_mp_ref'] / \
module['A_c'] / 1000
return sapm
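# Usage sketch: pull a CEC database entry with pvlib and convert it to SAPM
# parameters. The specific module name is an assumption here; any row of the
# CEC table carrying the fields accessed above (N_s, a_ref, beta_oc, A_c, ...)
# would work the same way.
def _example_cec_to_sapm():
    cec_modules = pvlib.pvsystem.retrieve_sam('CECMod')
    module = cec_modules['Canadian_Solar_Inc__CS5P_220M'].to_dict()
    sapm = cec_to_sapm(module)
    return sapm['Voco'], sapm['Bvoco'], sapm['Pmpo']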
# raise Warning('Inconsistency found in Bvoco, suggest to check datasheet. ')
#
#
# def calculate_sapm_module_parameters_df(module_parameters,reference_irradiance=1000,
# reference_temperature=25):
# """
#
# Calculate standard parameters of modules from the single diode model.
#
# module_parameters: dict
#
# Returns
#
# Dict of parameters including:
#
# 'Voco' - open circuit voltage at STC.
#
# 'Bvoco' - temperature coefficient of Voc near STC, in V/C
#
# Isco - short circuit current at STC
#
# Bisco - temperature coefficient of Isc near STC, in A/C
#
# Vmpo - voltage at maximum power point at STC, in V
#
# Pmpo - power at maximum power point at STC, in W
#
# Impo - current at maximum power point at STC, in A
#
# Bpmpo - temperature coefficient of maximum power near STC, in W/C
#
#
# """
#
# param = calculate_sapm_module_parameters(module_parameters,
# reference_irradiance=reference_irradiance,
# reference_temperature=reference_temperature)
#
# description = {
# 'Voco':'Open circuit voltage at STC (V)',
# 'Isco':'Short circuit current at STC (A)',
# 'Impo':'Max power current at STC (A)',
# 'Vmpo':'Max power voltage at STC (V)',
# 'Pmpo':'Max power power at STC (W)',
# 'Bvoco':'Temperature coeff. of open circuit voltage near STC (V/C)',
# 'Bpmpo':'Temperature coeff. of max power near STC (W/C)',
# 'Bisco':'Tempearture coeff. of short circuit current near STC (A/C)'
# }
#
# extra_parameters = pd.DataFrame(
# index= list(param.keys()),
# columns=['Parameter','Value','Description'])
#
# extra_parameters['Parameter'] = extra_parameters.index
# extra_parameters['Value'] = extra_parameters.index.map(param)
# extra_parameters['Description'] = extra_parameters.index.map(description)
#
#
# return extra_parameters
def calculate_mean_yearly_min_temp(datetimevec, temperature):
"""
Calculate the mean of the yearly minimum temperatures.
Parameters
----------
datetimevec
datetime series giving times corresponding to the temperatures
temperature
series of temperatures
Returns
-------
mean of yearly minimum temperatures.
"""
years = list(set(datetimevec.year))
yearly_min_temp = []
for j in years:
yearly_min_temp.append(
temperature[datetimevec.year == j].min()
)
return np.mean(yearly_min_temp)
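# Small self-contained check (synthetic data): two years of hourly temperatures
# with yearly minima of -5 C and -3 C give a mean yearly minimum of -4 C.
def _example_calculate_mean_yearly_min_temp():
    times = pd.date_range('2018-01-01', '2019-12-31 23:00', freq='1H')
    temps = pd.Series(10.0, index=times)
    temps.loc['2018-01-15 05:00'] = -5.0
    temps.loc['2019-01-20 06:00'] = -3.0
    return calculate_mean_yearly_min_temp(times, temps)  # -> -4.0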
def get_temp_irradiance_for_voc_percentile(df, percentile=99.5, cushion=0.0025):
"""
Find the lowest temperature and associated irradiance that produces the
percentile value of Voc.
Parameters
----------
df : dataframe
Dataframe containing 'v_oc'
    percentile : numeric
        Percentile of Voc to find, e.g. 99.5.
    cushion : numeric
        Fractional tolerance around the percentile Voc used when selecting
        candidate rows.
    Returns
    -------
    Series
        Row of df (as a Series) with the lowest air temperature among the
        rows whose Voc lies within the cushion of the percentile value.
"""
Pvoc = np.nanpercentile(np.array(df['v_oc']), percentile,
interpolation='nearest')
df_close = df[df['v_oc'] > Pvoc * (1 - cushion)]
df_close = df_close[df_close['v_oc'] < Pvoc * (1 + cushion)]
if len(df_close['temp_air']) > 0:
i_near = df_close['temp_air'].idxmin()
else:
i_near = abs(df['v_oc'] - Pvoc).idxmin()
return df.iloc[df.index.get_loc(i_near)]
def voc_to_string_length(voc, max_string_voltage, safety_factor):
"""
    Returns the maximum number N of modules with open circuit voltage voc
    that, when connected in series, do not exceed max_string_voltage after
    the safety factor is applied.
Parameters
----------
voc : float
Open circuit voltage
max_string_voltage : float
Maximum string voltage
safety_factor : float
safety factor for string length.
Returns
-------
N : float
Maximum string length
"""
if voc == 0:
string_length = np.nan
else:
string_length = np.round(
np.floor(max_string_voltage * (1 - safety_factor) / voc))
return string_length
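# Worked example: a module with Voc = 50 V on a 1500 V design with a 2.3 %
# safety factor gives floor(1500 * 0.977 / 50) = floor(29.31) = 29 modules.
def _example_voc_to_string_length():
    return voc_to_string_length(50.0, 1500, 0.023)  # -> 29.0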
def simulate_system_sandia(weather, info, module_parameters=None,
system_parameters=None):
"""
Use the PVLIB Sandia model to calculate max voc.
:param weather:
:param info:
:param module_parameters:
:param system_parameters:
:return:
"""
cec_inverters = pvlib.pvsystem.retrieve_sam('cecinverter')
inverter_parameters = cec_inverters[
'Power_Electronics__FS1700CU15__690V__690V__CEC_2018_']
# Set location
location = pvlib.location.Location(latitude=info['Latitude'][0],
longitude=info['Longitude'][0])
    # Weather must have fields dni, dhi, ghi, temp_air, and wind_speed.
# Make pvsystem
if system_parameters['mount_type'].lower() == 'fixed_tilt':
system = pvlib.pvsystem.PVSystem(
module_parameters=module_parameters,
inverter_parameters=inverter_parameters,
surface_tilt=system_parameters['surface_tilt'],
surface_azimuth=system_parameters['surface_azimuth'],
)
elif system_parameters['mount_type'].lower() == 'single_axis_tracker':
system = pvlib.tracking.SingleAxisTracker(
module_parameters=module_parameters,
inverter_parameters=inverter_parameters,
axis_tilt=system_parameters['axis_tilt'],
axis_azimuth=system_parameters['axis_azimuth'],
max_angle=system_parameters['max_angle'],
backtrack=system_parameters['backtrack'],
gcr=system_parameters['ground_coverage_ratio']
)
# print(system_parameters['surface_tilt'])
mc = pvlib.modelchain.ModelChain(system, location)
mc.system.racking_model = system_parameters['racking_model']
# mc.complete_irradiance(times=weather.index, weather=weather)
mc.run_model(times=weather.index, weather=weather)
df = weather
df['v_oc'] = mc.dc.v_oc
df['temp_cell'] = mc.temps['temp_cell']
return (df, mc)
def import_nsrdb_csv(filename):
"""Import an NSRDB csv file.
    The function (df, info) = import_nsrdb_csv(filename) imports an NSRDB
    formatted csv file.
Parameters
----------
filename
Returns
-------
df
pandas dataframe of data
    info
        dict of header metadata taken from the first line of the file.
"""
# filename = '1ad06643cad4eeb947f3de02e9a0d6d7/128364_38.29_-122.14_1998.csv'
info_df = pd.read_csv(filename, nrows=1)
info = {}
for p in info_df:
info[p] = info_df[p].iloc[0]
# See metadata for specified properties, e.g., timezone and elevation
# timezone, elevation = info['Local Time Zone'], info['Elevation']
# Return all but first 2 lines of csv to get data:
df = pd.read_csv(filename, skiprows=2)
# Set the time index in the pandas dataframe:
year = str(df['Year'][0])
if np.diff(df[0:2].Minute) == 30:
interval = '30'
info['interval_in_hours'] = 0.5
df = df.set_index(
pd.date_range('1/1/{yr}'.format(yr=year), freq=interval + 'Min',
periods=60 * 24 * 365 / int(interval)))
elif df['Minute'][1] - df['Minute'][0] == 0:
interval = '60'
info['interval_in_hours'] = 1
df = df.set_index(
pd.date_range('1/1/{yr}'.format(yr=year), freq=interval + 'Min',
periods=60 * 24 * 365 / int(interval)))
else:
print('Interval not understood!')
df.index = df.index.tz_localize(
pytz.FixedOffset(float(info['Time Zone'] * 60)))
return (df, info)
# df, info = import_csv('nsrdb_1degree_uv/104_30.97_-83.22_tmy.csv')
def import_nsrdb_sequence(folder):
"""Import and append NSRDB files in a folder
Import a sequence of NSRDB files, data is appended to a pandas dataframe.
This is useful for importing all years of data from one folder.
Parameters
----------
folder
directory containing files to import.
Returns
-------
df
pandas dataframe of data
    info
        dict of header metadata for the last file imported.
"""
# Get all files.
files = glob.glob(os.path.join(folder, '*.csv'))
if len(files) == 0:
raise ValueError('No input files found in directory')
files.sort()
    df = pd.DataFrame()
    for f in files:
        df_temp, info = import_nsrdb_csv(f)
        # Append each file's data; header info from the last file is returned.
        df = pd.concat([df, df_temp])
    return df, info
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
        # TODO: datetime.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
            assert as_json_table_type(t) == 'duration'
import matplotlib.pyplot as plt
import math
from modules.Simpson_BTK import BTK_Diff
import pandas as pd
def Dataplot(parameter,T,df2,my_xaxis,my_yaxis,plot_name):
print("Using btk!")
V = df2['Vdc'].values
G= BTK_Diff(parameter,df2[my_xaxis],T)
G_min = min(min(G),min(df2[my_yaxis]))
G_max = max(max(G),max(df2[my_yaxis]))
Internal = (G_max - G_min)/10
X_Position = max(df2[my_xaxis])/4
plt.plot(df2[my_xaxis],df2[my_yaxis],label = 'Exp')
plt.plot(df2[my_xaxis],G,label = 'G/GN Theory')
plt.text(X_Position, G_min + 4*Internal, 'Temperature:'+str(round(T,4)), fontsize=13)
plt.text(X_Position, G_min + 3*Internal, 'Delta:'+str(round(parameter[0],4)), fontsize=13)
    plt.text(X_Position, G_min + 2*Internal, 'Gamma:'+str(round(parameter[1],4)), fontsize=13)
plt.text(X_Position, G_min + 1*Internal, 'Barrier Height:'+str(round(parameter[2],4)), fontsize=13)
plt.text(X_Position, G_min + 0*Internal, 'Spin Polarization:'+str(round(parameter[3],4)), fontsize=13)
plt.legend(loc = 1)
plt.axis('tight')
plt.xlabel(my_xaxis)
plt.ylabel(my_yaxis)
#Change Name
plot_name = plot_name[:-4]
plt.title(plot_name)
# Savefig
plt.savefig(plot_name + '.png')
# Save Data
data = {'V': V, 'G/GN': G}
    data_df = pd.DataFrame(data)
    # Filename is assumed to mirror the saved figure name.
    data_df.to_csv(plot_name + '.csv', index=False)
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
import sys
from rowgenerators.exceptions import SourceError, RowGeneratorError
from rowgenerators.source import Source
from rowgenerators.appurl import parse_app_url
import pandas as pd
import numpy as np
def iterate_pandas(df):
if len(df.index.names) == 1 and df.index.names[0] is None and df.index.dtype != np.dtype('O'):
# For an unnamed, single index, assume that it is just a row number
# and we don't really need it
yield list(df.columns)
for index, row in df.iterrows():
yield list(row)
else:
# Otherwise, there is more than one index level, or the single index is named,
# so emit the index columns along with the data columns
index_names = [n if n else "index{}".format(i) for i, n in enumerate(df.index.names)]
yield index_names + list(df.columns)
if len(df.index.names) == 1:
idx_list = lambda x: [x]
else:
idx_list = lambda x: list(x)
for index, row in df.iterrows():
yield idx_list(index) + list(row)
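# Hedged usage sketch for iterate_pandas (demo frame is hypothetical): with a plain
# RangeIndex the generator yields the header first and then bare rows; a named or
# multi-level index would be emitted as leading columns instead.
def _demo_iterate_pandas():
    demo = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    return list(iterate_pandas(demo))  # [['a', 'b'], [1, 3], [2, 4]]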
def to_codes(df):
"""Return a dataframe with all of the categoricals represented as codes"""
df = df.copy()
cat_cols = df.select_dtypes(['category']).columns
df[cat_cols] = df[cat_cols].apply(lambda x: x.cat.codes)
return df
def make_cat_map(df, column):
"""Extract the mapping between category names and codes from the dataset. There should be an interface
on the categorical index type for this map, but I could not find it. """
t = pd.DataFrame( {'codes': df[column].cat.codes, 'values': df[column]} ).drop_duplicates().sort_values('codes')
return { e.codes:e.values for e in list(t.itertuples())}
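# Hedged sketch for to_codes/make_cat_map (demo data is hypothetical): category labels
# are sorted when codes are assigned, so 'blue' gets code 0 and 'red' gets code 1.
def _demo_categorical_helpers():
    demo = pd.DataFrame({'color': pd.Categorical(['red', 'blue', 'red']), 'n': [1, 2, 3]})
    coded = to_codes(demo)                 # 'color' becomes [1, 0, 1]; 'n' is unchanged
    mapping = make_cat_map(demo, 'color')  # {0: 'blue', 1: 'red'}
    return coded, mapping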
def extract_categories(fspath):
"""
Create a Metatab doc with the schema for a CHIS file, including the categorical values.
:param fspath: Path to the stata file.
:return:
"""
dfs = pd.read_stata(fspath)
import streamlit as st # Make Frontend
import pymongo # Database connection
from pymongo import MongoClient # Access database url
import pandas as pd # Basic dataframe operations
import pdfplumber # Plumb pdf for visual debugging and data extraction including table data
import PyPDF2 # To scan pdf documents
from rake_nltk import Rake # Simple Key extraction
import string # String operations
import io # Convert a binary resume file to decoded file readable by python
import re # Regular expression
import nltk # Natural Language toolkit
nltk.download('stopwords')
nltk.download('punkt')
import lxml # most feature-rich and easy-to-use library for processing XML and HTML
country = st.sidebar.text_input('Country')
# Each element that's passed to st.sidebar is pinned to the left.
# text_input display a single-line text input widget.
uploaded_file = st.file_uploader('Upload your resume')
# file_uploader: uploaded files are limited to 200 MB by default.
file_text = ''
# To save file data
phrases = []
# takes phrases
# keyphrases function
def keyphrases(file, min_word, max_word, num_phrases):
text = file
text = text.lower()
# lowercase text inputted
text = ''.join(s for s in text if ord(s) > 31 and ord(s) < 126)
# keep printable ASCII only: ord 32 (space) through ord 125 ('}')
# using join() function to join text
text = re.sub(' +', ' ', text)
# Replace multiple space with single space
text = text.translate(str.maketrans('', '', string.punctuation))
# maketrans-specify the list of characters that need to be replaced in the whole string
# translate() method in Python for making many character replacements in strings.
text = ''.join([i for i in text if not i.isdigit()])
# join if not a digit
r = Rake(min_length=min_word, max_length=max_word)
# extract keywords of size entered to the function
r.extract_keywords_from_text(text)
# Extraction given the text.
phrases = r.get_ranked_phrases()
# To get keyword phrases ranked highest to lowest.
if num_phrases < len(phrases):
phrases = phrases[0:num_phrases]
# only keep phrases of length specified earlier
return phrases
# returns key phrases
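# Hedged usage example (the input string is hypothetical):
#   keyphrases('experienced data scientist skilled in machine learning', 2, 4, 5)
# returns at most 5 RAKE-ranked phrases of 2-4 words, e.g. 'machine learning' would be
# among them (exact ranking depends on RAKE's word co-occurrence scores).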
# check if file has any data
if uploaded_file is not None:
uploaded_file.seek(0)
# Top of pdf file
file = uploaded_file.read()
# Read the file data
pdf = PyPDF2.PdfFileReader(io.BytesIO(file))
# Convert it into python readable format from binary form
for page in range(pdf.getNumPages()):
# getNumPages for calculating the number of pages in this PDF file.
file_text += (pdf.getPage(page).extractText())
# getPage for retrieving a page by number from this PDF file.
# extractText for extracting text
phrases.extend(keyphrases(file_text, 2, 4, 10))
# Extend add return value in phrases after function is run
if len(phrases) > 0:
q_terms = st.multiselect('Select key phrases',options=phrases,default=phrases) #display keywords
#mongo-connection
client = pymongo.MongoClient("mongodb+srv://deepm12:<EMAIL>/Job_Recommender?retryWrites=true&w=majority")
def query(country,keywords):
result = client['JobRecommender']['Companies'].aggregate([
{
'$search': {
'text': {
'path': [
'industry'
],
'query': [
' %s' % (keywords)
],
'fuzzy': {
'maxEdits': 2,
'prefixLength': 2
}
}
}
}, {
'$project': {
'Name': '$name',
'Industry': '$industry',
'City': '$locality',
'Country': '$country',
'score': {
'$meta': 'searchScore'
}
}
}, {
'$match': {
'Country': '%s' % (country)
}
}, {
'$limit': 10
}
])
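# Pipeline summary: $search fuzzy-matches the selected key phrases against the
# 'industry' field, $project renames the fields and exposes the relevance score,
# $match keeps only companies in the chosen country, and $limit caps the output
# at 10 documents.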
df = pd.DataFrame(result)
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from gensim.models.ldamulticore import LdaMulticore
# load the model from disk
filename = 'models/trained_lda.sav'
ldamodel = LdaMulticore.load(filename)
filename = 'models/trained_lda_corpus.sav'
corpus = pickle.load(open(filename, 'rb'))
#load the files
data_files = ["data/pubmed_articles_cancer_01_smaller.csv", "data/pubmed_articles_cancer_02_smaller.csv",
"data/pubmed_articles_cancer_03_smaller.csv","data/pubmed_articles_cancer_04_smaller.csv"]
input_data = pd.DataFrame()
print("load the files")
for file in data_files:
df_load = pd.read_csv(file,skip_blank_lines=True)
input_data = input_data.append(df_load)
input_data.abstract = input_data.abstract.astype('str')
dates_df = input_data.copy()
dates_df = dates_df[["pubmed_id", "created_date"]]
#dates_df.to_csv("dates.csv", index=False)
# print("get weights")
# https://stackoverflow.com/questions/62174945/gensim-extract-100-most-representative-documents-for-each-topic
topic_probs = ldamodel.get_document_topics(corpus) #get the list of topic probabilities by doc
topic_dict = [dict(x) for x in topic_probs] #convert to dictionary to convert to data frame
df = pd.DataFrame(topic_dict).fillna(0) #convert to data frame, fill topics < 0.01 as 0
df = df.reindex(sorted(df.columns), axis=1)
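# The conversion above turns get_document_topics output such as [(0, 0.8), (3, 0.2)]
# into {0: 0.8, 3: 0.2} and then into one row per document with a column per topic,
# with missing (below-threshold) topics filled with 0.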
columns_names = ["infection risk", "thyroid cancer", "safety and efficacy", "leukemia chemotherapy", "surgical intervention",
"lymph nodes detection", "pain management", "cervical cancer", "bladder cancer", "risk prediction",
"adjuvant therapy", "healthy habits", "hematologic toxicity", "surgical complications", "tumor angiogenesis",
"Intraoperative Radiation Therapy", "radiotherapy", "stem cell transplantation", "glioma", "behavioral intervention",
"prostate cancer"]
df.columns = columns_names
df.to_csv("temp.csv", index=False)
#df = pd.read_csv("temp.csv")
df.reset_index(inplace=True)
dates_df.reset_index(inplace=True)
transformed_df = dates_df.join(df)
#EDA: Very little data before 1975
transformed_df['created_date'] = pd.to_datetime(dates_df['created_date'], format="%m/%d/%Y %H:%M")
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 08:08:52 2016
@author: tkc
"""
import re
from collections import defaultdict
import pandas as pd
import numpy as np
from scipy import optimize
from math import factorial # used by Savgol matrix
from scipy.optimize import curve_fit
import os
#%%
def compareavg_subs(Integcomp, Integcompsubs):
''' Compare compositions for integral method on same samples derived from avg-combined files named 173175
and the sub-spe files 173,174,175, single line for each'''
combofiles=Integcomp[Integcomp['Filenumber']>3000] # this is core of returned df
tempcols=np.ndarray.tolist(combofiles.columns.unique())
elemlist=[col for col in tempcols if '%' in col and 'err' not in col]
elemlist=[el.replace('%','') for el in elemlist] # regenerate element list
# make merge1, merge2 and merge3 cols for pd.merge
# Create copies of Integcompsubs and rename Filenumber to merge1,2,3
# This allows pd merge on merge col, areanumber (since sample might not be unique)
# determine # of underlying sub spe files (assumed same for all in combofiles)
filestr=str(int((combofiles.iloc[0]['Filenumber'])))
firstnum=int(filestr[0:int((len(filestr)/2))])
lastnum=int(filestr[int((len(filestr)/2)):])
for i in range(1,(lastnum-firstnum+2)):
nummerges=lastnum-firstnum+1
colname='Merge'+str(i)
combofiles[colname]='' # add correct number of merge columns
for index, row in combofiles.iterrows():
filestr=str(int((combofiles.loc[index]['Filenumber'])))
firstnum=int(filestr[0:int((len(filestr)/2))])
lastnum=int(filestr[int((len(filestr)/2)):])
# maybe need to associate file append string to source file
filerange=[]
filerange.extend(range(firstnum,lastnum+1))
# deal with possible irregular combo file structure (assumes same number of averaged sub spe files)
if len(filerange)!=nummerges:
print('Different number of average sub spefiles for ', filestr)
if len(filerange)!=nummerges: # truncate if too long to avoid error
filerange=filerange[0:nummerges+1]
# Now assign correct filenumber for each merge
for i, val in enumerate(filerange):
colname='Merge'+str(i+1)
combofiles=combofiles.set_value(index, colname, val) # assign correct filenumber to each merge (cols already created)
# now merge in compositional data from each of the n sub spe files
# prepare compsubs for merge
dropcols=['Project', 'Filename', 'FilePath', 'Sample', 'Comments']
dropcollist=[s for s in Integcompsubs.dtypes.index if s in dropcols] # ensure the drop col is actually present
Integcompsubs=Integcompsubs.drop(dropcollist, axis=1)
for i in range(1,nummerges+1):
colname='Merge'+str(i)
tempdf=Integcompsubs
tempdf=tempdf.rename(columns={'Filenumber':colname})
combofiles=pd.merge(combofiles,tempdf, how='left', on=[colname,'Areanumber'], suffixes=('',str(i)))
# create and return a subset of compositional comparison
mycols=['Filenumber', 'Areanumber', 'Sample','Filename','AESbasis']
# now make average and stdev from composition of sub spes for comparison with average
# Now compute avg and stdev of elemental bases and at. % compositions
# For integquant elem is counts modified by kfactors (fundamental quantity)
for i, elem in enumerate(elemlist):
mycols.extend([elem,'%'+elem]) # add to truncated output
numrange=[str(i) for i in range(1,nummerges+1)]
collist=[elem+val for val in numrange]
# average amplitudes for each element
newcol=elem+'avg'
mycols.extend([newcol])
combofiles[newcol]=combofiles[collist].mean(axis=1) # e.g. averages S1, S2, S3 into Savg
newcol=elem+'stdev'
mycols.extend([newcol])
combofiles[newcol]=combofiles[collist].std(axis=1)
# now compute average at.% for each element
collist=['%'+elem+val for val in numrange]
newcol='%'+elem+'avg'
mycols.extend([newcol])
combofiles[newcol]=combofiles[collist].mean(axis=1) # avg of at. %
newcol='%'+elem+'stdev'
mycols.extend([newcol])
combofiles[newcol]=combofiles[collist].std(axis=1) # avg for stdev
# Output a smaller subset of
compslice=combofiles[mycols]
return compslice,combofiles
def parseelemlist(elemlist):
'''Find and separate multielement peaks to be averaged (e.g. Fe2 & Fe) from longer string of element peaks
e.g. splits "Mg Fe Fe2 Si" into "Mg Si" and "{Fe,[Fe,Fe2]} dictionary'''
# Strip numbers from strings within list
newlist=[re.match(r'\D+',i).group(0) for i in elemlist]
# find duplicated peaks (multiple peaks per element)
Multielem = defaultdict(list)
for i, item in enumerate(newlist):
Multielem[item].append(i)
Multielem = {k:v for k,v in Multielem.items() if len(v)>1} # dictionary with duplicated item and list with indices
duplist=list(Multielem.values()) # get list
duplist=[item for sublist in duplist for item in sublist] # single list with positions of duplicated elements
# now alter multipeak elements list to give dict with element and then list of peak for that element
for key,value in Multielem.items():
templist=value # dictionary value is list of elem peak index positions
peaklist=[]
for i, index in enumerate(templist): # create new list with original elem peak from index positions
peaklist.append(elemlist[index])
# now replace list of index positions with elempeak names
Multielem.update({key:peaklist}) # key will be multipeak element string i.e. "Fe"
# finally construct new single elements list with multipeak ones removed (handle each separately)
newelemlist=[]
for i in range(0,len(elemlist)):
if i not in duplist:
newelemlist.append(elemlist[i])
return newelemlist, Multielem
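# Hedged illustration of the split described in the docstring:
#   parseelemlist(['Mg', 'Fe', 'Fe2', 'Si'])
# returns (['Mg', 'Si'], {'Fe': ['Fe', 'Fe2']}) -- single-peak elements plus a dict of
# elements whose quantification is averaged over multiple peaks.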
def parseelem2(elemlist, Multielem):
''' After multielement peaks removed, also move secondary peaks used as primary to dict (handle separately)
e.g. splits "S Mg Fe2 Si" into "S Mg Si" and "{Fe,[Fe2]} dictionary; same structure and df output
for averaging of Fe, Fe2, or straight Fe2 or straight Fe'''
# starting elemlist will only have single entries (i.e Ti2 but not Ti & Ti2)
newelemlist=[]
for i, elem in enumerate(elemlist):
if re.search(r'\d',elem): # has number
match=re.search(r'\d',elem)
newkey=elem[0:match.start()]
# store alt quant (i.e. on Ti2) with same structure as multiple quant (Ti & Ti2)
# Another entry in multielement list... makes things easier for later quant comparisons
templist=[] # peakIDs added as list (of length 1)
templist.append(elem) # list containing single string (keeps identical data structure)
Multielem.update({newkey:templist}) # add to existing dictionary for separate handling
else:
newelemlist.append(elemlist[i]) # just copy over
return newelemlist, Multielem # return altered element list and multielem dictionary
def getelemthresholds(elemlist, AESquantparams):
'''get element-dependent significance thresholds for each peak from AESquantparams
return dictionary with element and associated significance level'''
thresholds={} # returns list of element dependent thresholds for this element set
for i, elem in enumerate(elemlist):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thresholds.update({elem:thiselemdata.siglevel})
return thresholds
def cloneparamrows(df):
''' Make param log entry for for each areanum - used by calccomposition to correctly process spe files with multiple spatial areas
passed df is usually list of spe files
this solves problem that AugerParamLog only has one entry (despite possibly having multiple distinct areas with different spectra'''
df['Areanumber']=1 # set existing entries as area 1
mycols=df.dtypes.index
newrows=pd.DataFrame(columns=mycols) # blank df for new entries
for index, row in df.iterrows():
numareas=int(df.loc[index]['Areas'])
for i in range(2,numareas+1):
newrow=df.loc[index] # clone this row as series
newrow=newrow.set_value('Areanumber',i)
newrows=newrows.append(newrow)
df=pd.concat([df,newrows], ignore_index=True) # merge new rows with existing ones
df=df.sort_values(['Filenumber','Areanumber'])
return df
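# Illustration: a param log row with Areas=3 is expanded into three rows with
# Areanumber 1, 2 and 3, so that each spatial area can be quantified separately.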
def calccomp(df, Integquantlog, elemlist, AESquantparams):
'''Calculate elemental composition of given files based on input element list
threshold - ratio of element peak to noise peak (0 means no threshold applied
load element-dependent significance level from AESquantparams'''
thresholds=getelemthresholds(elemlist, AESquantparams) # Get list of sigma levels for significance/inclusion
# thresholds for both single and multipeak
elemlist, multipeaklist = parseelemlist(elemlist) # list of single peak elements and dict with multipeaks
# check if any of the single peaks are secondary (i.e. quant on Fe2 not main Fe)
elemlist, multipeaklist= parseelem2(elemlist, multipeaklist)
# two element lists needed (elements with one peak and elements with compositions averaged from two peaks i.e. Fe2, Fe3)
# to process compositions from multiple areas, clone rows from spe log (one for each areanum)
df=cloneparamrows(df) # splits single entry for 5 spatial area spe into 5 rows with Areanumber 1-5
df=df.reset_index(drop=True)
df['AESbasis']=0.0 # resets to zero if already present from calcamplitude
mycols=['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments','AESbasis','Areanumber']
for i, elem in enumerate(elemlist): # add columns for basis
df[elem]=0.0 # add col for each element to spelist
df[elem+'cnts']=0.0 # also keep integrated counts
df['sig'+elem]=0.0 # copy peak significance (ratio of integrated counts over 1 sigma of background)
df['err'+elem]=0.0 # another for total error in adjusted counts basis
mycols.append(elem)
mycols.append(elem+'cnts')
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i,elem in enumerate(list(multipeaklist.keys())): # get elements (keys) from dict
df[elem]=0.0
df[elem+'cnts']=0.0
df['sig'+elem]=0.0
df['err'+elem]=0.0
mycols.append(elem)
mycols.append(elem+'cnts')
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i, elem in enumerate(elemlist): # now add at.% columns (e.g. %S, %Mg)
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i,elem in enumerate(list(multipeaklist.keys())): # add multipeak elements
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i in range(0,len(df)): # loop through all desired spectrum (multiarea ones already have duplicated rows)
filename=df.iloc[i]['Filename'] # more flexible than filenumber (which can be duplicated)
areanum=df.iloc[i]['Areanumber']
match=Integquantlog[Integquantlog['Filename']==filename] # find integ data for this filenumber
match=match[match['Areanumber']==areanum]
basis=0.0 #
for j, elem in enumerate(elemlist): # handle the single peak elements
temp=match[match['Element']==elem] # finds entry for this element
if len(temp)==1:
# thresholds is dict with required significance level for each element
thisthresh=thresholds.get(elem) # sig level for this element
df=df.set_value(i, 'sig'+elem, temp.iloc[0]['Significance']) # always copy peak significance level
df=df.set_value(i, elem+'cnts', temp.iloc[0]['Integcounts']) # copy integrated counts
if temp.iloc[0]['Significance']>thisthresh: # if above set threshold then calculate elem's value and add to basis
df=df.set_value(i, elem, temp.iloc[0]['Adjcnts']) # copy adjusted counts of this element
df=df.set_value(i, 'err'+elem, temp.iloc[0]['Erradjcnts'])
basis+=temp.iloc[0]['Adjcnts'] # add this element's value to AES basis
# now handle the multipeak elements (get average value from both peaks)
for key, value in multipeaklist.items(): # key is element (aka colname in df), value is list of peaks in Smdifpeakslog
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
avgval=0.0 # working value for averaged adjamplitude
erravgval=0.0 # combined error from erradjcnts of each line
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds integquantlog entry for this peak (match already trimmed to filenum and area)
if len(temp)==1:
thisthresh=thresholds.get(peak) # sig level for this element/peak
df=df.set_value(i, 'sig'+elem, temp.iloc[0]['Significance']) # copy peak significance level
df=df.set_value(i, elem+'cnts', temp.iloc[0]['Integcounts']) # copy integrated counts
if temp.iloc[0]['Significance']>thisthresh:
avgval+=temp.iloc[0]['Adjcnts']
thiserrperc=(temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts'])**2 # square of this line's relative error
erravgval+=thiserrperc # sum of square of relative error
else:
numlines=numlines-1 # if peak is zeroed out and not added, this reduces # peaks in average
if numlines>0: # avoid divbyzero if peak is too small
avgval=avgval/numlines # this is now average basis for given element
erravgval=np.sqrt(erravgval) # sqrt of sum of squares is relative error
df=df.set_value(i, key, avgval) # copy adjusted amplitude of this element
df=df.set_value(i, 'err'+key, avgval*erravgval) # combined actual error of this elem (as determined from multiple lines); matches the 'err'+elem columns created above
# add value from this element to AESbasis
basis+=avgval
# end of multipeak elements loop
df=df.set_value(i, 'AESbasis', basis) # write total basis value to df
# Now compute at.% for each listed element (incl errors)
for j, elem in enumerate(elemlist):
colname='%'+elem
ratio=df.iloc[i][elem]/df.iloc[i]['AESbasis'] # initialized to zero in cases where peak is below significance threshold
df.set_value(i, colname, ratio)
temp=match[match['Element']==elem] # again find peak entry and get finds entry for this peak
# TODO maybe check threshold again (although element's value will be zero)
if len(temp)==1:
thiserr=temp.iloc[0]['Erradjcnts']
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+elem # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# Also calculate for elements w/ multiple peaks (if present)
for key, value in multipeaklist.items():
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
colname='%'+key
ratio=df.iloc[i][key]/df.iloc[i]['AESbasis']
df.set_value(i, colname, ratio)
# TODO need to propagate errors through Fe & Fe2
errlist=[] # list of errors in % (usually max of two)
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds entry for this peak
if len(temp)==1:
if temp.iloc[0]['Adjcnts']>0: # skip negative values
err=temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts']
errlist.append(err) # add this to list
# combine errors in quadrature
totalerr=0.0
for j, err in enumerate(errlist):
totalerr+=err**2
totalerr=np.sqrt(totalerr) # percent error in at %
# now get actual error
thisval=df.iloc[i][key] # this is averaged value computed above (possibly zero if below thresholds )
thiserr=thisval*totalerr # error (in Fe) as actual value based on average of multiple peaks
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+ key # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# end of loop calculation for each spectrum
# organize data based on mycols template
dropcollist=[s for s in df.dtypes.index if s not in mycols]
df.drop(dropcollist, axis=1, inplace=True) # drops extraneous columns
df=df[mycols] # put in correct order
return df
def calcadjcounts(df, AESquantparams, sig=2, kerrors=True):
'''For each elemental peak in interquantlog, calculate or recalcuated adjusted counts using k-factor2 and mass
result stored in adjcnts column and used for subsequent compositional determinations
can change AESquantresults and recalc at any time; sig (aka 2 sigma errors) is default setting
kerrors -- include error associated with kfactor (along with Poisson errors)'''
if 'Adjcnts' not in df:
df['Adjcnts']=0.0 # new column for adjusted amplitude (if not already present)
if 'Erradjcnts' not in df:
df['Erradjcnts']=0.0 # new column for associated error
if 'err%cnts' not in df:
df['err%cnts']=0.0 # percentage error only from counting statistics (not including kfactor err)
if 'err%total' not in df:
df['err%total']=0.0 # percentage error only from counting statistics (not including kfactor err)
# loop for each element, mask df, get appropriate k-factor & mass
df=df.reset_index(drop=True) # go ahead and reset index
elemlist=np.ndarray.tolist(df.Element.unique()) # list of unique elements from df
for i,elem in enumerate(elemlist):
match=AESquantparams[(AESquantparams['element']==elem)]
match=match.reset_index(drop=True)
kfactor2=match.iloc[0]['kfactor2'] # kfactor and mass for this element/peak
errkf2=match.iloc[0]['errkf2'] # percent error in above for integ method
mass=match.iloc[0]['mass']
elemmask=(df['Element']==elem) # mask for this element in loop
for j in range(0,len(df)): # loop and set adjamplitude to amp*kfact/mass
if elemmask[j]==True: # row has this element
newval=df.iloc[j]['Integcounts']*kfactor2/mass
percerr=sig/np.sqrt(df.iloc[j]['Integcounts']) # 2/sqrt(N) is percent error
totalerr=np.sqrt(errkf2**2+percerr**2) # combine in quadrature
err=newval*totalerr # error value is adjusted counts * 2 sig error percentage
df=df.set_value(j,'Adjcnts',newval)
df=df.set_value(j,'err%cnts',percerr)
df=df.set_value(j,'err%total',totalerr)
df=df.set_value(j,'Erradjcnts',err)
return df
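# Worked example with hypothetical numbers: Integcounts=10000, kfactor2=0.5, mass=24,
# errkf2=0.05 and sig=2 give
#   Adjcnts    = 10000*0.5/24            ~ 208.3
#   err%cnts   = 2/sqrt(10000)           = 0.02
#   err%total  = sqrt(0.05**2 + 0.02**2) ~ 0.054
#   Erradjcnts = 208.3*0.054             ~ 11.2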
''' TESTING
df=lowerfitpeak
'''
def makelinebackground(df, areanum, fitparams):
'''Create linear background under peak region
passed small slice of Augerfile df just peak region and small adjacent background '''
if fitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
slope=fitparams[0]
intercept=fitparams[1]
backfitname='Backfit'+str(areanum)
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=slope*xval+intercept
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def makeinterplinebackground(df, areanum, fitbounds, lowfitparams, upperfitparams):
'''Create interpolated background from lower and upper peak fits
passed small slice of Augerfile df just peak region and small adjacent background
fitparams are 1)slope, 2) intercept 3) stdev of slope 4) stdev of intercept 5) rvalue'''
# check for n/a values
if lowfitparams[0]=='n/a' or upperfitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
lowslope=lowfitparams[0]
lowintercept=lowfitparams[1]
upslope=upperfitparams[0]
upintercept=upperfitparams[1]
backfitname='Backfit'+str(areanum)
for i in range(fitbounds[0],fitbounds[1]+1):
xval=df.loc[i]['Energy']
df=df.set_value(i,backfitname,xval*lowslope+lowintercept) # set linear values below peak (lower linear fit)
for i in range(fitbounds[2],fitbounds[3]+1):
xval=df.loc[i]['Energy']
df=df.set_value(i,backfitname,xval*upslope+upintercept) # set linear values above peak (upper linear fit)
# find length of gap region
evstep=1/(fitbounds[2]-fitbounds[1]-1) # Length of intermediate region (between lower and upper regions)
startrow=fitbounds[1]+1
for i in range(fitbounds[1]+1,fitbounds[2]): # now do region in between
xval=df.loc[i]['Energy']
yval=(1-evstep*(i-startrow))*(lowslope*xval+lowintercept)+evstep*(i-startrow)*(upslope*xval+upintercept)
df=df.set_value(i,backfitname,yval)
return df # return same df with interpolated background region added
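# In the gap region the background is a linear blend of the two fits: with
# w = evstep*(i - startrow) ramping from 0 at the first gap row toward 1 at the upper
# fit region, yval = (1 - w)*(lower line) + w*(upper line).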
def fitCapeak(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform linear fit
return chunk with backfit column added '''
if 'Smcounts'+str(areanum) in df: # some dfs may lack smoothed counts
colname='Smcounts'+str(areanum)
else:
colname='Counts'+str(areanum) # probably no critical difference between either
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
parabfunc=lambda x, a, b, c: a*x**2 + b*x + c # lambda definition of second-order (parabolic) poly
fitparams, cov =curve_fit(parabfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-parabfunc(xcol,*fitparams)), (ycol-parabfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# diagonal of covariance matrix contains variances for fit params
except: # deal with common problems with linregress
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a') # return all n/a
R2='n/a'
return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**2+ fitparams[1] * xval + fitparams[2]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makeCabackground(df, areanum, fitparams):
''' Fill background col of auger spe file with values derived from 2nd order poly fit (pass the region under the peak
that is not fitted by fitCapeak, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=3: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=A*xval**2+ B* xval +C
df=df.set_value(index,backfitname,yval)
return df
'''
For background fit testing
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[0][0]
fittype=Elemdata[0][1]
integpeak=Elemdata[0][2]
lower1=Elemdata[0][3]
lower2=Elemdata[0][4]
upper1=Elemdata[0][5]
upper2=Elemdata[0][6]
df=fitregion
Augerfile.to_csv('C2010W_18Nov15_12231225.csv', index=False)
'''
''' TESTING OF BELOW FITS
plt.plot(xcol,ycol,'b-') # actual data in blue
plt.plot(xcol,gaussian(fitparams, xcol),'r-') # Gaussian fit in red
'''
def fitgauss(df, areanum, width, elem, AugerFileName, addgauss=True):
''' Gaussian fit of direct peaks (pass Augerfile slice just around the peak region);
no need to save the Gaussian fit itself, just return the width and other params;
integwidth is passed from the AESquantparams value'''
peakname='Peaks'+str(areanum)
# Remove nan values from peak region
df=df.dropna(subset=[peakname]) # remove nan entries from peak
# estimate initial Gaussian parameters from data
    if df.empty: # deal with prior failed background fits (no data in this region after dropna)
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
xc=df[peakname].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df[peakname].sum() # decent area estimate
y0=0 #
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df[peakname] # Counts1, Counts2 or whatever
xcol=xcol.as_matrix() # convert both to numpy matrices
ycol=ycol.as_matrix()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
# sigma2FWHM = lambda sigma: sigma * sqrt(2 * log(2)) * 2 / sqrt(2) # convert Gaussian widths to FWHM?
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
except: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
if addgauss==True:
gaussname="Gauss"+str(areanum)
df[gaussname]='' # add col for gaussian fit
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
df.set_value(index,gaussname,yval)
return df, fitparams, rsquared, ier
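# The width returned in fitparams[1] above is the Gaussian sigma of exp(-(x-xc)^2/(2*sigma^2)); if a FWHM is
# needed (cf. the commented-out sigma2FWHM lambda above, which corresponds to a different width convention),
# the standard conversion is FWHM = 2*sqrt(2*ln 2)*sigma. Sketch:
'''
import numpy as np
sigma2FWHM = lambda sigma: 2*np.sqrt(2*np.log(2))*sigma # ~2.3548*sigma for exp(-(x-xc)^2/(2*sigma^2))
'''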
''' TESTING
For background fit testing
df=fitregion
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[1][0]
fittype=Elemdata[1][1]
integpeak=Elemdata[1][2]
lower1=Elemdata[1][3]
lower2=Elemdata[1][4]
upper1=Elemdata[1][5]
upper2=Elemdata[1][6]
integwidth=Elemdata[0][8]
if ier in [1,2,3,4]: print ('true')
'''
def findintegparams(Augerfile, Elements, AESquantparams, Shifts):
'''Grab integration width and expected counts peak position (also incorporates shift from deriv method)'''
halfwidths=[]
peakcenters=[]
Energyvals = Augerfile.Energy # for finding index #s corresponding to energy vals for this spectrum
for i, elem in enumerate(Elements):
thiselem=AESquantparams[AESquantparams['element']==elem]
        if len(thiselem)!=1:
            print('WARNING ... AES quant parameters not found for ', elem)
            halfwidths.append(4) # default integration width
            peakcenters.append('n/a') # no valid peak center for this element
            continue # skip to next element rather than aborting the whole loop
halfwidths.append(int((thiselem.iloc[0]['integwidth']-1)/2)) # integration uses half-width on either side of center
integpeakeV=thiselem.iloc[0]['negpeak']-thiselem.iloc[0]['integpeak']+Shifts[i] # shift of direct peak (defined relative to deriv peak)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakeV)) # tuple with index of closest and closest value
peakcenters.append(temptuple[0]) # first of tuple is closest index #
return halfwidths, peakcenters
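# The min(enumerate(...)) idiom above finds the channel whose energy is closest to a target value; the same lookup
# can be done with numpy, which is usually faster for long spectra. Hedged sketch (assumes Energyvals and
# integpeakeV as defined in findintegparams above):
'''
import numpy as np
energies = np.asarray(Energyvals, dtype=float)
closestindex = int(np.abs(energies - integpeakeV).argmin()) # same index as the min(enumerate(...)) version
'''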
def integpeaks(Augerfile, Backfitparams, areanum, Elements, Shifts, logmatch, AESquantparams):
    ''' Integrate and Gaussian-fit each element's direct peak for one spe file/area;
    Shifts is a list of energy shifts of the negpeak (same order as Elements) and Augerfile is the opened source spectrum;
    peak backgrounds above and below each peak were already fitted and saved (Backfitparams holds those linear fit params,
    covering all elements but only this Augerfile); desired elements out of the data range were skipped in the prior findindices function
    '''
#create Smdifpeaks dataframe for temp storage of each peak's params
Backfitparams=Backfitparams.dropna(subset=['Rval1']) # skip integration/Gaussian fit if background fit failed
AugerFileName=logmatch.Filename #
# Create temp df to hold and pass linear fit data
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels']
Integresults=pd.DataFrame(columns=mycols) # empty df for all integ results for elems in this spe file
peakname='Peaks'+str(areanum) # this is counts - background (but only calculated in vicinity of known elemental peaks)
backfitname='Backfit'+str(areanum)
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
halfwidths, peakcenters=findintegparams(Augerfile, Elements, AESquantparams, Shifts)
# loop through and fit all peaks for each element in this spatial area
for i, elem in enumerate(Elements):
if i not in Backfitparams.index: # skips integ calc if backfit is n/a
continue # problem here ...why are all indices zero for Backfitparams?
thisbackfit=Backfitparams[Backfitparams['Element']==elem]
if len(thisbackfit)!=1:
print('Problem retrieving fit boundaries for ',elem, ' in ', AugerFileName)
continue
lower1=thisbackfit.iloc[0]['Lower1']
upper2=thisbackfit.iloc[0]['Upper2']
fitregion=Augerfile[lower1:upper2+1]
if fitregion.empty==True: # skip if no data present (already should be skipped in Elemdata)
print('No data present for ', elem, ' in ', AugerFileName)
continue
# also need accurate lower/upper bounds ... available from backfitparams
Integresult=pd.DataFrame(index=np.arange(0,1),columns=mycols) # blank df row for this element
# get integpeak, kfact, integwidth, siglevel
# addgauss if save of gaussian peak fit in Augerfile is desired
# Probably could skip Gaussian fitting entirely if peak is weak (check smdiff)
fitregion, fitparams, rsquared, ier = fitgauss(fitregion, areanum, halfwidths[i], elem, AugerFileName, addgauss=True)
addgauss=True # maybe pass this arg from elsewhere
if addgauss==True and ier in [1,2,3,4]: # copy gaussian fit over to csv file if successful
gaussname="Gauss"+str(areanum)
if gaussname not in Augerfile.dtypes.index: # add col if not already present
Augerfile[gaussname]='' # add col for gaussian fit
# Copy gaussian fit to Augerfile... fitregion only modified in new Gauss peak fit column
Augerfile.loc[fitregion.index,fitregion.columns]=fitregion
# if gaussian fit is successful set center integration channel to index nearest xc
        # even when the ier flag (1,2,3,4) indicates the fit converged, also require rsquared above a threshold
if rsquared!='n/a': # skip integcounts calc but do put 'n/a' entries in df
if rsquared>0.4:
xc=fitparams[0] # center of gaussian fit
center=int(round(xc,0))
tempdf=fitregion[fitregion['Energy']==center]
try:
centerindex=tempdf[peakname].idxmax() # corresponding index # of peak maximum
except:
print('Gaussian fit center out of data range for ', elem, ' in ', AugerFileName)
# use center based on deriv shift and relative offset (index corresponding to integpeakeV)
centerindex=peakcenters[i] # backup method of finding center of integration region
else: # indication of poor Gaussian fit R2<0.4 (use prior knowledge of peak position)
print('Failed gaussian fit for ', elem, ' in ', AugerFileName)
# set center integration channel to value passed by integpeak
# this is ideal energy value but adjusted by shift found using smooth-diff quant method
centerindex=peakcenters[i] # already stores index number of central peak (ideal - sm-diff shift value)
# Still do the counts integration for poor gaussian fits
# perform integration over peak center channel + integwidth on either side
Augerpeak=Augerfile[centerindex-halfwidths[i]:centerindex+halfwidths[i]+1]
integcounts=Augerpeak[peakname].sum() # get counts sum
backgroundcnts=Augerpeak[backfitname].sum() # sum counts over identical width in background fit
# Used for peak significance i.e. typically 2 sigma of background integration over identical width
# full integ width is 1.2*FWHM but integwidth here is closest integer half-width
# Write fit params from tuple over to Integresult df
Integresult.iloc[0]['Integcounts']=integcounts
Integresult.iloc[0]['Backcounts']=backgroundcnts
Integresult.iloc[0]['Significance']=round(integcounts/(np.sqrt(backgroundcnts)),3)
# TODO add 2/sqrt(n) calc of associated percent error (also can calculate later)
Integresult.iloc[0]['Numchannels']=halfwidths[i]*2+1
Integresult.iloc[0]['Rsquared']=rsquared
Integresult.iloc[0]['Element']=elem
# These will be n/a if fit fails
Integresult.iloc[0]['Xc']=fitparams[0]
Integresult.iloc[0]['Width']=fitparams[1]
Integresult.iloc[0]['Peakarea']=fitparams[2]
Integresult.iloc[0]['Y0']=fitparams[3]
Integresults=Integresults.append(Integresult, ignore_index=True) # add row to list with valid
# end of loop through each element
# assign params that are common to all areas/all peaks into rows of df (copied from original log)
for index,row in Integresults.iterrows():
        Integresults.loc[index,'Filenumber']=logmatch.Filenumber
        Integresults.loc[index,'Filename']=logmatch.Filename
        Integresults.loc[index,'Filepath']=logmatch.FilePath
        Integresults.loc[index,'Sample']=logmatch.Sample
        Integresults.loc[index,'Comments']=logmatch.Comments
        Integresults.loc[index,'Areanumber']=areanum
Integresults=Integresults[mycols] # put back in original order
return Augerfile, Integresults # df with direct peak fitting info for all areas/ all elements
''' TESTING BACKGROUNDS
elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel=Elemdata[5]
'''
def fitcubic(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform cubic fit
return chunk with backfit column added '''
if 'Smcounts'+str(areanum) in df:
colname='Smcounts'+str(areanum) # use smoothed data for background fits
else:
colname='Counts'+str(areanum)
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
cubicfunc=lambda x, a, b, c, d: a*x**3 + b*x**2 + c*x + d # lambda definition of cubic poly
fitparams, cov =curve_fit(cubicfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-cubicfunc(xcol,*fitparams)), (ycol-cubicfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# TODO insert special handling for failed fits (some R2 threshold)
# Maybe restrictions on curvature
    except: # deal with failed fit
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams=('n/a','n/a','n/a','n/a') # return all n/a
        R2='n/a'
        return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**3+ fitparams[1] * xval**2 + fitparams[2] * xval + fitparams[3]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makecubicbackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from the cubic (3rd order) poly fit; pass the region under the peak,
    which is not fitted by fitcubic (that fit only grabs the adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=4: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
D=fitparams[3]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval= A * xval**3+ B * xval**2 + C * xval + D
df=df.set_value(index,backfitname,yval)
return df
def makesavgol(df, areanum, evbreaks):
    '''Perform python Savitzky-Golay smooth-diff, used to guide selection of background regions;
    performed in chunks between evbreaks (list of index #s) so it works for survey or multiplex scans;
    adds a column to Augerfile and returns it
    '''
countsname='Counts'+str(areanum)
# add savgol column (only called if not present)
savgolname='Savgol'+str(areanum)
df[savgolname]=0.0 # add/initialize col for 2nd deriv Sav-gol
# Add 1 to last region boundary to avoid data truncation problem
evbreaks[-1]=evbreaks[-1]+1
for i in range(1,len(evbreaks)): # region 1 to nth region
thisreg=df.loc[evbreaks[i-1]:evbreaks[i]-1] # slice into separate multiplex regions and process separately
thisreg=thisreg[countsname] # convert to Series (keep these index)
myarr=np.asarray(thisreg) # convert to numpy array
window_size=11
deriv=2
order=2 # order of savgol fit
rate=1
order_range = range(order+1) # range object
half_window = (window_size -1) // 2 # type int
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# b is matrix 3 by window size
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) # series as long as array
# linalg.pinv gets pseudo-inverse of a matrix (window-sized series)
# .A of any matrix returns it as ndarray object
# Pad the signal at the extremes with values taken from the signal itself
firstvals = myarr[0] - np.abs(myarr[1:half_window+1][::-1] - myarr[0] )
lastvals = myarr[-1] + np.abs(myarr[-half_window-1:-1][::-1] - myarr[-1])
myarr= np.concatenate((firstvals, myarr, lastvals))
# Now convolve input signal and sav-gol processing 1D array .. thisreg is numpy array w/ savgol results
myarr=np.convolve( myarr, m[::-1], mode='valid')
thisreg.loc[evbreaks[i-1]:evbreaks[i]-1]=myarr # copies numpy array but keeps same indices
# for loop endpoint is 1 off from df indexing (due to different inclusion rules for last point of range)
for index in range(evbreaks[i-1],evbreaks[i]):
df.set_value(index,savgolname,thisreg.loc[index]) # copy vals from series into entire spe df
    return df # returns Savitzky-Golay smoothed 2nd derivative over the same full region
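# The convolution above is a hand-rolled Savitzky-Golay 2nd-derivative filter; scipy ships the same filter as
# scipy.signal.savgol_filter. Hedged sketch of the equivalent call on one multiplex region (window and order match
# the constants above; edge handling differs slightly from the manual signal mirroring):
'''
import numpy as np
from scipy.signal import savgol_filter
region = np.asarray(thisreg) # counts for one region between evbreaks
smderiv2 = savgol_filter(region, window_length=11, polyorder=2, deriv=2)
'''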
def fitsingleline(Augerfile, areanum, fitbounds):
    '''Perform a single linear fit over the entire background region (lower and upper) when the two separately fitted
    lines give similar results; fitbounds is a length-4 list of lower and upper index # boundaries; fitparams are not returned
    (since these shouldn't be significantly different from those already stored); returns Augerfile with background fit added
    '''
# linear fit over both background regions
cntname='Counts'+str(areanum) # use counts itself (not any smoothed counts)
backfitname='Backfit'+str(areanum)
indexrange=[i for i in range(fitbounds[0],fitbounds[1]+1)]
indexrange.extend([i for i in range(fitbounds[2],fitbounds[3]+1)])
fitregion=Augerfile[Augerfile.index.isin(indexrange)]
xdata=fitregion['Energy']
ydata=fitregion[cntname]
slope,intercept=np.polyfit(xdata, ydata, 1) # single linear fit over this region
for i in range(fitbounds[0],fitbounds[3]+1):
xval=Augerfile.loc[i]['Energy']
        Augerfile=Augerfile.set_value(i,backfitname,xval*slope+intercept) # set single-line background values across the entire fit region
return Augerfile
def comparelinfits(lowfitparams,upperfitparams, thresh=2):
    ''' See if linear fits above and below the peak are significantly different (i.e. do slopes and intercepts
    agree within 2 stdevs, or another chosen threshold)'''
# fitparams are: 1) slope, 2) intercept 3) stdev of slope 4) stdev of intercept 5) R-val
slope1, intercept1, errslope1, errintercept1, rval1 =lowfitparams
slope2, intercept2, errslope2, errintercept2, rval2 =upperfitparams
if slope1>slope2:
if slope1-thresh*errslope1 < slope2+thresh*errslope2: # no sig diff in slope
slopediff=False
else:
slopediff=True
else:
if slope2-thresh*errslope2 < slope1+thresh*errslope1: # no sig diff in slope
slopediff=False
else:
slopediff=True
# same comparison for intercepts
if intercept1>intercept2:
if intercept1-thresh*errintercept1 < intercept2+thresh*errintercept2: # no sig diff in intercept
interceptdiff=False
else:
interceptdiff=True
else:
if intercept2-thresh*errintercept2 < intercept1+thresh*errintercept1: # no sig diff in intercept
interceptdiff=False
else:
interceptdiff=True
if not slopediff and not interceptdiff:
diff=False
else:
diff=True
return diff
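# The branchy comparison above reduces to a single overlap test per parameter; compact equivalent sketch
# (same variable names as in comparelinfits):
'''
slopediff = abs(slope1 - slope2) >= thresh*(errslope1 + errslope2)
interceptdiff = abs(intercept1 - intercept2) >= thresh*(errintercept1 + errintercept2)
diff = slopediff or interceptdiff
'''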
def refineboundfit(xdata,ydata, maxshift, fitbounds, AugerFileName, elem):
    '''Perform linear fit over a region, compute residuals, and adjust the fit boundaries to exclude edges of peak regions that may intrude;
    fitbounds is a length-2 list of index # boundaries; returns [slope, intercept, sd_slope, sd_intercept, R2], the altered fit boundaries, and the fitted energy range
    '''
#TODO consider Ramer-Douglas-Peucker algorithm as next step
# linear fit over both background regions
lowbound=fitbounds[0]
upbound=fitbounds[1]
slope,intercept=np.polyfit(xdata, ydata, 1)
# manually calculate residuals
yfit=slope*xdata+intercept
resid=np.subtract(ydata,yfit)
thresh=1.5*resid.std() # set threshold above which point is removed for having high residual
# refit fit boundaries based on above
start=0
end=len(xdata)-1 # adjust for zero based indexing
try:
for i in range(0,maxshift):
if resid[i]>thresh:
start=start+1
lowbound=lowbound+1 # adjust index # boundary
else:
break
for i in range(len(resid)-1,len(resid)-maxshift-1,-1):
if resid[i]>thresh:
end=end-1
upbound=upbound-1 # adjust index # boundary
else:
break
except:
print('Background fit range adjustment failed for ',AugerFileName,'. Range for ', elem, 'is', str(lowbound),'-', str(upbound))
# Truncate xdata, ydata based on excluded points
xdata=xdata[start:end+1] # energy values
ydata=ydata[start:end+1]
    minenergy=xdata[0] # first element of the truncated array is the minimum energy
maxenergy=xdata[-1] # last element will be max
evrange=str(minenergy)+'-'+str(maxenergy)
# refit line using optimum fit boundaries (returns slope,intercept, r_value, p_value, std_err)
(slope,intercept), cov = np.polyfit(xdata, ydata, 1, cov=True)
sd_slope=np.sqrt(cov[0,0]) # variance of slope is element 0,0 in covariance matrix
sd_intercept=np.sqrt(cov[1,1]) # same for standard deviation of intercept (sqrt of variance)
# Rvalue calculation from np.polyfit
p = np.poly1d((slope,intercept)) # polynomial funct w/ these params
# fit values, and mean
yhat = p(xdata) # or [p(z) for z in x]
ybar = np.sum(ydata)/len(ydata) # or sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
sstot = np.sum((ydata - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
    # regression sum of squares (fit minus mean) over total sum of squares (data minus mean)
    r_value = ssreg / sstot # coefficient of determination (R^2) rather than Pearson's r
linfitparams = [slope, intercept, sd_slope, sd_intercept, r_value]
# skip pvalue and std err (already have error for slope and intercept separately )
return linfitparams, lowbound, upbound, evrange
def fitpeakbackground(Augerfile, fitbounds, areanum, maxshift, elem, AugerFileName):
    '''Simultaneously deal with the linear fits below and above the peak of interest;
    use residual/outlier detection to trim the boundaries of the linear fit regions (more robust than a derivative-threshold style);
    returns lower and upper slopes/intercepts;
    the background is either a single linear fit or an interpolation between the two;
    fitbounds (length-4 list) holds index #s of the lower and upper bounds of the fit below the peak and the fit above the peak
    '''
cntname='Counts'+str(areanum) # can use Counts or Smcounts ... probably smcounts is better for background fitting
energybounds=[] # two strings with fitted energy ranges
# refine fit boundaries for lower region
lowfitreg=Augerfile[fitbounds[0]:fitbounds[1]+1] # already tested for out-of-range in definefitreg
xdata=lowfitreg['Energy'].as_matrix() # numpy arrays
ydata=lowfitreg[cntname].as_matrix()
lowfitparams, fitbounds[0],fitbounds[1], evrange=refineboundfit(xdata,ydata, maxshift, fitbounds[0:2], AugerFileName, elem)
# fitparams are: 1) slope, 2) intercept 3) stdev of slope 4) stdev of intercept 5) R-val of fit
energybounds.append(evrange) # string with energy range of lower fit
# refine fit boundaries for upper region
upfitreg=Augerfile[fitbounds[2]:fitbounds[3]+1] # already tested for out-of-range in definefitreg
xdata=upfitreg['Energy'].as_matrix()
ydata=upfitreg[cntname].as_matrix()
# returns best linear fit of region above peak and optimum fit boundaries
upperfitparams, fitbounds[2],fitbounds[3], evrange=refineboundfit(xdata,ydata, maxshift, fitbounds[2:], AugerFileName, elem)
energybounds.append(evrange) # energy range of upper fit
    # if slopes/intercepts are in rough agreement, make a single linear fit over the entire background region instead
mybool=comparelinfits(lowfitparams,upperfitparams, thresh=3) # if linear fit results above and below peak differ significantly
if mybool:
Augerfile = makeinterplinebackground(Augerfile, areanum, fitbounds, lowfitparams, upperfitparams)
else:
Augerfile=fitsingleline(Augerfile, areanum, fitbounds) # single linear fit over lower and upper simultaneous
# TODO should saved fitparams be altered to make clear that a single linear fit was used
return Augerfile, lowfitparams, upperfitparams, fitbounds, energybounds
def definefitreg(bound1, bound2, maxshift, Augerfile, evbreaks):
    ''' Widen the fit region from its standard size (based on allowed maxshift) while ensuring that multiplex evbreaks are not included
    and that the region does not extend beyond the data; both fitbounds and evbreaks are index #s for this file (not energy values) '''
lowbound=bound1
for i in range(bound1, bound1-maxshift-1, -1): # lower by allowed shift but ensure not at data boundary
if i not in evbreaks and i in Augerfile.index: # also ensure we don't exit boundaries of dataset here
lowbound=i
else:
break
upbound=bound2
    for i in range(bound2, bound2+maxshift+1): # raise upper bound by allowed shift but ensure not at data boundary
if i not in evbreaks and i in Augerfile.index:
upbound=i
else:
break
return lowbound, upbound # this is range of Auger slice that'll be used
def fitbackgrounds(Augerfile, areanum, Elements, Shifts, AESquantparams, logmatch):
    ''' Fit the background regions around each element's direct peak for one spe file/area;
    uses the element list, peak shifts and AESquantparams to locate each fit region, fills the background (and background-subtracted
    peak) columns of Augerfile, and returns Augerfile plus a dataframe of background fit params for each element'''
# integpeak is position of direct peak relative to ideal negative peak in smooth-diff S7D7
# lower1,lower2 and upper1,upper2 are boundaries of lower and higher energy linear backgroundfit (again energies relative to negpeak)
# Incorporate shifting of background fit regions into this section from ideal position based on savgol deriv
Energyvals = Augerfile.Energy #
evbreaks=logmatch.Evbreaks # needed to ensure fit boundaries don't cross into adjacent element
if type(evbreaks)!=list: # if reloaded after save, needs conversion to list
tempstring=evbreaks.split('[')[1] # remove brackets from list
tempstring=tempstring.split(']')[0]
evbreaks=[int(s) for s in tempstring.split(',')] # convert string to list of break index values
AugerFileName=logmatch.Filename #
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Date', 'Areanumber', 'Element', 'Lower1', 'Lower2', 'Upper1',
'Upper2', 'Lowrange','Highrange','Peakshift', 'Fittype', 'P1','P1stdev','P2','P2stdev','Rval1','P3','P3stdev','P4','P4stdev','Rval2']
Backfitparams=pd.DataFrame(columns=mycols) # empty df
for i, elem in enumerate(Elements):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thisshift=Shifts[i] # shift in eV/index # corresponding to this peak from prior smdif quant
if thisshift=='n/a': # peak not in smdifpeakslog ... usually data out of range
thisshift=0 # just set shift to zero to avoid problems
# integ peak position value is relative to negpeak in smooth-diff (i.e. -5 is 5 eV below ideal negpeak)
integpeakev=thiselemdata.negpeak + thiselemdata.integpeak # ideal energy value of negative Auger peak in smooth-diff spectrum
lower1ev=thiselemdata.negpeak + thiselemdata.lower1 + thisshift # lower bound of lower energy fit region
lower2ev=thiselemdata.negpeak + thiselemdata.lower2 + thisshift # upper bound of lower energy fit region
upper1ev=thiselemdata.negpeak + thiselemdata.upper1 + thisshift # lower bound of higher energy fit region
upper2ev=thiselemdata.negpeak + thiselemdata.upper2 + thisshift # upper bound of higher energy fit region
# width=int(thiselemdata.searchwidth) # search width used to find actual peak in real data
# find index # for ideal neg and pos peaks... use lambda funct.
# min(Energyvals, key=lambda x:abs(x-negpeakev)) gives value but not index #
# convert each energy value into index # (global shift already applied)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakev)) # tuple with index of closest and closest value
integpeak=temptuple[0] # first of tuple is index #
peakinrange=temptuple[1]-integpeakev # should be ~0 if desired peak is in data range
if abs(peakinrange)>0.5: # Must skip entire desired element here if it's out of range of the data in this particular spe
print(elem,' is out of data range for ', AugerFileName)
continue
fitbounds=[]
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower1ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-lower2ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper1ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-upper2ev)) # tuple with index of closest and closest value
fitbounds.append(temptuple[0]) # first of tuple is index #
maxshift=int(thiselemdata.windowshift) # get allowed max energy shift in channels (normally 1eV/chan)... used to expand background fit window
fittype=thiselemdata.fittype # default type of peak fit for given element
if fittype=='line':
fitbounds[0], fitbounds[1]= definefitreg(fitbounds[0], fitbounds[1], maxshift, Augerfile, evbreaks) # bounds for lower fit region
fitbounds[2], fitbounds[3]= definefitreg(fitbounds[2], fitbounds[3], maxshift, Augerfile, evbreaks) # bounds for upper fit region
# return fitpeakdf (new background fits), fitparams (slope,intercept, point fit range), R2 val (for tossing vals)
# Since linear fit may span both, pass both regions and deal with them simultaneously
Augerfile, lowfitparams, upperfitparams, fitbounds, energybounds=fitpeakbackground(Augerfile, fitbounds, areanum, maxshift, elem, AugerFileName)
elif fittype=='Ca': # special treatment
# find relative minimum if present between C falling edge and Ca peak
if 'Smcounts'+str(areanum) in Augerfile:
countname='Smcounts'+str(areanum)
else:
countname='Counts'+str(areanum)
minindex=Augerfile[fitbounds[0]:fitbounds[0]+10][countname].idxmin() # index value of min left of Ca peak (counts or smoothed counts)
# minval=Augerfile[lower1:lower1+10][countname].min()
# maxindex=Augerfile[integpeak-5:integpeak+5][countname].idxmax() # Ca peak index if present
# maxval=Augerfile[integpeak-5:integpeak+5][countname].max()
# polynomial fit over two pts at relative min left of peak and small region right of peak
fitbounds[0]=minindex-1
fitbounds[1]=minindex
# Now refine boundaries/ find linear region above Ca (and C) peaks
# Expands region of fit if no peaks are encountered
fitbounds[2], fitbounds[3]= definefitreg(fitbounds[2], fitbounds[3], maxshift, Augerfile, evbreaks)
thispeak=pd.concat([Augerfile[minindex-1:minindex+1],Augerfile[fitbounds[2]:fitbounds[3]]])
            # Get energy range string from fit region on the low energy side
lowevrange=str(round(Augerfile[minindex-1:minindex+1]['Energy'].min(),0))+'-'+ str(round(Augerfile[minindex-1:minindex+1]['Energy'].max(),0))
# Get a few more at upper energy end
upperevrange=str(round(Augerfile[fitbounds[2]:fitbounds[3]]['Energy'].min(),0))+'-'+ str(round(Augerfile[fitbounds[2]:fitbounds[3]]['Energy'].max(),0))
thispeak, fitparams, R2 =fitCapeak(thispeak, areanum, elem, AugerFileName) # polynomial fit
if R2!='n/a': # only copy successful fits (skip n/a)
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy over to full spe file
thispeak=Augerfile[minindex+1:integpeak+11] # actual peak region
thispeak = makeCabackground(thispeak, areanum, fitparams) # now fill peak region with 2nd order poly background
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy peak region to source data file
# Make subtracted peak
countname='Counts'+str(areanum)
peakname='Peaks'+str(areanum)
backfitname='Backfit'+str(areanum)
for index in range(fitbounds[1],fitbounds[2]):
Augerfile.set_value(index, peakname, Augerfile.loc[index][countname]-Augerfile.loc[index][backfitname])
else:
print('Need to write fitting functions for fittype', fittype,' for ', elem)
continue # next in loop to avoid errors below
# Make subtracted peak column (TODO maybe make this optional?)
countname='Counts'+str(areanum)
peakname='Peaks'+str(areanum)
backfitname='Backfit'+str(areanum)
for index in range(fitbounds[1],fitbounds[2]):
Augerfile.set_value(index, peakname, Augerfile.loc[index][countname]-Augerfile.loc[index][backfitname])
# TODO Integration
# create single-rowed dataframe for backfitparams of this element (out-of-range data already skipped)
Backfitparamrow=pd.DataFrame(index=np.arange(0,1),columns=mycols)
# transfer common parameters
Backfitparamrow.iloc[0]['Areanumber']=areanum
Backfitparamrow.iloc[0]['Element']=elem
Backfitparamrow.iloc[0]['Peakshift']=Shifts[i] # shift of this elem's peak based on derivative method
Backfitparamrow.iloc[0]['Filenumber']=logmatch.Filenumber
Backfitparamrow.iloc[0]['Filename']=logmatch.Filename
Backfitparamrow.iloc[0]['Filepath']=logmatch.FilePath
Backfitparamrow.iloc[0]['Sample']=logmatch.Sample
Backfitparamrow.iloc[0]['Comments']=logmatch.Comments
Backfitparamrow.iloc[0]['Date']=logmatch.Date
Backfitparamrow.iloc[0]['Fittype']=fittype # string with type of background fit to attempt
if fittype=='line':
Backfitparamrow.iloc[0]['Lower1']=fitbounds[0] # save boundaries of fit regions
Backfitparamrow.iloc[0]['Lower2']=fitbounds[1]
Backfitparamrow.iloc[0]['Upper1']=fitbounds[2]
Backfitparamrow.iloc[0]['Upper2']=fitbounds[3]
Backfitparamrow.iloc[0]['Lowrange']=str(energybounds[0]) # string with lower fitted eV range
Backfitparamrow.iloc[0]['Highrange']=str(energybounds[1])# string with upper fitted eV range
Backfitparamrow.iloc[0]['P1']=lowfitparams[0] # slope for lower fit
Backfitparamrow.iloc[0]['P2']=lowfitparams[1] # intercept for single fit
Backfitparamrow.iloc[0]['P1stdev']=lowfitparams[2] # stdev of slope
Backfitparamrow.iloc[0]['P2stdev']=lowfitparams[3] # stdev of intercept
Backfitparamrow.iloc[0]['Rval1']=lowfitparams[4] # R-value of fit
Backfitparamrow.iloc[0]['P3']=upperfitparams[0] # slope for upper fit
Backfitparamrow.iloc[0]['P4']=upperfitparams[1] # intercept for upper fit
Backfitparamrow.iloc[0]['P3stdev']=upperfitparams[2] # stdev of slope
Backfitparamrow.iloc[0]['P4stdev']=upperfitparams[3] # stdev of intercept
Backfitparamrow.iloc[0]['Rval2']=upperfitparams[4] # R-value of fit
if fittype=='Ca':
# copy from lowerfitparams to log df
Backfitparamrow.iloc[0]['Lower1']=fitbounds[0] # save boundaries of fit regions
Backfitparamrow.iloc[0]['Lower2']=fitbounds[1]
Backfitparamrow.iloc[0]['Upper1']=fitbounds[2]
Backfitparamrow.iloc[0]['Upper2']=fitbounds[3]
Backfitparamrow.iloc[0]['Lowrange']=lowevrange
Backfitparamrow.iloc[0]['Highrange']=upperevrange
Backfitparamrow.iloc[0]['P1']=fitparams[0] # A*x2 coeff
Backfitparamrow.iloc[0]['P2']=fitparams[1] # B*x coeff
Backfitparamrow.iloc[0]['P3']=fitparams[2] # C coeff
Backfitparamrow.iloc[0]['Rval1']=R2
Backfitparams=Backfitparams.append(Backfitparamrow)
Backfitparams=Backfitparams[mycols]
Backfitparams=Backfitparams.reset_index(drop=True) # removes duplicate indices which can cause later problems
return Augerfile, Backfitparams
def findpeakshifts(Augerfile, AESquantparams, logmatch, areanum, Smdifpeakslog, Elements):
    ''' Find shifts of negpeak positions for each element in the list for a single spe file (using
    smooth-diff data); also finds the direct-counts peak maximum and reconciles the two estimates;
    returns the shifts as a list; pass a log series (logmatch) with the filename for the given area
    '''
# TODO problem if len(Elements)!=len(Shifts) due to couldn't find peak error
Shiftdiffs=pd.DataFrame(columns=['Filename','Areanumber','Element','Avgshift','Intshift','Diffshift','Amplitude'])
filename=logmatch.Filename # get number from Series
thispeakslog= Smdifpeakslog[(Smdifpeakslog['Filename']==filename)&(Smdifpeakslog['Areanumber']==areanum)]
# need to match area number and file number for finding unique shift for this elem
Shifts=[] # shift in peak position suggested by smdiff quant method
for i, elem in enumerate(Elements):
# find peak location based on smooth-diff negative peak
smdifval= thispeakslog[(thispeakslog['PeakID']==elem)]
if len(smdifval)!=1: # peaks not present should have already been removed
print ("Couldn't find ", elem, " peak for area", str(areanum),"of spectrum ", filename)
diffshift='n/a'
ampl='n/a'
if len(smdifval)==1: # should be match for all peaks that are present
diffshift=smdifval.iloc[0]['Shift']
ampl=smdifval.iloc[0]['Amplitude']
# find direct counts peak maximum
thiselem= AESquantparams[(AESquantparams['element']==elem)]
if len(thiselem)!=1:
print("Couldn't find ", elem, " in AESquantparams.")
else:
# extract range
center=thiselem.iloc[0]['negpeak']+thiselem.iloc[0]['integpeak']
thismin=center-thiselem.iloc[0]['searchwidth']
thismax=center+thiselem.iloc[0]['searchwidth']
Augerslice=Augerfile[ (Augerfile['Energy']>=thismin) & (Augerfile['Energy']<=thismax)]
# x energy value of true counts max (use smoothed rather than raw counts)
intshift=int(Augerslice.loc[Augerslice['Smcounts'+str(areanum)].idxmax()]['Energy']-center)
# Reconcile differences
avgshift=int((diffshift+intshift)/2)
                thisrow = pd.Series([filename, areanum, elem, avgshift, intshift, diffshift, ampl], index=['Filename','Areanumber', 'Element','Avgshift','Intshift','Diffshift','Amplitude'])
# coding: utf-8
# ## Creating and Combining DataFrame
# <b>class pandas.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)</b>
#
# Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects.
#
# <b>class pandas.Series(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False)</b>
# One-dimensional ndarray with axis labels (including time series).
#
# Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN).
#
# ### Here are the main steps we will go through
# * How to create dataframe using pandas?
# * How to combine two data set using pandas?
#
# This is just a little illustration.
#
# <img style="float: left;" src="https://www.tutorialspoint.com/python_pandas/images/structure_table.jpg"></img>
# In[5]:
import pandas as pd
import numpy as np
# #### How to create dataframe using pandas?
# In[16]:
# working with series
#create a series
s = pd.Series(np.random.randn(5))
#create a dataframe column
df = pd.DataFrame(s, columns=['Column_1'])
df
# In[8]:
#sorting
df.sort_values(by='Column_1')
# In[10]:
#boolean indexing
#returns all rows where Column_1
#is less than or equal to 1
df[df['Column_1'] <= 1]
# In[230]:
# creating simple series
obj2 = pd.Series(np.random.randn(5), index=['d', 'b', 'a', 'c', 'e'])
obj2
# In[20]:
obj2.index
# In[229]:
# returns the value in e
obj2['e']
# In[26]:
# returns all values that are greater than -2
obj2[obj2 > -2]
# In[27]:
# we can do element-wise multiplication on a Series
obj2 * 2
# In[28]:
# we can do boolean expression
'b' in obj2
# In[228]:
# returns false, because 'g' is not defined in our data
'g' in obj2
# In[39]:
#Let's see we have this data
sdata = {'Cat': 24, 'Dog': 11, 'Fox': 18, 'Horse': 1000}
obj3 = pd.Series(sdata)
obj3
# In[227]:
# define an index list and build a Series with it (labels missing from sdata become NaN)
sindex = ['Lion', 'Dog', 'Cat', 'Horse']
obj4 = pd.Series(sdata, index=sindex)
obj4
# In[226]:
# checking if our data contains null
obj4.isnull()
# In[44]:
#we can add two Series together (values align on index labels)
obj3 + obj4
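# Note: addition aligns the two Series on their index labels; labels present in only one of them
# ('Fox', 'Lion') come out as NaN, while shared labels ('Cat', 'Dog', 'Horse') are summed.
# A quick way to see which labels end up missing:
(obj3 + obj4).isnull()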
# In[224]:
# we can create series calling Series function on pandas
programming = pd.Series([89,78,90,100,98])
import datetime
# from directory import BASE_DIR
import pandas as pd
import openpyxl
import os
import numpy as np
BASE_DIR = os.getcwd()
file_name = 'record.csv'
file_dir = os.path.join(BASE_DIR,'data',file_name)
columns = ['Start', ' End', 'Pomo', 'Task']
class Record:
def __init__(self):
if not os.path.exists(file_dir):
print("file does not exists. create one.")
self.create_csv()
self.pomo = np.nan
self.task = ""
self.duration = np.nan
self.recording = False
    def start_record(self, pomo):
        # map the numeric pomo code to a label (1 = focus, 2 = rest); rest sessions are recorded the same way here
        if pomo == 1:
            pomo = 'focus'
        elif pomo == 2:
            pomo = 'rest'
        print(pomo, 'starts recording')
        start = pd.to_datetime('today').replace(microsecond=0)
        self.row = [start, np.nan, pomo, np.nan]
        self.recording = True
def end_record(self, subtract_sec = 0):
if self.recording == True:
print('stops recording')
end = pd.to_datetime('today').replace(microsecond=0)
#subtract seconds
end = end - datetime.timedelta(seconds = subtract_sec)
self.row[1] = end
self.row[3] = self.task
self.recording = False
self._save_record()
def _save_record(self):
self.df = pd.read_csv(file_dir)
        row = pd.DataFrame([self.row], columns=columns)
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pandas as pd
# File called _pytest for PyCharm compatibility
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from tests.common import TestData, assert_almost_equal
class TestDataFrameMetrics(TestData):
funcs = ["max", "min", "mean", "sum"]
extended_funcs = ["median", "mad", "var", "std"]
filter_data = [
"AvgTicketPrice",
"Cancelled",
"dayOfWeek",
"timestamp",
"DestCountry",
]
@pytest.mark.parametrize("numeric_only", [False, None])
def test_flights_metrics(self, numeric_only):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
for func in self.funcs:
# Pandas v1.0 doesn't support mean() on datetime
# Pandas and Eland don't support sum() on datetime
if not numeric_only:
dtype_include = (
[np.number, np.datetime64]
if func not in ("mean", "sum")
else [np.number]
)
pd_flights = pd_flights.select_dtypes(include=dtype_include)
ed_flights = ed_flights.select_dtypes(include=dtype_include)
pd_metric = getattr(pd_flights, func)(numeric_only=numeric_only)
ed_metric = getattr(ed_flights, func)(numeric_only=numeric_only)
assert_series_equal(pd_metric, ed_metric, check_dtype=False)
def test_flights_extended_metrics(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on reduced set of data for more consistent
# median behaviour + better var, std test for sample vs population
pd_flights = pd_flights[["AvgTicketPrice"]]
ed_flights = ed_flights[["AvgTicketPrice"]]
import logging
logger = logging.getLogger("elasticsearch")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
for func in self.extended_funcs:
pd_metric = getattr(pd_flights, func)(
**({"numeric_only": True} if func != "mad" else {})
)
ed_metric = getattr(ed_flights, func)(numeric_only=True)
pd_value = pd_metric["AvgTicketPrice"]
ed_value = ed_metric["AvgTicketPrice"]
assert (ed_value * 0.9) <= pd_value <= (ed_value * 1.1) # +/-10%
def test_flights_extended_metrics_nan(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on single row to test NaN behaviour of sample std/variance
pd_flights_1 = pd_flights[pd_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
ed_flights_1 = ed_flights[ed_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_1, func)()
ed_metric = getattr(ed_flights_1, func)(numeric_only=False)
assert_series_equal(pd_metric, ed_metric, check_exact=False)
# Test on zero rows to test NaN behaviour of sample std/variance
pd_flights_0 = pd_flights[pd_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
ed_flights_0 = ed_flights[ed_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_0, func)()
ed_metric = getattr(ed_flights_0, func)(numeric_only=False)
            assert_series_equal(pd_metric, ed_metric, check_exact=False)
#!/home/cab22/miniconda3/bin/python
#SBATCH --account=commons
#SBATCH --export=All
#SBATCH --partition=commons
#SBATCH --time=24:00:00
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --time=24:00:00
#SBATCH --export=ALL
#SBATCH --array=0-15
#SBATCH --mem=16G
import os
import subprocess
import itertools
import numpy as np
import warnings
import pandas
import time
import argparse
class SlurmJobArray():
""" Selects a single condition from an array of parameters using the SLURM_ARRAY_TASK_ID environment variable. The parameters need to be supplied as a dictionary. if the task is not in a slurm environment, the test parameters will supersede the parameters, and the job_id would be taken as 0. Example:
parameters={"epsilon":[100],
"aligned":[True,False],
"actinLen":[20,40,60,80,100,120,140,160,180,200,220,240,260,280,300],
"repetition":range(5),
"temperature":[300],
"system2D":[False],
"simulation_platform":["OpenCL"]}
test_parameters={"simulation_platform":"CPU"}
sjob=SlurmJobArray("ActinSimv6", parameters, test_parameters)
:var test_run: Boolean: This simulation is a test
:var job_id: SLURM_ARRAY_TASK_ID
:var all_parameters: Parameters used to initialize the job
:var parameters: Parameters for this particular job
:var name: The name (and relative path) of the output
"""
def __init__(self, name, parameters, test_parameters={},test_id=0):
"""
Args:
name:
parameters:
Returns:
name:
parameters:
"""
self.all_parameters=parameters
self.test_parameters=test_parameters
#Parse the slurm variables
self.slurm_variables={}
for key in os.environ:
if len(key.split("_"))>1 and key.split("_")[0]=='SLURM':
self.slurm_variables.update({key:os.environ[key]})
#Check if there is a job id
self.test_run=False
try:
self.job_id=int(self.slurm_variables["SLURM_ARRAY_TASK_ID"])
except KeyError:
self.test_run=True
warnings.warn("Test Run: SLURM_ARRAY_TASK_ID not in environment variables")
self.job_id=test_id
keys=parameters.keys()
self.all_conditions=list(itertools.product(*[parameters[k] for k in keys]))
self.parameter=dict(zip(keys,self.all_conditions[self.job_id]))
#The name only includes enough information to differentiate the simulations.
self.name=f"{name}_{self.job_id:03d}_" + '_'.join([f"{a[0]}_{self[a]}" for a in self.parameter if len(self.all_parameters[a])>1])
def __getitem__(self, name):
if self.test_run:
try:
return self.test_parameters[name]
except KeyError:
return self.parameter[name]
else:
return self.parameter[name]
def __getattr__(self, name: str):
""" The keys of the parameters can be called as attributes
"""
if name in self.__dict__:
return object.__getattribute__(self, name)
elif name in self.parameter:
return self[name]
else:
return object.__getattribute__(self, name)
def __repr__(self):
return str(self.parameter)
    def keys(self):
        return str(self.parameter.keys())
def print_parameters(self):
print(f"Number of conditions: {len(self.all_conditions)}")
print("Running Conditions")
for k in self.parameter.keys():
print(f"{k} :", f"{self[k]}")
print()
def print_slurm_variables(self):
print("Slurm Variables")
for key in self.slurm_variables:
print (key,":",self.slurm_variables[key])
print()
def write_csv(self, out=""):
s=pandas.concat([pandas.Series(self.parameter), pandas.Series(self.slurm_variables)])
s['test_run']=self.test_run
s['date']=time.strftime("%Y_%m_%d")
s['name']=self.name
s['job_id']=self.job_id
if out=='':
s.to_csv(self.name+'.param')
else:
s.to_csv(out)
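# The class above maps SLURM_ARRAY_TASK_ID onto one combination of the parameter dictionary via the cartesian
# product of its values. Minimal sketch of that mapping on its own (hypothetical parameters):
'''
import itertools
parameters = {"epsilon": [50, 100], "repetition": range(2)}
keys = list(parameters.keys())
all_conditions = list(itertools.product(*[parameters[k] for k in keys]))
job_id = 3 # would normally come from SLURM_ARRAY_TASK_ID
condition = dict(zip(keys, all_conditions[job_id])) # {'epsilon': 100, 'repetition': 1}
'''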
################
# Coarse Actin #
################
#!/usr/bin/python3
"""
Coarse Actin simulations using a custom
"""
import openmm
import openmm.app
from simtk import unit
import numpy as np
import pandas
import sklearn.decomposition
import configparser
import prody
import scipy.spatial.distance as sdist
import os
import sys
__author__ = '<NAME>'
__version__ = '0.2'
#__location__ = os.path.realpath(
# os.path.join(os.getcwd(), os.path.dirname(__file__)))
#__location__="/scratch/cab22/Bundling/Persistence_length/Persistence_length"
__location__='.'
_ef = 1 * unit.kilocalorie / unit.kilojoule # energy scaling factor
_df = 1 * unit.angstrom / unit.nanometer # distance scaling factor
_af = 1 * unit.degree / unit.radian # angle scaling factor
def parseConfigTable(config_section):
"""Parses a section of the configuration file as a table"""
def readData(config_section, a):
"""Filters comments and returns values as a list"""
temp = config_section.get(a).split('#')[0].split()
l = []
for val in temp:
val = val.strip()
try:
x = int(val)
l += [x]
except ValueError:
try:
y = float(val)
l += [y]
except ValueError:
l += [val]
return l
data = []
for a in config_section:
if a == 'name':
columns = readData(config_section, a)
elif len(a) > 3 and a[:3] == 'row':
data += [readData(config_section, a)]
else:
print(f'Unexpected row {readData(config_section, a)}')
return pandas.DataFrame(data, columns=columns)
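# parseConfigTable expects a section whose 'name' entry lists the column names and whose 'rowN' entries hold the
# data rows (anything after '#' in a value is stripped). Hypothetical sketch of such a section and its use; the
# section and file names here are illustrative, not taken from the real actinff.conf:
'''
# [Particles] section of an illustrative .conf file:
# name = type mass q
# row1 = A1 41740 -11 # actin domain 1
# row2 = A2 41740 -11
import configparser
config = configparser.ConfigParser()
config.read('example.conf')
particles = parseConfigTable(config['Particles']) # -> DataFrame with columns type, mass, q
'''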
# Random rotation matrix
def random_rotation():
"""Generate a 3D random rotation matrix.
Returns:
np.matrix: A 3D rotation matrix.
"""
x1, x2, x3 = np.random.rand(3)
R = np.matrix([[np.cos(2 * np.pi * x1), np.sin(2 * np.pi * x1), 0],
[-np.sin(2 * np.pi * x1), np.cos(2 * np.pi * x1), 0],
[0, 0, 1]])
v = np.matrix([[np.cos(2 * np.pi * x2) * np.sqrt(x3)],
[np.sin(2 * np.pi * x2) * np.sqrt(x3)],
[np.sqrt(1 - x3)]])
H = np.eye(3) - 2 * v * v.T
M = -H * R
return M
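# random_rotation appears to follow Arvo's construction (a random rotation about z combined with a Householder
# reflection), which samples 3D rotations uniformly. A quick sanity check on any returned matrix is orthogonality
# plus unit determinant; hedged sketch:
'''
M = random_rotation()
assert np.allclose(M @ M.T, np.eye(3)) # rows/columns are orthonormal
assert np.isclose(np.linalg.det(M), 1.0) # proper rotation, no reflection
'''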
# Optimal rotation matrix
# The longest coordinate is X, then Y, then Z.
def optimal_rotation(coords):
c = coords.copy()
c -= c.mean(axis=0)
pca = sklearn.decomposition.PCA()
pca.fit(c)
# Change rotoinversion matrices to rotation matrices
rot = pca.components_[[0, 1, 2]]
if np.linalg.det(rot) < 0:
rot = -rot
#print(rot, np.linalg.det(rot))
return rot
class SystemData:
def __init__(self, atoms, bonds=None, angles=None, dihedrals=None, impropers=None):
self.atoms = atoms
self.atoms.index = np.arange(1, len(self.atoms) + 1)
self.masses = atoms[['type', 'mass']].drop_duplicates()
self.masses.index = np.arange(1, len(self.masses) + 1)
self.n_atoms = len(self.atoms)
self.n_atomtypes = len(self.masses)
if bonds is not None:
self.bonds = bonds
self.bonds.index = np.arange(1, len(self.bonds) + 1)
self.bondtypes = bonds[['type', 'x0', 'k']].drop_duplicates()
self.bondtypes.index = np.arange(1, len(self.bondtypes) + 1)
self.n_bonds = len(self.bonds)
self.n_bondtypes = len(self.bondtypes)
else:
self.bonds = pandas.DataFrame()
self.bondtypes = pandas.DataFrame()
self.n_bonds = 0
self.n_bondtypes = 0
if angles is not None:
self.angles = angles
self.angles.index = np.arange(1, len(self.angles) + 1)
self.angletypes = angles[['type', 'x0', 'k']].drop_duplicates()
self.angletypes.index = np.arange(1, len(self.angletypes) + 1)
self.n_angles = len(self.angles)
self.n_angletypes = len(self.angletypes)
else:
self.angles = pandas.DataFrame()
self.angletypes = pandas.DataFrame()
self.n_angles = 0
self.n_angletypes = 0
if dihedrals is not None:
self.dihedrals = dihedrals
self.dihedrals.index = np.arange(1, len(self.dihedrals) + 1)
self.dihedraltypes = dihedrals[['type', 'x0', 'k']].drop_duplicates()
self.dihedraltypes.index = np.arange(1, len(self.dihedraltypes) + 1)
self.n_dihedrals = len(self.dihedrals)
self.n_dihedraltypes = len(self.dihedraltypes)
else:
self.dihedrals = pandas.DataFrame()
self.dihedraltypes = pandas.DataFrame()
self.n_dihedrals = 0
self.n_dihedraltypes = 0
if impropers is not None:
self.impropers = impropers
self.impropers.index = np.arange(1, len(self.impropers) + 1)
self.impropertypes = impropers[['type', 'x0', 'k']].drop_duplicates()
self.impropertypes.index = np.arange(1, len(self.impropertypes) + 1)
self.n_impropers = len(self.impropers)
self.n_impropertypes = len(self.impropertypes)
else:
self.impropers = pandas.DataFrame()
self.impropertypes = pandas.DataFrame()
self.n_impropers = 0
self.n_impropertypes = 0
# self.n_bonds=len(self.bonds)
# self.n_bondtypes=len(self.bondtypes)
# self.xmin,self.xmax=atoms['x'].min(),atoms['x'].max()
# self.ymin,self.ymax=atoms['y'].min(),atoms['y'].max()
# self.zmin,self.zmax=atoms['z'].min(),atoms['z'].max()
def write_data(self, file_name='actin.data', box_size=1000):
self.xmin, self.xmax = 0, box_size
self.ymin, self.ymax = 0, box_size
self.zmin, self.zmax = 0, box_size
with open(file_name, 'w+') as f:
f.write('LAMMPS data file generated with python\n\n')
f.write('\t%i atoms\n' % self.n_atoms)
f.write('\t%i bonds\n' % self.n_bonds)
f.write('\t%i angles\n' % self.n_angles)
f.write('\t%i dihedrals\n' % self.n_dihedrals)
f.write('\t%i impropers\n' % self.n_impropers)
f.write('\n')
f.write('\t%i atom types\n' % self.n_atomtypes)
f.write('\t%i bond types\n' % self.n_bondtypes)
f.write('\t%i angle types\n' % self.n_angletypes)
f.write('\t%i dihedral types\n' % self.n_dihedraltypes)
f.write('\t%i improper types\n' % self.n_impropertypes)
f.write('\n')
f.write('\t %f %f xlo xhi\n' % (self.xmin, self.xmax))
f.write('\t %f %f ylo yhi\n' % (self.ymin, self.ymax))
f.write('\t %f %f zlo zhi\n' % (self.zmin, self.zmax))
f.write('\n')
f.write('Masses\n\n')
for i, m in self.masses.iterrows():
f.write('\t%i\t%f\n' % (i, m.mass))
f.write('\n')
f.write('Atoms\n\n')
for i, a in self.atoms.iterrows():
f.write('\t%i\t%i\t%i\t%f\t%f\t%f\t%f\n' % (i, a.molecule, a.type, a.q, a.x, a.y, a.z))
f.write('\n')
if self.n_bonds > 0:
f.write('Bonds\n\n')
for i, b in self.bonds.iterrows():
f.write('\t%i\t%i\t%i\t%i\n' % (i, b.type, b.i, b.j))
f.write('\n')
if self.n_angles > 0:
f.write('Angles\n\n')
for i, b in self.angles.iterrows():
f.write('\t%i\t%i\t%i\t%i\t%i\n' % (i, b.type, b.i, b.j, b.l))
f.write('\n')
if self.n_dihedrals > 0:
f.write('Dihedrals\n\n')
for i, b in self.dihedrals.iterrows():
f.write('\t%i\t%i\t%i\t%i\t%i\t%i\n' % (i, b.type, b.i, b.j, b.l, b.m))
f.write('\n')
if self.n_impropers > 0:
f.write('Impropers\n\n')
for i, b in self.impropers.iterrows():
f.write('\t%i\t%i\t%i\t%i\t%i\t%i\n' % (i, b.type, b.i, b.j, b.l, b.m))
f.write('\n')
def write_pdb(self, file_name='actin.pdb'):
import string
cc = (string.ascii_uppercase.replace('X','') + string.ascii_lowercase + '1234567890'+'X')*1000
cc_d = dict(zip(range(1, len(cc) + 1), cc))
pdb_line = '%-6s%5i %-4s%1s%3s %1s%4i%1s %8s%8s%8s%6.2f%6.2f %2s%2s\n'
pdb_atoms = self.atoms.copy()
pdb_atoms['serial'] = np.arange(1, len(self.atoms) + 1)
# pdb_atoms['name'] = self.atoms['type'].replace({1:'A1',2:'A2',3:'A3',4:'A4',5:'A5',6:'C1',7:'C2'})
pdb_atoms['altLoc'] = ''
# pdb_atoms['resName'] = self.atoms['molecule_name'].replace({'actin':'ACT','camkii':'CAM'})
pdb_atoms['resName'] = self.atoms['resname']
#pdb_atoms['chainID'] = self.atoms['molecule'].replace(cc_d)
pdb_atoms['chainID'] = self.atoms['chainID']
# assert False
# pdb_atoms['resSeq'] = 0
pdb_atoms['iCode'] = ''
# pdb_atoms['x'] =
# pdb_atoms['y'] =
# pdb_atoms['z'] =
pdb_atoms['occupancy'] = 0
pdb_atoms['tempFactor'] = 0
pdb_atoms['element'] = self.atoms['type'].replace(
{1: 'C', 2: 'O', 3: 'N', 4: 'P', 5: 'H', 6: 'H', 7: 'H', 8: 'Mg', 9: 'Fe', 10: 'C'})
pdb_atoms['charge'] = 0 # self.atoms['q'].astype(int)
with open(file_name, 'w+') as f:
chain = 'NoChain'
resSeq = 0
for i, a in pdb_atoms.iterrows():
if a['chainID'] != chain:
resSeq = 1
chain = a['chainID']
else:
resSeq += 1
f.write(pdb_line % ('ATOM',
int(a['serial']),
a['name'].center(4),
a['altLoc'],
a['resName'],
a['chainID'],
a['resid'],
a['iCode'],
('%8.3f' % (a['x'] / 10))[:8],
('%8.3f' % (a['y'] / 10))[:8],
('%8.3f' % (a['z'] / 10))[:8],
a['occupancy'],
a['tempFactor'],
a['element'],
a['charge']))
def write_psf(self, file_name='actin.psf'):
pass
def write_gro(self, file_name='actin.gro', box_size=1000):
gro_line = "%5d%-5s%5s%5d%8s%8s%8s%8s%8s%8s\n"
pdb_atoms = self.atoms.copy()
pdb_atoms['resName'] = self.atoms[
'resname'] # self.atoms['molecule_name'].replace({'actin':'ACT','camkii':'CAM'})
# pdb_atoms['name'] = self.atoms['type'].replace({1:'Aa',2:'Ab',3:'Ca',4:'Cb',5:'Da',6:'Db'})
pdb_atoms['serial'] = np.arange(1, len(self.atoms) + 1)
pdb_atoms['chainID'] = self.atoms['molecule']
self.xmin, self.xmax = 0, box_size
self.ymin, self.ymax = 0, box_size
self.zmin, self.zmax = 0, box_size
resSeq = 0
with open(file_name, 'w+') as f:
f.write('Generated Model\n')
f.write('%5i\n' % len(pdb_atoms))
chain = 'NoChain'
resSeq = 0
for i, a in pdb_atoms.iterrows():
if a['molecule'] != chain:
resSeq = 1
chain = a['molecule']
else:
resSeq += 1
f.write(gro_line % (a['molecule'],
a['resName'],
a['name'],
int(a['serial']),
('%8.3f' % (a['x'] / 10))[:8],
('%8.3f' % (a['y'] / 10))[:8],
('%8.3f' % (a['z'] / 10))[:8],
'', '', ''))
f.write((' ' + ' '.join(['%8.3f'] * 3) + '\n') % (self.xmax, self.ymax, self.zmax))
def print_coeff(self):
if self.n_bonds > 0:
self.bondtypes = self.bondtypes.sort_values('type')
for i, b in self.bondtypes.iterrows():
print('bond_coeff', int(b.type), b.k, '%.4f' % b['x0'])
if self.n_angles > 0:
for i, b in self.angletypes.iterrows():
print('angle_coeff', int(b.type), b.k, '%.4f' % b['x0'])
if self.n_dihedrals > 0:
for i, b in self.dihedraltypes.iterrows():
print('dihedral_coeff', int(b.type), b.k, '%.4f' % b['x0'])
if self.n_impropers > 0:
for i, b in self.impropertypes.iterrows():
print('improper_coeff', int(b.type), b.k, '%.4f' % b['x0'])
class CoarseActin:
@classmethod
def from_parameters(cls,
box_size=10000,
n_actins=10,
n_camkiis=200,
min_dist=200,
align_actins=False,
bundle=False,
system2D=False,
model='Binding-Qian2',
sname='actin',
actinLenMin=50,
actinLenMax=100):
self = cls()
# Get actin coordinates actin
pdb = prody.parsePDB(f'{__location__}/3j8i.pdb')
mean = np.array([])
for chain in 'DEF':
selection = pdb.select('chain %s' % chain)
D1 = pdb.select('chain %s and (resid 1 to 32 or resid 70 to 144 or resid 338 to 375)' % chain)
D2 = pdb.select('chain %s and (resid 33 to 69)' % chain)
D3 = pdb.select('chain %s and (resid 145 to 180 or resid 270 to 337)' % chain)
D4 = pdb.select('chain %s and (resid 181 to 269)' % chain)
m1 = D1.getCoords().mean(axis=0)
m2 = D2.getCoords().mean(axis=0)
m3 = D3.getCoords().mean(axis=0)
m4 = D4.getCoords().mean(axis=0)
mean = np.concatenate([mean, m1, m2, m3, m4], axis=0)
mean = mean.reshape(-1, 3)
actin = pandas.DataFrame(mean, columns=['x', 'y', 'z'])
name = ['A1', 'A2', 'A3', 'A4'] * 3
resid = [i for j in range(3) for i in [j] * 4]
actin.index = zip(resid, name)
# Build virtual sites
vs = self.virtual_sites_definition
for j in [2]:
for i, s in vs[vs['molecule'] == model].iterrows():
w12 = s['w12']
w13 = s['w13']
wcross = s['wcross']
a = actin.loc[[(j, s['p1'])]].squeeze() / 10
b = actin.loc[[(j, s['p2'])]].squeeze() / 10
c = actin.loc[[(j, s['p3'])]].squeeze() / 10
r12 = b - a
r13 = c - a
rcross = np.cross(r12, r13)
r = (a + w12 * r12 + w13 * r13 + wcross * rcross) * 10
r.name = (j, s['site'])
actin = actin.append(r)
actin_reference = actin.sort_index()
# Build individual actins
factin = []
for i in range(n_actins):
# Set actin length
nactins = actinLenMin + int((actinLenMax-actinLenMin) * np.random.random())
names = ['A1', 'A2', 'A3', 'A4'] * 2 + ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7'] * (nactins - 2)
resnames = ['ACD'] * (4 * 2) + ['ACT'] * (7 * (nactins - 2))
resids = [1] * 4 + [2] * 4 + [i + 3 for j in range(nactins - 2) for i in [j] * 7]
# actin_mass=41.74*1E3
Factin = prody.AtomGroup()
Factina = prody.AtomGroup()
Factinb = prody.AtomGroup()
Factin.setCoords(actin_reference)
Factina.setCoords(actin_reference[4:-3])
Factinb.setCoords(actin_reference[:-4 - 3])
for i in range(nactins - 3):
a, t = prody.superpose(Factina, Factinb)
temp0 = Factin.getCoords()
test = prody.applyTransformation(t, Factin)
temp = np.concatenate([test.getCoords(), temp0[-4 - 3:]])
# print(len(temp))
Factin = prody.AtomGroup()
Factina = prody.AtomGroup()
Factinb = prody.AtomGroup()
Factin.setCoords(temp)
Factina.setCoords(temp[4:12])
Factinb.setCoords(temp[0:8])
Factin = prody.AtomGroup()
Factin.setCoords(temp[:])
n = len(Factin)
Factin.setNames(names)
Factin.setResnames(resnames)
Factin.setResnums(resids)
# Factin.setNames(['ALA' for i in range(n)])
prody.writePDB('Factin.pdb', Factin)
print(nactins, (n - 8) / 7. + 2)
atoms = pandas.DataFrame(Factin.getCoords(), columns=['x', 'y', 'z'])
atoms['q'] = -11
atoms['molecule'] = 1
atoms['type'] = [1, 2, 3, 4] * 2 + [1, 2, 3, 4, 5, 6, 7] * (nactins - 2)
atoms['name'] = names
# atoms['mass']=[D1_mass,D2_mass,D3_mass,D4_mass]*2+([D1_mass,D2_mass,D3_mass,D4_mass,0,0,0])*(nactins-2)
atoms['resid'] = resids
atoms['resname'] = resnames
atoms.head()
factin += [atoms.copy()]
# Read camkii
camkii = self.template
# Build box
actins = []
camkiis = []
for i in range(n_actins):
d = 0
while d < min_dist:
f = factin[i][['x', 'y', 'z']].copy()
f = f - f.mean()
if align_actins:
rot = optimal_rotation(f)
else:
rot = random_rotation()
f = pandas.DataFrame(np.dot(rot, f[['x', 'y', 'z']].T).T, columns=f.columns)
f = f - f.mean()
f += [box_size / 2. for j in range(3)]
a, b, c = [box_size * np.random.random() for j in range(3)]
if bundle:
a = 0
if system2D:
c = 0
f += [a, b, c]
f -= (f.mean() > box_size) * box_size
f2 = factin[i].copy()
f2[['x', 'y', 'z']] = f[['x', 'y', 'z']]
# f+=[box_size/2. for i in range(3)]
f2['molecule'] = i + 1
f2['molecule_name'] = 'actin'
f2['resname'] = factin[i]['resname']
try:
d = sdist.cdist(f2[['x', 'y', 'z']], s[s['name'].isin(['A2', 'Cc'])][['x', 'y', 'z']]).min()
                except (KeyError, NameError):  # s does not exist yet on the first pass through the loop
d = min_dist + 100
actins += [f2]
s = pandas.concat(actins)
print("Actins in system")
print(f"Total number of particles: {len(s)}")
for i in range(n_camkiis):
d = 0
while d < min_dist:
f = camkii[['x', 'y', 'z']].copy()
f = f - f.mean()
f = pandas.DataFrame(np.dot(random_rotation(), f[['x', 'y', 'z']].T).T, columns=f.columns)
f = f - f.mean()
f += [box_size / 2. for j in range(3)]
a, b, c = [box_size * np.random.random() for j in range(3)]
if system2D:
c = box_size/10 * np.random.random()
f += [a, b, c]
f -= (f.mean() > box_size) * box_size
f2 = camkii.copy()
f2[['x', 'y', 'z']] = f[['x', 'y', 'z']]
# f+=[box_size/2. for i in range(3)]
f2['molecule'] = n_actins + i + 1
f2['molecule_name'] = 'camkii'
f2['resid'] = i + 1
f2['resname'] = 'CAM'
# f2['mass']/=100
# rr=np.random.randint(2)
# if rr==1:
# f2['type']+=2
d = sdist.cdist(f2[['x', 'y', 'z']], s[s['name'].isin(['A2', 'Cc'])][['x', 'y', 'z']]).min()
camkiis += [f2]
s = pandas.concat(actins + camkiis,sort=True)
print(f"CAMKII {i}")
print("CAMKIIs in system")
print(f"Total number of particles: {len(s)}")
s.index = np.arange(1, len(s) + 1)
s['mass']=np.nan
# Write system
ss = SystemData(s.sort_values(['molecule', 'resid', 'name']))
#ss.write_data(f'{sname}.data')
ss.write_pdb(f'{sname}.pdb')
ss.write_gro(f'{sname}.gro')
ss.print_coeff()
return self.from_topology(topology_file=f'{sname}.pdb', PlaneConstraint=system2D, periodic_box=box_size)
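    # Hedged usage sketch (parameter values are arbitrary examples, not defaults from the source):
    #   system = CoarseActin.from_parameters(box_size=5000, n_actins=5, n_camkiis=20,
    #                                        actinLenMin=20, actinLenMax=40, sname='demo')
    # This writes demo.pdb / demo.gro and returns the CoarseActin object built by from_topology.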
@classmethod
def from_topology(cls, topology_file='actin.pdb', periodic_box=10000, PlaneConstraint=False):
self = cls()
self.periodic_box = [periodic_box * 0.1] * 3
self.forcefield = openmm.app.ForceField(f'{__location__}/ff.xml')
self.top = openmm.app.PDBFile(topology_file)
self.system = self.forcefield.createSystem(self.top.topology)
self.system.setDefaultPeriodicBoxVectors(*np.diag(self.periodic_box))
self.atom_list = self.parseTop()
self.BuildVirtualSites()
self.ComputeTopology()
self.setForces(PlaneConstraint=PlaneConstraint)
return self
# Parse topology data
def parseConfigurationFile(self, configuration_file=f'{__location__}/actinff.conf'):
"""Reads the configuration file for the forcefield"""
self.configuration_file = configuration_file
print(configuration_file)
config = configparser.ConfigParser()
config.read(configuration_file)
self.template = parseConfigTable(config['Template'])
self.bond_definition = parseConfigTable(config['Bonds'])
self.bond_definition['type'] = '1' + self.bond_definition['i'].astype(str) + '-' + \
(1 + self.bond_definition['s']).astype(str) + \
self.bond_definition['j'].astype(str)
self.angle_definition = parseConfigTable(config['Angles'])
self.dihedral_definition = parseConfigTable(config['Dihedrals'])
self.repulsion_definition = parseConfigTable(config['Repulsion'])
self.virtual_sites_definition = parseConfigTable(config['Virtual sites'])
self.bond_definition = self.bond_definition[self.bond_definition.molecule.isin(['Actin-ADP', 'CaMKII'])]
self.angle_definition = self.angle_definition[self.angle_definition.molecule.isin(['Actin-ADP', 'CaMKII'])]
self.dihedral_definition = self.dihedral_definition[
self.dihedral_definition.molecule.isin(['Actin-ADP', 'CaMKII'])]
# self.virtual_sites_definition = self.virtual_sites_definition[self.virtual_sites_definition.molecule.isin(['Actin-ADP', 'CaMKII','Binding-Qian2'])]
def __init__(self):
self.parseConfigurationFile()
# self.forcefield = openmm.app.ForceField(f'{__location__}/ff.xml')
# self.top = top
# self.system = self.forcefield.createSystem(top.topology)
# self.system.setDefaultPeriodicBoxVectors(*np.diag(periodic_box))
# self.atom_list = self.parseTop()
# self.BuildVirtualSites()
# self.ComputeTopology()
# self.setForces()
# Parse topology data
def parseTop(self):
""" Converts the information from the topology to a table"""
cols = ['atom_index', 'atom_id', 'atom_name',
'residue_index', 'residue_id', 'residue_name',
'chain_index', 'chain_id']
data = []
for residue in self.top.topology.residues():
for atom in residue.atoms():
data += [[atom.index, atom.id, atom.name,
residue.index, residue.id, residue.name,
residue.chain.index, residue.chain.id]]
atom_list = pandas.DataFrame(data, columns=cols)
atom_list.index = atom_list['atom_index']
return atom_list
def BuildVirtualSites(self):
""" Sets the parameters for the virtual sites"""
virtual_sites_definition = self.virtual_sites_definition.copy()
virtual_sites_definition.index = [tuple(b) for a, b in
virtual_sites_definition[['molecule', 'site']].iterrows()]
# Actin binding sites parameters
w1 = np.array(virtual_sites_definition.loc[[('Voth-Qian2020', 'A5')], ['w12', 'w13', 'wcross']].squeeze())
w2 = np.array(virtual_sites_definition.loc[[('Voth-Qian2020', 'A6')], ['w12', 'w13', 'wcross']].squeeze())
w3 = np.array(virtual_sites_definition.loc[[('Voth-Qian2020', 'A7')], ['w12', 'w13', 'wcross']].squeeze())
# CAMKII virtual sites
cw1 = np.array(virtual_sites_definition.loc[[('CaMKII', 'C1')], ['w12', 'w13', 'wcross']].squeeze())
cw2 = np.array(virtual_sites_definition.loc[[('CaMKII', 'C2')], ['w12', 'w13', 'wcross']].squeeze())
cw3 = np.array(virtual_sites_definition.loc[[('CaMKII', 'C6')], ['w12', 'w13', 'wcross']].squeeze())
cw4 = np.array(virtual_sites_definition.loc[[('CaMKII', 'C7')], ['w12', 'w13', 'wcross']].squeeze())
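        # Note (hedged interpretation): each (w12, w13, wcross) triple places a site at
        # p1 + w12*(p2 - p1) + w13*(p3 - p1) + wcross*((p2 - p1) x (p3 - p1)),
        # the same construction used when building the reference actin in from_parameters.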
# Virtual sites
for _, res in self.atom_list.groupby(['chain_index', 'residue_id']):
            assert len(res['residue_name'].unique()) == 1, \
                f"Expected a single residue name per (chain, residue_id) group, got {res['residue_name'].unique()} for {_}"
resname = res['residue_name'].unique()[0]
ix = dict(list(zip(res['atom_name'], res['atom_index'])))
if resname == 'ACT':
# Virtual site positions
a5 = openmm.OutOfPlaneSite(ix['A2'], ix['A1'], ix['A3'], w1[0], w1[1], w1[2])
a6 = openmm.OutOfPlaneSite(ix['A2'], ix['A1'], ix['A3'], w2[0], w2[1], w2[2])
a7 = openmm.OutOfPlaneSite(ix['A2'], ix['A1'], ix['A3'], w3[0], w3[1], w3[2])
# Set up virtual sites
self.system.setVirtualSite(ix['A5'], a5)
self.system.setVirtualSite(ix['A6'], a6)
self.system.setVirtualSite(ix['A7'], a7)
if resname == 'CAM':
# Parent sites
c1 = ix['Cx1']
c2 = ix['Cx2']
c3 = ix['Cx3']
# Virtual site positions
c01 = openmm.OutOfPlaneSite(c1, c2, c3, cw1[0], cw1[1], cw1[2])
c02 = openmm.OutOfPlaneSite(c1, c2, c3, cw2[0], cw2[1], cw2[2])
c03 = openmm.OutOfPlaneSite(c2, c3, c1, cw1[0], cw1[1], cw1[2])
c04 = openmm.OutOfPlaneSite(c2, c3, c1, cw2[0], cw2[1], cw2[2])
c05 = openmm.OutOfPlaneSite(c3, c1, c2, cw1[0], cw1[1], cw1[2])
c06 = openmm.OutOfPlaneSite(c3, c1, c2, cw2[0], cw2[1], cw2[2])
c07 = openmm.OutOfPlaneSite(c1, c2, c3, cw3[0], cw3[1], cw3[2])
c08 = openmm.OutOfPlaneSite(c1, c2, c3, cw4[0], cw4[1], cw4[2])
c09 = openmm.OutOfPlaneSite(c2, c3, c1, cw3[0], cw3[1], cw3[2])
c10 = openmm.OutOfPlaneSite(c2, c3, c1, cw4[0], cw4[1], cw4[2])
c11 = openmm.OutOfPlaneSite(c3, c1, c2, cw3[0], cw3[1], cw3[2])
c12 = openmm.OutOfPlaneSite(c3, c1, c2, cw4[0], cw4[1], cw4[2])
cc = openmm.ThreeParticleAverageSite(c1, c2, c3, 1 / 3., 1 / 3., 1 / 3.)
# Set up virtual positions
self.system.setVirtualSite(ix['C01'], c01)
self.system.setVirtualSite(ix['C02'], c02)
self.system.setVirtualSite(ix['C03'], c03)
self.system.setVirtualSite(ix['C04'], c04)
self.system.setVirtualSite(ix['C05'], c05)
self.system.setVirtualSite(ix['C06'], c06)
self.system.setVirtualSite(ix['C07'], c07)
self.system.setVirtualSite(ix['C08'], c08)
self.system.setVirtualSite(ix['C09'], c09)
self.system.setVirtualSite(ix['C10'], c10)
self.system.setVirtualSite(ix['C11'], c11)
self.system.setVirtualSite(ix['C12'], c12)
self.system.setVirtualSite(ix['Cc'], cc)
self.atom_list['Virtual'] = [self.system.isVirtualSite(a) for a in range(len(self.atom_list))]
def ComputeTopology(self):
# print(bonds)
# Bonds, angles and dihedrals
bonds = []
angles = []
dihedrals = []
for _, c in self.atom_list.groupby('chain_index'):
ix = {}
for name, aa in c.groupby('atom_name'):
ix.update({name: list(aa.index)})
for SB, B in zip([bonds, angles, dihedrals],
[self.bond_definition, self.angle_definition, self.dihedral_definition]):
for _, b in B.iterrows():
temp = pandas.DataFrame(columns=B.columns)
if 's' not in b:
b['s'] = 0
if b['i'] not in ix.keys():
continue
i1 = ix[b['i']][b['s']:]
i2 = ix[b['j']][:-b['s']] if b['s'] != 0 else ix[b['j']]
assert (len(i1) == len(i2))
temp['i'] = i1
temp['j'] = i2
if 'k' in b:
i3 = ix[b['k']]
assert (len(i1) == len(i3))
temp['k'] = i3
if 'l' in b:
i4 = ix[b['l']]
assert (len(i1) == len(i4))
temp['l'] = i4
for col in temp:
if col not in ['i', 'j', 'k', 'l']:
temp[col] = b[col]
SB += [temp]
bonds = pandas.concat(bonds, sort=False)
bonds.sort_values(['i', 'j'], inplace=True)
angles =
|
pandas.concat(angles, sort=False)
|
pandas.concat
|
"""
Author: <NAME>
Purpose: This module represents the models defined in the dissertation and runs
simulations of those models.
Date: 09/02/21
"""
from itertools import product
import numpy as np
from scipy.integrate import odeint
import pandas as pd
def newtons_cooling_law(delta, simulations, save, initial_paras = None):
"""
    newtons_cooling_law:
    Simulates Newton's law of cooling, dT/dt = -0.015 * (T - 22), for a range of initial temperatures.
"""
def f(state, t):
temp = state
dtempdtime = -0.015*(temp - 22)
return dtempdtime
    initial_temp = range(1, 61)  # default initial temperatures: 1 to 60 degrees in steps of 1
try:
if (initial_paras != None).any():
initial_temp = initial_paras
except AttributeError:
pass
newton = []
time = np.arange(0, simulations, delta)
for t_0 in initial_temp:
states_0 = [t_0]
state = odeint(f, states_0, time)
df = pd.DataFrame(data={'time' : time, 'temp' : state[:, 0]})
df['initial_temp'] = t_0
newton.append(df)
newton = pd.concat(newton)
if save:
newton.to_csv("data/train/newtons_cooling_law.csv", index = False)
return newton
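# Hedged usage/validation sketch: the linear ODE above has the closed-form solution
# T(t) = 22 + (T0 - 22) * exp(-0.015 * t), so the simulated output can be spot-checked
# (delta, simulations and the initial temperature below are arbitrary example values):
#   sim = newtons_cooling_law(delta=0.1, simulations=100, save=False, initial_paras=np.array([40.0]))
#   final_temp = sim.loc[sim.time == sim.time.max(), 'temp'].iloc[0]
#   expected = 22 + (40.0 - 22) * np.exp(-0.015 * sim.time.max())  # ~26.0, should match final_temp closely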
def van_der_pol_oscillator(delta, simulations, save, initial_paras = None):
"""
    van_der_pol_oscillator:
    Runs a simulation of the Van der Pol oscillator with damping parameter MU = 0.5.
"""
MU = 0.5
def f(state, t):
x, y = state
dxdt = y
dydt = MU*(1 - x * x) * y - x
return dxdt, dydt
van_df = []
#100.1
time = np.arange(0, simulations, delta)
ranges = range(1,5)
try:
if (initial_paras != None).any():
ranges = initial_paras
except AttributeError:
pass
for init_x in ranges:
for init_y in ranges:
states_0 = [init_x, init_y]
state = odeint(f, states_0, time)
df = pd.DataFrame(data={'time' : time, 'x' : state[:, 0], 'y' : state[:, 1]})
df['initial_x'] = init_x
df['initial_y'] = init_y
van_df.append(df)
van_df = pd.concat(van_df)
if save:
van_df.to_csv("data/train/van.csv", index = False)
return van_df
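# Hedged note: with the default ranges = range(1, 5), the nested loops above sweep
# 4 x 4 = 16 (initial_x, initial_y) combinations, each integrated over np.arange(0, simulations, delta).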
def laub_loomis(delta, simulations, save, initial_paras = None):
"""
    laub_loomis:
    Simulates the seven-dimensional Laub-Loomis model.
"""
def f(state, t):
x,y,z,w,p,q,m = state
func_1 = 1.4 * z - 0.9 * x
func_2 = 2.5 * p - 1.5 * y
func_3 = 0.6 * m - 0.8 * y * z
func_4 = 2 - 1.3 * z * w
func_5 = 0.7 * x - w * p
func_6 = 0.3 * x - 3.1 * q
func_7 = 1.8 * q - 1.5 * y * m
return func_1, func_2, func_3, func_4, func_5, func_6, func_7
MIN = 1
MAX = 3
STEP = 1
ranges = range(MIN,MAX,STEP)
try:
if (initial_paras != None).any():
ranges = initial_paras
except AttributeError:
pass
laub_loomis = []
time = np.arange(0, simulations, delta) # 0 , 500
for x, y, z, w, p, q, m in product(ranges, ranges, ranges, ranges, ranges, ranges, ranges):
states_0 = [x, y, z, w, p, q, m]
state = odeint(f, states_0, time)
data = {'time' : time, 'x' : state[:, 0], 'y' : state[:, 1], 'z' : state[:, 2],
'w' : state[:, 3], 'p' : state[:, 4], 'q' : state[:, 5], 'm' : state[:, 6]}
df = pd.DataFrame(data=data)
df['initial_x'] = x
df['initial_y'] = y
df['initial_z'] = z
df['initial_w'] = w
        df['initial_p'] = p
        df['initial_q'] = q
df['initial_m'] = m
laub_loomis.append(df)
laub_loomis = pd.concat(laub_loomis)
if save:
laub_loomis.to_csv("data/train/laub.csv", index = False)
return laub_loomis
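# Hedged note: with MIN=1, MAX=3, STEP=1 the default ranges is range(1, 3) = [1, 2], so the
# product over the seven state variables yields 2**7 = 128 initial conditions.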
def biological_model(delta, simulations, save, initial_paras = None):
"""
    Simulates a nine-dimensional biological model.
"""
def f(state, t):
x1, x2, x3, x4, x5, x6, x7, x8, x9 = state
dx1dt = 3 * x3 - x1 * x6
dx2dt = x4 - x2 * x6
dx3dt = x1 * x6 - 3 * x3
dx4dt = x2 * x6 - x4
dx5dt = 3 * x3 + 5 * x1 - x5
dx6dt = 5 * x5 + 3 * x3 + x4 - x6 * (x1 + x2 + 2 * x8 + 1)
dx7dt = 5 * x4 + x2 - 0.5 * x7
dx8dt = 5 * x7 - 2 * x6 * x8 + x9 - 0.2 * x8
dx9dt = 2 * x6 * x8 - x9
return dx1dt, dx2dt, dx3dt, dx4dt, dx5dt, dx6dt, dx7dt, dx8dt, dx9dt
time = np.arange(0, simulations, delta)
biological_model = []
filename = 'data/train/biological_model.csv'
    ranges = np.arange(0.99, 1.01, 0.02) # we want initial parameters x1, x2, ..., x9 to take values in (0.99, 1.01)
try:
if (initial_paras != None).any():
ranges = initial_paras
except AttributeError:
pass
for init_x1, init_x2, init_x3, init_x4, init_x5, init_x6, init_x7, init_x8, init_x9 in product(ranges, ranges, ranges, ranges, ranges, ranges, ranges, ranges, ranges):
states_0 = [init_x1, init_x2, init_x3, init_x4, init_x5, init_x6, init_x7, init_x8, init_x9]
state = odeint(f, states_0, time)
data = {'time' : time, 'x1' : state[:, 0], 'x2' : state[:, 1], 'x3' : state[:, 2], 'x4' : state[:, 3],
'x5' : state[:, 4], 'x6' : state[:, 5], 'x7' : state[:, 6], 'x8' : state[:, 7], 'x9' : state[:, 8],}
df =
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
import datetime
import pandas as pd
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from urllib.parse import urlencode
def start_from_home_page(driver):
# go to home page
HOME_PAGE_URL = "https://esales.hdb.gov.sg/bp25/launch/19sep/bto/19SEPBTO_page_2671/about0.html#" # noqa
driver.get(HOME_PAGE_URL)
# hover over Punggol's dropdown menu and click the availability button
punggol_dropdown = driver.find_element_by_xpath(
'//*[@id="bto-icon-nav"]/section/ul/li[1]'
)
availability_redirect = driver.find_element_by_xpath(
'//*[@id="bto-icon-nav"]/section/ul/li[1]/ul/li[11]/a'
)
hover_action = (
ActionChains(driver)
.move_to_element(punggol_dropdown)
.move_to_element(availability_redirect)
)
hover_action.click().perform()
# this will open up a new tab, switch to that new tab
driver.switch_to_window(driver.window_handles[1])
# wait for the new tab to load completely
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "searchButtonId"))
)
# select 5-room
driver.find_element_by_xpath('//*[@id="Flat"]/option[4]').click()
driver.find_element_by_xpath('//*[@id="searchButtonId"]').click()
return driver
def start_from_5room_URL(driver):
# Construct URL
FIVE_ROOM_URL_PARAMS = (
("Town", "PUNGGOL"),
("Flat_Type", "BTO"),
("DesType", "A"),
("ethnic", "Y"),
("Flat", "5-Room"),
("ViewOption", "A"),
("dteBallot", "201909"),
("projName", "A"),
("brochure", "true"),
)
FIVE_ROOM_URL = "https://services2.hdb.gov.sg/webapp/BP13AWFlatAvail/BP13EBSFlatSearch?" # noqa
FIVE_ROOM_URL = FIVE_ROOM_URL + urlencode(FIVE_ROOM_URL_PARAMS)
driver.get(FIVE_ROOM_URL)
return driver
def get_all_block_details(driver):
table = driver.find_element_by_xpath(
'//*[@id="blockDetails"]/div[1]/table/tbody'
)
blocks_row_and_col = []
for row_number, row in enumerate(table.find_elements_by_xpath(".//tr")):
for col_number, col in enumerate(row.find_elements_by_xpath(".//td")):
blocks_row_and_col.append((row_number + 1, col_number + 1))
all_details = []
for row, col in blocks_row_and_col:
block = driver.find_element_by_xpath(
'//*[@id="blockDetails"]/div[1]/table/tbody/'
f'tr[{row}]/td[{col}]/div/font/a/font'
)
block_number = block.text
block.click()
all_details.extend(get_block_details(block_number, driver.page_source))
return all_details
def get_block_details(block_number, page_source):
soup = BeautifulSoup(page_source, 'html.parser')
units = (
soup.find("div", {"id": "blockDetails"})
.find_all("table")[1]
.find("tbody")
.find_all("td")
)
price_regex = r"^(\$\d+,\d+)<br>_*<br>(\d+)"
units_details = []
for unit in units:
detail = unit.find("font").attrs
print(detail)
if detail.get("color") == "#cc0000":
_id = unit.find("font").text.strip()
floor, unit_number = _id.split("-")
units_details.append({
"block_number": block_number,
"floor": floor,
"unit_number": unit_number,
"unit_available": False
})
else:
# get floor unit number
_id = detail["id"]
floor, unit_number = _id.split("-")
# get price and area
price_area_info = soup.find("span", {"data-selector": _id}).attrs
price, area = re.findall(price_regex, price_area_info["title"])[0]
units_details.append({
"block_number": block_number,
"floor": floor,
"unit_number": unit_number,
"price": price,
"area": area,
"unit_available": True
})
return units_details
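# Hedged example of what price_regex is expected to match (the sample title string below is an
# assumption about the HDB availability page format, not taken from a real response):
#   re.findall(r"^(\$\d+,\d+)<br>_*<br>(\d+)", "$365,000<br>____<br>110")
#   -> [('$365,000', '110')]   # i.e. price '$365,000', floor area '110'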
if __name__ == '__main__':
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
driver = webdriver.Chrome(
"/usr/local/bin/chromedriver",
options=options
)
driver = start_from_5room_URL(driver)
# driver = start_from_home_page(driver)
all_details = get_all_block_details(driver)
df =
|
pd.DataFrame(all_details)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
# import lightgbm as lgb
import loadsave as ls
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from keras import backend as K
from sklearn import preprocessing
from sklearn.utils import class_weight
from collections import Counter
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.5
# config.gpu_options.visible_device_list = "0"
# set_session(tf.Session(config=config))
def setDayTime(row):
row['isWeekend'] = np.where(((row['tweekday'] == 5) | (row['tweekday'] == 6)),1,0)
row['isLateNight'] = np.where(((row['thour'] <= 7) | (row['thour'] >= 22)),1,0)
row['isNight'] = np.where(((row['thour'] <= 3) | (row['thour'] >= 19)),1,0)
row['isEarlyMorn'] = np.where(((row['thour'] >= 7) & (row['thour'] <= 12)),1,0)
row['isDay'] = np.where(((row['thour'] >= 10) & (row['thour'] <= 17)),1,0)
row['isNoon'] = np.where(((row['thour'] >= 15) & (row['thour'] <= 21)),1,0)
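# Hedged usage sketch: setDayTime mutates the passed DataFrame in place and assumes integer
# 'tweekday' (pandas convention: Monday=0 .. Sunday=6) and 'thour' columns, e.g.
#   demo = pd.DataFrame({'tweekday': [5, 2], 'thour': [23, 10]})
#   setDayTime(demo)
#   demo[['isWeekend', 'isLateNight', 'isDay']]  # -> columns [1, 0], [1, 0], [0, 1]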
def isWeekend(row):
if row['tweekday'] == 5 or row['tweekday'] == 6:
return 1
else:
return 0
def isLateNight(row):
if row['thour'] <= 7 or row['thour'] >= 22:
return 1
else:
return 0
def isNight(row):
if row['thour'] <= 3 or row['thour'] >= 19:
return 1
else:
return 0
def isEarlyMorn(row):
if row['thour'] >= 7 and row['thour'] <= 12:
return 1
else:
return 0
def printConfMat(y_true, y_pred):
confMat=(metrics.confusion_matrix(y_true, y_pred))
print(" ")
print(confMat)
print(0,confMat[0][0]/(confMat[0][0]+confMat[0][1]))
print(1,confMat[1][1]/(confMat[1][1]+confMat[1][0]))
def isDay(row):
if row['thour'] >= 10 and row['thour'] <= 17:
return 1
else:
return 0
def isNoon(row):
if row['thour'] >= 15 and row['thour'] <= 21:
return 1
else:
return 0
isPreprocess= False
if (isPreprocess):
print("Reading data.....")
train =
|
pd.read_csv("input/train.csv")
|
pandas.read_csv
|
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import pandas
import plotly.graph_objects as go
from apps import commonmodules
from app import app
from scipy.stats import multinomial, uniform, expon
import numpy as np
import os
from os import path
import re, ast
import pickle
# suppress sklearn warnings
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# saves last status of prediction result (ok / rework / scrap)
global_index_spanen = [0]
# tab styles
tabs_styles = {
'height': '54px',
'font-size': '120%',
'text-align':"center",
'display': 'inline-block',
}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'fontWeight': 'bold',
'text-align': "center",
'display': 'inline-block',
}
tab_selected_style = {
'borderTop': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'backgroundColor': '#119DFF',
'color': 'white',
'padding': '6px',
'fontWeight': 'bold',
'text-align':"center",
'display': 'inline-block',
}
config = dict({'scrollZoom': True})
# page content of page 'spanen'
layout = html.Div([
commonmodules.get_header(),
html.Br(),
html.H4('Station Spanen', style={'font-weight': 'bold', 'text-decoration': 'underline'}),
html.Hr(),
html.H4('Klassifizierungsergebnisse', style={'font-weight': 'bold'}),
html.Br(),
html.Div([
dbc.Row([
dbc.Col([
# bar plot of process parameters, updated by callbacks
html.Div([
dbc.Row([
html.Div([
dcc.Graph(id='fig1_kraft_spanen'),
], style={'textAlign': 'center', 'width': '100%'}),
]),
dbc.Row([
html.Div(
[
dcc.Graph(id='fig2_leistung_spanen'),
], style={'textAlign': 'center', 'width': '100%'}),
]),
],
className = "pretty_container",),
# containers with evaluation (ok / rework / scrap) and accuracy measures (accuracy, f1score, precision, sensitivity)
dbc.Row(
[
html.Div(
[
html.Div(
id="category",
className="pretty_container",
),
html.Div(
id="accuracy",
className="pretty_container"
),
html.Div(
id="f1score",
className="pretty_container"
),
html.Div(
id="precision",
className="pretty_container"
),
html.Div(
id="sensitivity",
className="pretty_container"
),
],
id="fiveContainer",
)
],
id="infoContainer",
),
],
width=6),
# plot with test data and newly generated data points
# slider for increasing / decreasing training data
dbc.Col([
html.Div([
html.H5("Gelernte Entscheidungsgrenzen", style={"text-align": "center",'font-weight': 'bold'}),
html.Br(),
html.Div([
dcc.Graph(id='fig3_classification_spanen'),
html.H6("Anzahl an Datenpunkten (Training + Test):", style={"text-align": "center",'font-weight': 'bold'}),
dcc.Slider(
id='dataset-slider_spanen',
min=1000,
max=3000,
value=1000,
marks={
1000: '# data: 1000 ',
2000: '2000',
3000: '3000',
},
step=1000,
)
], style={'textAlign': 'center', 'width': '100%'}),
],
className = "pretty_container",),
],
width=6),
],
align="center",
),
], className="flex-hd-row, flex-column align-items-center p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm"),
html.Hr(style={'height': '30px', 'font-weight': 'bold'}),
# recommendation: either OK and go on to station 'lackieren' OR rework at station 'spanen' OR scrap and go to 'anmeldung'
# updated in callbacks
html.H5('Handlungsempfehlung', style={'font-weight': 'bold'}),
html.Br(),
# Buttons for folding/unfolding options: go on to station 'lackieren', rework at station 'spanen', scrap and go to 'anmeldung'
html.Div(
className="flex-hd-row, flex-column p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm",
id="recommendation_spanen"),
html.Br(),
html.Div(
[
dbc.Button(
"Weitere Handlungsoptionen",
id="collapse-button-options_spanen",
className="mb-3",
color="primary",
style={'font-size': '100%'},
),
dbc.Collapse(
html.Div([
html.Div(
[
dbc.Alert(
[
"Weiter zur Station ",
html.A("Lackieren", href="/lackieren", className="alert-link"),
],
color="success",
style={'font-size': '130%'},
),
dbc.Alert(
[
"Nachbearbeitung an Station ",
html.A("Spanen", href="/spanen", className="alert-link"),
],
color="warning",
style={'font-size': '130%'},
),
dbc.Alert(
[
"Klassifiziere Bauteil als ",
html.A("Ausschuss", href="/", className="alert-link"),
],
color="danger",
style={'font-size': '130%'},
),
]
),
], className="flex-hd-row, flex-column p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm"), # d-flex
id="collapse-options_spanen",
),
],
),
html.Hr(),
# button for unfolding detailed information
html.H5('Detailinformationen', style={'font-weight': 'bold'}),
html.Br(),
html.Div(
[
dbc.Button(
"Details ein-/ausblenden",
id="collapse-button-details",
className="mb-3",
color="primary",
style={'font-size': '100%'},
),
dbc.Collapse(
html.Div([
dcc.Tabs(id='tabs-spanen', value='tab-1', children=[
dcc.Tab(label='Konfusionsmatrix', style=tab_style, selected_style=tab_selected_style, children=[
html.Div([
dbc.Row([
dbc.Col(
html.Div([
],
style = {'width': '100 %', "text-align": "center"}, id='spanen_confusion_absolute', # 'display': "flex"
),
),
dbc.Col(
html.Div([
],
style={'width': '100 %', "text-align": "center"}, id ='spanen_confusion_normalised', # 'display': "flex"
),
),
], align="center",)
], className="flex-hd-row flex-column p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm"), # d-flex
]),
dcc.Tab(label='Wirtschaftliche Bewertung', style=tab_style, selected_style=tab_selected_style, children=[
html.Div([
html.H6("Kostensenkung durch Eliminierung der Qualitätskontrolle: "),
html.Ul(
html.Li("Ø 5 Arbeitsstunden mit Personalkostensatz von 50 €/h = 250€"),
),
html.H6(id='increase_in_costs_spanen'),
html.Ul(
[
],
id='cost_misclassification_spanen',
),
html.H6(id='savings_spanen')
], className="flex-hd-row flex-column p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm"), #style={'width': '100 %', "text-align": "center"}
]),
], style=tabs_styles),
], className="flex-column, flex-hd-row p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm"), # d-flex
id="collapse-details",
),
],
),
html.Hr(),
html.Div(id='hidden-div-spanen', style={'display':'none'})
])
# reset slider status in temp .csv file on page reload
@app.callback([
Output('hidden-div-spanen','value')
],[
Input('url','pathname'),
])
def reset_slider_status(pathname):
if pathname == '/spanen':
file = open("temp/temp_spanen_slider.csv", "w")
file.write(str(1000) + "\n")
file.close()
return [None]
# button for collapsing options
@app.callback(
Output("collapse-options_spanen", "is_open"),
[Input("collapse-button-options_spanen", "n_clicks")],
[State("collapse-options_spanen", "is_open")],
)
def toggle_collapse_options(n, is_open):
if n:
return not is_open
return is_open
# callback for collapsing detailed information (confusion matrix, economic evaluation)
@app.callback(
Output("collapse-details", "is_open"),
[Input("collapse-button-details", "n_clicks")],
[State("collapse-details", "is_open")],
)
def toggle_collapse_options(n, is_open):
if n:
return not is_open
return is_open
# update bar graphs, update classification plot, update accuracy metrics, update recommendation
@app.callback([
# bar graphs
Output('fig1_kraft_spanen', 'figure'),
Output('fig2_leistung_spanen', 'figure'),
# classification plot
Output('fig3_classification_spanen', 'figure'),
# update category container (colored in red / green / orange)
Output('category', 'children'),
Output('category', 'style'),
# update recommendation for user
Output('recommendation_spanen', 'children'),
# update accuracy metrics (based on number on training points)
Output('accuracy', 'children'),
Output('f1score', 'children'),
Output('precision', 'children'),
Output('sensitivity', 'children'),
# update confusion matrix (based on number of training points)
Output('spanen_confusion_absolute', 'children'),
Output('spanen_confusion_normalised', 'children'),
],[
# input url
Input('url','pathname'),
# input data slider status
Input('dataset-slider_spanen','value'),
])
def update_inputs(pathname, slider_status):
# save number of training data from status of slider
n_train = slider_status
# load old slider status from file
if path.exists("temp/temp_spanen_slider.csv"):
f = open("temp/temp_spanen_slider.csv", "r")
old_slider_status = int(f.read())
f.close()
else:
old_slider_status= None
# write new slider status to file
file = open("temp/temp_spanen_slider.csv", "w")
file.write(str(slider_status) + "\n")
file.close()
if pathname == '/spanen':
# load training, test data and accuracy metrics
with open("assets/spanen/spanen_knn_data_" + str(n_train) + ".csv") as mycsv:
count = 0
for line in mycsv:
if count == 0:
prediction_test_load = line
if count == 1:
testdata_load = line
if count == 2:
xx_decision_line = line
if count == 3:
yy_decision_line = line
if count == 4:
color_decision_line = line
if count == 5:
prediction_train_load = line
if count == 6:
traindata_load = line
if count == 7:
classification_report = line
if count == 8:
break
count += 1
# transform strings to numpy lists, while conserving np.array dimensions
prediction_test_load = re.sub('\s+', '', prediction_test_load)
prediction_test_scatter = ast.literal_eval(prediction_test_load)
prediction_train_load = re.sub('\s+', '', prediction_train_load)
prediction_train_scatter = ast.literal_eval(prediction_train_load)
testdata_load = re.sub('\s+', '', testdata_load)
testdata_load = np.asarray(ast.literal_eval(testdata_load))
x_test_scatter = np.round(testdata_load[:, 0], 2).tolist()
y_test_scatter = np.round(testdata_load[:, 1], 2).tolist()
traindata_load = re.sub('\s+', '', traindata_load)
traindata_load = np.asarray(ast.literal_eval(traindata_load))
x_train_scatter = np.round(traindata_load[:, 0], 2).tolist()
y_train_scatter = np.round(traindata_load[:, 1], 2).tolist()
xx_decision_line = re.sub('\s+', '', xx_decision_line)
x_cont = ast.literal_eval(xx_decision_line)
yy_decision_line = re.sub('\s+', '', yy_decision_line)
y_cont = ast.literal_eval(yy_decision_line)
color_decision_line = re.sub('\s+', '', color_decision_line)
z_cont = np.asarray(ast.literal_eval(color_decision_line))
classification_report = re.sub('\s+', '', classification_report)
classification_report = ast.literal_eval(classification_report)
# get accuracy metrics from classification_report
accuracy = np.round(classification_report['accuracy'], 2)
f1_score = np.round(classification_report['macroavg']['f1-score'], 2)
precision_gutteil = np.round(classification_report['0.0']['precision'], 2)
sensitivity_gutteil = np.round(classification_report['0.0']['recall'], 2)
precision_nachbearbeiten = np.round(classification_report['1.0']['precision'], 2)
sensitivity_nachbearbeiten = np.round(classification_report['1.0']['recall'], 2)
precision_ausschuss = np.round(classification_report['2.0']['precision'], 2)
sensitivity_ausschuss = np.round(classification_report['2.0']['recall'], 2)
# load old process parameters from file
if path.exists("temp/temp_process_params_spanen.csv"):
f = open("temp/temp_process_params_spanen.csv", "r")
process_params_load1 = f.read()
f.close()
process_params_load2 = re.sub('\s+', '', process_params_load1)
process_params = ast.literal_eval(process_params_load2)
# only simulate a new data point on page refresh
if slider_status == old_slider_status or path.exists("temp/temp_process_params_spanen.csv") == False:
index = global_index_spanen[-1]
while global_index_spanen[-1] == index:
# create randomized process parameters from multivariate gaussian
p1 = 0.4 # probability for component OK
p2 = 0.3 # probability for component needs rework
p3 = 0.3 # probability for component is scrap part
draw = multinomial.rvs(1, [p1, p2, p3])
index = int(np.where(draw == 1)[0])
global_index_spanen.append(index)
# create data point which will be classfied as okay
if index == 0:
mean = [2.5, 1.5]
cov = [[1.4, 1], [1.2, 0.5]]
                    in_ok_region = False
                    while not in_ok_region:  # rejection-sample until the point falls in the OK region
                        F, P = np.random.multivariate_normal(mean, cov, 1).T
                        if F > 0.5 and F < 5 and P > 0 and P < 3:
                            in_ok_region = True
# create data point which will be classified as rework
elif index == 1:
draw2 = multinomial.rvs(1, [0.5, 0.5])
index2 = np.where(draw2 == 1)
# power too high
if index2[0] == 0:
P = expon.rvs(3.5, 0.3, size=1)
F = uniform.rvs(3.5, 1.5, size=1)
# force too low or too high
elif index2[0] == 1:
draw3 = multinomial.rvs(1, [0.5, 0.5])
index3 = np.where(draw3 == 1)
# force too low
if index3[0] == 0:
P = uniform.rvs(0.5, 1, size=1)
F = uniform.rvs(0, 0.25, size=1)
# force too high
elif index3[0] == 1:
P = uniform.rvs(2, 0.5, size=1)
F = expon.rvs(5.5, 0.2, size=1)
# create data point which will be classified as scrap: power and force too high
elif index == 2:
P = expon.rvs(3.5, 0.3, size=1) # loc, scale, size
F = expon.rvs(5.5, 0.2, size=1)
process_params = [P.tolist(), F.tolist()]
# save process_params to temp file
file = open("temp/temp_process_params_spanen.csv", "w")
file.write(str(process_params) + "\n")
file.close()
# load confusion matrix
spanen_confusion_absolute_callback = html.Div([
html.Img(src=app.get_asset_url('spanen/spanen_confusion_absolute_' + str(n_train) + '.png'))
],)
spanen_confusion_normalised_callback = html.Div([
html.Img(src=app.get_asset_url('spanen/spanen_confusion_normalised_' + str(n_train) + '.png'))
],)
# plot bar graph of force
fig1_callback = go.Figure()
fig1_callback.add_trace(go.Indicator(
mode="number+gauge", value=process_params[1][0], number={'font': {'size': 30}},
domain={'x': [0.25, 1], 'y': [0.3, 0.7]},
title={'text': "Kraft in kN", 'font': {'size': 20}},
gauge={
'shape': "bullet",
'axis': {'range': [0, 8]},
'threshold': {
'line': {'color': 'black', 'width': 5},
'thickness': 0.75,
'value': 5},
'steps': [
{'range': [0, 0.5], 'color': "lightgray"},
{'range': [0.5, 5], 'color': "green"},
{'range': [5, 8], 'color': "lightgray"}],
'bar': {
'color': 'black'}
},
),
)
fig1_callback.update_layout(autosize=True, height=150, margin={'t': 0, 'b': 0, 'l': 0, 'r': 0},
paper_bgcolor="#f9f9f9", )
# plot bar graph of power
fig2_callback = go.Figure()
fig2_callback.add_trace(go.Indicator(
mode="number+gauge", value=process_params[0][0], number={'font': {'size': 30}},
# delta = {'reference': 200},
domain={'x': [0.25, 1], 'y': [0.3, 0.7]},
title={'text': "Leistung in kW", 'font': {'size': 20}},
gauge={
'shape': "bullet",
'axis': {'range': [0, 6]},
'threshold': {
'line': {'color': 'black', 'width': 5},
'thickness': 0.75,
'value': 3},
'steps': [
{'range': [0, 3], 'color': "green"},
{'range': [3, 6], 'color': "lightgray"}],
'bar': {'color': 'black'},
},
)
)
fig2_callback.update_layout(autosize=True, height=150, margin={'t': 0, 'b': 0, 'l': 0, 'r': 0},
paper_bgcolor="#f9f9f9", )
# update info boxes with accuracy metrics
accuracy_callback = html.Div(
[
html.H6("Genauigkeit", style={"text-align": "center", 'font-weight': 'bold'}),
html.Br(),
html.H4(str(accuracy), style={"text-align": "center", 'font-weight': 'bold'}, ),
html.Br(),
],
),
f1_score_callback = html.Div(
[
html.H6("F1-Score",
style={"text-align": "center", 'font-weight': 'bold'}),
html.Br(),
html.H4(str(f1_score), style={"text-align": "center", 'font-weight': 'bold'}, ),
html.Br(),
],
),
precision_callback = html.Div(
[
html.H6("Präzision", style={"text-align": "center", 'font-weight': 'bold'}),
html.Br(),
html.P("Gutteil: " + str(precision_gutteil), style={"text-align": "center"}, ),
html.P("Nachbearb.: " + str(precision_nachbearbeiten), style={"text-align": "center"}, ),
html.P("Ausschuss: " + str(precision_ausschuss), style={"text-align": "center"}, ),
],
),
sensitivity_callback = html.Div(
[
html.H6("Sensitivität", style={"text-align": "center", 'font-weight': 'bold'}),
html.Br(),
html.P("Gutteil: " + str(sensitivity_gutteil), style={"text-align": "center"}, ),
html.P("Nachbearb.: " + str(sensitivity_nachbearbeiten), style={"text-align": "center"}, ),
html.P("Ausschuss: " + str(sensitivity_ausschuss), style={"text-align": "center"}, ),
],
),
# create dataframe for scattered training and test data
df_test =
|
pandas.DataFrame({'x_test_scatter': x_test_scatter, 'y_test_scatter': y_test_scatter, 'prediction_test_scatter': prediction_test_scatter})
|
pandas.DataFrame
|
import os
import pandas as pd
import geopandas as gpd
import numpy as np
def integrate_data(data, sheet_name, category, dff_dict, demand_links,
init_year, end_year, var_name='links', target='point'):
df = data.parse(sheet_name, skiprows=3)
df.rename(columns={'Unnamed: 0': 'Date'}, inplace=True)
df.columns = df.columns.str.replace('"', '').str.strip()
df.columns = df.columns.str.replace('Groundwater','GW')
df.columns = df.columns.str.replace('Grounwater','GW')
df.columns = df.columns.str.replace('GW of ','')
df.columns = df.columns.str.replace('GW ','')
df.columns = df.columns.str.replace('I_TRSPD','I_Traditional Rehabilite du Souss Perimetre Diffus')
# df.columns = df.columns.str.replace('I_Traditional Rehabilite I','I_Traditional Rehabilite Issen')
for link in demand_links.links:
if np.array(df.columns[df.columns.str.contains(link)]).size > 0:
df.rename(columns={df.columns[df.columns.str.contains(link)][0]: link}, inplace=True)
df = df.loc[df.Date!='Sum']
df.Date = pd.to_datetime(df.Date)
df['Year'] = df.Date.dt.year
df['Month'] = df.Date.dt.month
drop_columns = []
if 'Sum' in df.columns:
drop_columns.append('Sum')
df.drop(columns=drop_columns, inplace=True)
df = df.loc[(df.Year >= init_year) & (df.Year <= end_year)]
df = df.melt(id_vars=['Date', 'Year', 'Month'])
for name, dff in dff_dict.items():
df_temp = dff.set_index(var_name)
if var_name!=target:
df[name] = df.variable.map(df_temp[target])
df['type'] = category
df.rename(columns={'variable': var_name}, inplace=True)
if df.loc[~df[var_name].isin(df.dropna()[var_name].unique()),var_name].unique().size > 0:
print("The following links were not found:")
print(df.loc[~df[var_name].isin(df.dropna()[var_name].unique()),var_name].unique())
return df
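# Hedged note on the return value: integrate_data yields a long-format DataFrame with one row per
# (Date, link) combination and columns Date / Year / Month / <var_name> / value, plus one mapped
# column per entry in dff_dict and a constant 'type' column set to `category`.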
def data_merging(demand_points, supply_points, pipelines):
df1 = demand_points.groupby('point').agg({'type': 'first',
'geometry': 'first'}).reset_index()
df2 = supply_points.groupby('point').agg({'type': 'first',
'geometry': 'first'}).reset_index()
df_pipelines = pipelines.groupby('diversion').agg({'geometry': 'first'}).reset_index()
df = df1.append(df2, ignore_index=True)
df['lon'] = [point.xy[0][0] for point in df.geometry]
df['lat'] = [point.xy[1][0] for point in df.geometry]
pipe_coords =
|
pd.DataFrame({'lon': [], 'lat': []})
|
pandas.DataFrame
|
import json
import logging
import pathlib
from typing import Union, Optional
import pandas
from arclus.utils import is_blank
class DatasetReader:
"""General class for reading datasets."""
#: The root directory.
root: pathlib.Path
def __init__(
self,
root: Union[pathlib.Path, str],
claim_id: int = 0,
premise_id: int = 0,
):
if not isinstance(root, pathlib.Path):
root = pathlib.Path(root)
self.root = root
self.offset_claim_id = claim_id
self.offset_premise_id = premise_id
self._claims = []
self._premises = []
self._claim_premises = []
self._read()
def _add_claim(self, text: str) -> Optional[int]:
"""
Add a claim to the dataset.
:param text:
The claim text.
:return:
The claim ID.
"""
if is_blank(text=text):
return None
claim_id = len(self._claims) + self.offset_claim_id
self._claims.append(dict(
claim_text=text,
claim_id=claim_id,
source=self.name,
))
return claim_id
def _add_premise(self, text: str, stance: str) -> Optional[int]:
"""
Add a premise to the dataset.
        :param text:
            The premise text.
        :param stance:
            The premise stance (e.g. 'Pro' or 'Con').
:return:
The premise ID.
"""
if is_blank(text=text):
return None
premise_id = len(self._premises) + self.offset_premise_id
self._premises.append(dict(
premise_text=text,
premise_id=premise_id,
stance=stance,
source=self.name,
))
return premise_id
def _add_claim_premise(self, premise_id: Optional[int], claim_id: Optional[int]) -> None:
"""
Add a link between premise_id and claim_id.
If any of the IDs is None, no link is added.
"""
if None not in {claim_id, premise_id}:
self._claim_premises.append(dict(
premise_id=premise_id,
claim_id=claim_id,
))
def __str__(self):
return f'Dataset(name={self.name}, num_claims={len(self._claims)}, num_premises={len(self._premises)})'
@property
def max_claim_id(self) -> int:
return self.offset_claim_id + len(self._claims)
@property
def max_premise_id(self) -> int:
return self.offset_premise_id + len(self._premises)
@property
def premises(self) -> pandas.DataFrame:
return pandas.DataFrame(self._premises)
@property
def claims(self) -> pandas.DataFrame:
return pandas.DataFrame(self._claims)
@property
def claims_premises(self) -> pandas.DataFrame:
return pandas.DataFrame(self._claim_premises)
@property
def name(self) -> str:
"""The name of the dataset."""
raise NotImplementedError
def _read(self):
"""Read the dataset."""
raise NotImplementedError
class DebatePediaOrgReader(DatasetReader):
"""DebatePediaOrg dataset."""
@property
def name(self) -> str:
return 'DebatePediaOrg'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
for claim_data in data:
if len(claim_data['pros']) + len(claim_data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = claim_data['claimText']
claim_id = self._add_claim(text=claim_text)
for premise_data, stance in [(claim, stance) for stance in ('Pro', 'Con') for claim in
claim_data[stance.lower() + 's']]:
premise_text = premise_data['premiseText']
premise_id = self._add_premise(text=premise_text, stance=stance)
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class DebateOrgReader(DatasetReader):
"""DebateOrg dataset."""
@property
def name(self) -> str:
return 'DebateOrg'
def _read(self):
with self.root.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
for claim_data in data:
if len(claim_data['pros']) + len(claim_data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = claim_data['title']
claim_id = self._add_claim(claim_text)
for premise_data, stance in [(claim, stance) for stance in ('Pro', 'Con') for claim in
claim_data[stance.lower() + 's']]:
premise_text = premise_data['text']
premise_id = self._add_premise(text=premise_text, stance=stance)
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class DebateWiseReader(DatasetReader):
"""DebateWise dataset."""
@property
def name(self) -> str:
return 'debatewise'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
if len(data['ArgumentList']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = data['MetaData']['Title']
claim_id = self._add_claim(text=claim_text)
for premise_data in data['ArgumentList']:
premise_text = premise_data['Argument']['Premise'][0]
premise_id = self._add_premise(text=premise_text,
stance=premise_data['Argument']['PremiseStance'][0])
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
class IDebateOrgReader(DatasetReader):
"""iDebateOrg dataset."""
@property
def name(self) -> str:
return 'iDebateOrg'
def _read(self):
for index, file_path in enumerate(self.root.iterdir()):
with file_path.open(mode='r', errors='ignore') as json_data:
data = json.load(json_data)
if len(data['pros']) + len(data['cons']) == 0:
logging.warning('Skipping empty file')
continue
claim_text = data['title']
claim_id = self._add_claim(claim_text)
for premise_data_pro in (data['pros']):
premise_text = premise_data_pro['text point pro claim']
premise_id = self._add_premise(text=premise_text, stance="Pro")
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
for premise_data_con in (data['cons']):
premise_text = premise_data_con['text point con claim']
premise_id = self._add_premise(text=premise_text, stance="Con")
self._add_claim_premise(premise_id=premise_id, claim_id=claim_id)
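# Hedged usage sketch (paths are assumptions): readers can be chained so that claim and premise
# IDs stay globally unique across sources via the offset arguments, e.g.
#   first = DebateOrgReader(root='data/debateorg.json')
#   second = DebatePediaOrgReader(root='data/debatepediaorg/',
#                                 claim_id=first.max_claim_id,
#                                 premise_id=first.max_premise_id)
#   premises = pandas.concat([first.premises, second.premises], ignore_index=True)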
def remove_duplicates(
premises: pandas.DataFrame,
claims: pandas.DataFrame,
assignments: pandas.DataFrame
) -> [pandas.DataFrame, pandas.DataFrame, pandas.DataFrame]:
"""
Remove duplicate premises and claims (w.r.t. text).
Update assignments:
- ids that belong to a duplicate have to be updated to the remaining id.
- then, duplicate assignments are removed
:param premises:
The premises.
:param claims:
The claims.
:param assignments:
The assignments.
:return:
The unique premises, claims and assignments.
"""
# extend assignments to have the premise and the claim text in df
ass_extended = pandas.merge(assignments, premises, how='inner', on="premise_id")
ass_extended = pandas.merge(ass_extended, claims, how='inner', on="claim_id")
# drop duplicates in claims and premises (first occurence is kept)
claims_df = claims.drop_duplicates(subset=["claim_text"])
premises_df = premises.drop_duplicates(subset=["premise_text"])
# extend assignments again by the now unique claim and premise text
ass_extended = pandas.merge(ass_extended, claims_df, how='inner', on="claim_text")
ass_extended =
|
pandas.merge(ass_extended, premises_df, how='inner', on="premise_text")
|
pandas.merge
|
#!/usr/bin/env python
# coding: utf-8
# Import libraries
import pandas as pd
import numpy as np
import math
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import xgboost as xgb
import datetime
import pickle
import warnings
warnings.filterwarnings("ignore")
# Load data to dataframe
datafile = 'london_merged.csv'
df = pd.read_csv(datafile)
# Convert dtypes to save memory
df['weather_code'] = df['weather_code'].astype('uint8')
df['is_holiday'] = df['is_holiday'].astype('uint8')
df['is_weekend'] = df['is_weekend'].astype('uint8')
df['season'] = df['season'].astype('uint8')
df['t1'] = df['t1'].astype('float16')
df['t2'] = df['t2'].astype('float16')
df['hum'] = df['hum'].astype('float16')
df['wind_speed'] = df['wind_speed'].astype('float16')
# Sort data according to timestamp
df['timestamp'] = pd.to_datetime(df['timestamp'])
df = df.sort_values(by=['timestamp'],ascending=True)
df.reset_index(drop=True,inplace=True)
# Splitting data as Full train (80%), Test (20%)
df_full_train, df_test = train_test_split(df,test_size=0.2,shuffle=False,random_state=1)
# Set target and delete it from dataframe
y_full_train = df_full_train['cnt']
y_test = df_test['cnt']
del df_full_train['cnt']
del df_test['cnt']
# Function to train the model and predict on validation data
def train_predict(df_full_train,df_test,y_full_train,model):
X_full_train = df_full_train.values
model.fit(X_full_train, y_full_train)
X_test = df_test.values
y_pred = model.predict(X_test)
y_train_pred = model.predict(X_full_train)
return y_pred, y_train_pred, model
# Function to evaluate various metrics/scores on predictions on validation and training
def evaluate_scores(y_test_eval, y_pred_eval, y_full_train_eval, y_pred_full_train_eval):
scores = {}
scores['val_r2'] = r2_score(y_test_eval, y_pred_eval)
scores['val_mse'] = mean_squared_error(y_test_eval, y_pred_eval,squared=True)
scores['val_rmse'] = mean_squared_error(y_test_eval, y_pred_eval,squared=False)
scores['val_mae'] = mean_absolute_error(y_test_eval, y_pred_eval)
scores['train_r2'] = r2_score(y_full_train_eval, y_pred_full_train_eval)
scores['train_mse'] = mean_squared_error(y_full_train_eval, y_pred_full_train_eval,squared=True)
scores['train_rmse'] = mean_squared_error(y_full_train_eval, y_pred_full_train_eval,squared=False)
scores['train_mae'] = mean_absolute_error(y_full_train_eval, y_pred_full_train_eval)
    rnd_digits = 5  # round to this many digits
    for metric, value in scores.items():
        scores[metric] = round(value, rnd_digits)
return scores
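# Hedged usage sketch (assumes the timestamp-derived features defined below have already been added
# and the raw 'timestamp' column dropped before training; the hyperparameters are arbitrary):
#   model = xgb.XGBRegressor(n_estimators=100, random_state=1)
#   y_pred, y_train_pred, model = train_predict(df_full_train, df_test, y_full_train, model)
#   print(evaluate_scores(y_test, y_pred, y_full_train, y_train_pred))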
# Function to perform pre processing on data before training
# Combining all the step by step processing done above into a function
# Function to now create different features from timestamp
def pre_process_new_ft(df_to_process):
df_to_process['year'] = df_to_process['timestamp'].dt.year
df_to_process['month'] = df_to_process['timestamp'].dt.month
df_to_process['day'] = df_to_process['timestamp'].dt.day
df_to_process['hour'] = df_to_process['timestamp'].dt.hour
df_to_process['day-of-week'] = pd.to_datetime(df_to_process['timestamp']).dt.dayofweek.values
df_to_process['week-of-year'] = pd.to_datetime(df_to_process['timestamp']).dt.isocalendar().week.values
df_to_process['day-of-year'] =
|
pd.to_datetime(df_to_process['timestamp'])
|
pandas.to_datetime
|
import pandas as pd
from pandas._testing import assert_frame_equal
from nempy import unit_constraints
def test_create_constraints():
unit_limit = pd.DataFrame({
'unit': ['A', 'B'],
'upper': [16.0, 23.0]
})
next_constraint_id = 4
rhs_col = 'upper'
direction = '<='
output_rhs, output_variable_map = unit_constraints.create_constraints(unit_limit, next_constraint_id, rhs_col,
direction)
expected_rhs = pd.DataFrame({
'unit': ['A', 'B'],
'service': ['energy', 'energy'],
'constraint_id': [4, 5],
'type': ['<=', '<='],
'rhs': [16.0, 23.0]
})
expected_variable_map = pd.DataFrame({
'constraint_id': [4, 5],
'unit': ['A', 'B'],
'service': ['energy', 'energy'],
'coefficient': [1.0, 1.0]
})
assert_frame_equal(output_rhs, expected_rhs)
assert_frame_equal(output_variable_map, expected_variable_map)
def test_one_unit_create_constraints():
unit_limit = pd.DataFrame({
'unit': ['A'],
'upper': [16.0]
})
next_constraint_id = 4
rhs_col = 'upper'
direction = '<='
output_rhs, output_variable_map = unit_constraints.create_constraints(unit_limit, next_constraint_id, rhs_col,
direction)
expected_rhs = pd.DataFrame({
'unit': ['A'],
'service': ['energy'],
'constraint_id': [4],
'type': ['<='],
'rhs': [16.0],
})
expected_variable_map = pd.DataFrame({
'constraint_id': [4],
'unit': ['A'],
'service': ['energy'],
'coefficient': [1.0],
})
assert_frame_equal(output_rhs, expected_rhs)
|
assert_frame_equal(output_variable_map, expected_variable_map)
|
pandas._testing.assert_frame_equal
|
## Machine learning testing script
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import posanal as usrpos
import random
plot_ML = True
data_init = pd.read_csv('results_exp1-5.csv')
# ---- Data Pre-processing ---- #
dataset = data_init
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:,6].values
#print dataset
#print '\nX = '
#print X
#print '\nY = '
#print y
# ---- Split into training and testing ---- #
results_RF = pd.DataFrame()
results_SLR = pd.DataFrame()
test_size = np.arange(0.05,1,0.05)
print(test_size)
for i in range(19):
ts=float(i+1)/20
    from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = ts,
random_state = 10)
# ---- Regression Model ---- #
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
regressor = RandomForestRegressor()
regressor.fit(X_train, y_train)
regressor2 = LinearRegression()
regressor2.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
y_pred2 = regressor2.predict(X_test)
# print 'y_pred = \n', y_pred
# print '\ny_test = \n', y_test
# ---- Plot this test ---- #
if plot_ML == True:
fig = plt.figure()
ax = fig.gca()
ax.plot(y_test, y_test, 'k--', label = 'Test Data')
ax.scatter(y_test, y_pred, c='blue', label = 'Random Forest Prediction', s=10)
ax.scatter(y_test, y_pred2, c='red', label = 'Linear Regression Prediction',s=10)
ax.set_xlabel('$R_g$ (test)',size='large')
ax.set_ylabel('$R_g$ (test, predicted)',size='large')
ax.legend()
ax.set_title('Test Size = {}'.format(ts))
fname = str('prediction_{}.png'.format(ts))
plt.savefig(fname)
# ---- Evaluate results ---- #
def error(pred, test):
    """Return the mean and standard deviation of the absolute relative error |pred - test| / test."""
N = len(test)
err_df = np.ndarray([N,1])
for i in range(N):
err_df[i][0] = abs((pred[i]-test[i]))/test[i]
err_df=pd.DataFrame(err_df)
mean_err = err_df.mean(axis=0)
std_err = err_df.std(axis=0)
errors = np.array([mean_err,std_err])
#print ts, errors, '\n'
return errors
err = 100*error(y_pred, y_test).transpose()
err2 = 100*error(y_pred2, y_test).transpose()
temp_RF = pd.DataFrame(err)
temp_SLR =
|
pd.DataFrame(err2)
|
pandas.DataFrame
|
# Verified and checked shipments of samples, as of May 2019.
import pandas as pd
import os
os.chdir("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/")
# Helper functions for cleanup...
import helpers
cols2save = ['patientID', 'country', 'source']
# [clean pbmc] ----------------------------------------------------------------------------------------------------
file1 = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/CViSB PBMC Samples_MP.xlsx"
pbmc = pd.read_excel(file1)
pbmc['source'] = "PBMCSamples_May2019"
pbmc['country'] = "Sierra Leone"
pbmc['patientID'] = pbmc.privatePatientID.apply(helpers.interpretID)
pbmc = pbmc[cols2save]
pbmc.shape
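# Hedged refactoring sketch: the repeated read / annotate / subset pattern in this script could be
# wrapped in a helper (the name and behaviour below are an editorial suggestion, not part of the original):
#   def load_roster(path, source):
#       df = pd.read_excel(path)
#       df['source'] = source
#       df['country'] = "Sierra Leone"
#       df['patientID'] = df.privatePatientID.apply(helpers.interpretID)
#       return df[cols2save]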
# [clean pbmc-dna] ----------------------------------------------------------------------------------------------------
file2 = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/CViSB PBMC-DNA Samples_MP.xlsx"
pbmcdna = pd.read_excel(file2)
pbmcdna['source'] = "PBMC-DNASamples_May2019"
pbmcdna['country'] = "Sierra Leone"
pbmcdna['patientID'] = pbmcdna.privatePatientID.apply(helpers.interpretID)
pbmcdna = pbmcdna[cols2save]
pbmcdna.shape
# [clean pbmc-rna] ----------------------------------------------------------------------------------------------------
file3 = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/CViSB PCMC-RNA Samples_MP.xlsx"
pbmcrna = pd.read_excel(file3)
pbmcrna['source'] = "PBMC-RNASamples_May2019"
pbmcrna['country'] = "Sierra Leone"
pbmcrna['patientID'] = pbmcrna.privatePatientID.apply(helpers.interpretID)
pbmcrna = pbmcrna[cols2save]
pbmcrna.shape
# [clean plasma] ----------------------------------------------------------------------------------------------------
file4 = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/CViSB Plasma Samples_MP.xlsx"
plasma =
|
pd.read_excel(file4)
|
pandas.read_excel
|
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
"div",
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
if op == "div":
return
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
def test_alignment_non_pandas(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
]:
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
tm.assert_frame_equal(align(df, val, "index")[1], expected)
expected = DataFrame(
{"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index
)
tm.assert_frame_equal(align(df, val, "columns")[1], expected)
# length mismatch
msg = "Unable to coerce to Series, length must be 3: given 2"
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
align(df, val, "index")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
align(df, val, "columns")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
# shape mismatch
msg = "Unable to coerce to DataFrame, shape must be"
val = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.zeros((3, 3, 3))
msg = re.escape(
"Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
)
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
def test_no_warning(self, all_arithmetic_operators):
df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b, 0)
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
left = pd.DataFrame({"A": [0, 1, 2]})
right = pd.DataFrame(index=[0, 1, 2])
result = left ** right
expected = pd.DataFrame({"A": [np.nan, 1.0, np.nan]})
tm.assert_frame_equal(result, expected)
# TODO: move to tests.arithmetic and parametrize
def test_pow_nan_with_zero():
left = pd.DataFrame({"A": [np.nan, np.nan, np.nan]})
right = pd.DataFrame({"A": [0, 0, 0]})
expected = pd.DataFrame({"A": [1.0, 1.0, 1.0]})
result = left ** right
tm.assert_frame_equal(result, expected)
result = left["A"] ** right["A"]
tm.assert_series_equal(result, expected["A"])
def test_dataframe_series_extension_dtypes():
# https://github.com/pandas-dev/pandas/issues/34311
df = pd.DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
expected = pd.DataFrame(expected, columns=df.columns, dtype="Int64")
df_ea = df.astype("Int64")
result = df_ea + ser
tm.assert_frame_equal(result, expected)
result = df_ea + ser.astype("Int64")
tm.assert_frame_equal(result, expected)
def test_dataframe_blockwise_slicelike():
# GH#34367
arr = np.random.randint(0, 1000, (100, 10))
df1 = pd.DataFrame(arr)
df2 = df1.copy()
df2.iloc[0, [1, 3, 7]] = np.nan
df3 = df1.copy()
df3.iloc[0, [5]] = np.nan
df4 = df1.copy()
df4.iloc[0, np.arange(2, 5)] = np.nan
df5 = df1.copy()
df5.iloc[0, np.arange(4, 7)] = np.nan
for left, right in [(df1, df2), (df2, df3), (df4, df5)]:
res = left + right
expected = pd.DataFrame({i: left[i] + right[i] for i in left.columns})
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"df, col_dtype",
[
(pd.DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"),
(pd.DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"),
],
)
def test_dataframe_operation_with_non_numeric_types(df, col_dtype):
# GH #22663
expected = pd.DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))
expected = expected.astype({"b": col_dtype})
    result = df + pd.Series([-1.0], index=list("a"))
    tm.assert_frame_equal(result, expected)
import os
import importlib
from datetime import datetime
from pathlib import Path
from bs4 import BeautifulSoup
from joblib import Parallel, delayed
if importlib.util.find_spec("pandas") is not None:
import pandas as pd
from xbrr.base.reader.base_reader import BaseReader
from xbrr.edinet.reader.directory import Directory
from xbrr.edinet.reader.taxonomy import Taxonomy
from xbrr.edinet.reader.element import Element
from xbrr.edinet.reader.element_schema import ElementSchema
class Reader(BaseReader):
def __init__(self, xbrl_dir_or_file="", taxonomy=None):
super().__init__("edinet")
self.__xbrl_dir_or_file = xbrl_dir_or_file
self._cache = {}
if os.path.isdir(xbrl_dir_or_file):
self.xbrl_dir = Directory(xbrl_dir_or_file)
self.xbrl_file = self.xbrl_dir._find_file("xbrl", as_xml=False)
elif os.path.isfile(xbrl_dir_or_file):
self.xbrl_file = xbrl_dir_or_file
self.xbrl_dir = None
else:
            raise Exception(
                f"File or directory {xbrl_dir_or_file} does not exist.")
if isinstance(taxonomy, Taxonomy):
self.taxonomy = taxonomy
else:
if self.xbrl_dir:
root = Path(self.xbrl_dir.root).parent
else:
root = Path(self.xbrl_file).parent
if root.name == "raw":
# Cookiecutter data science structure
root = root.parent
root = root.joinpath("external")
self.taxonomy = Taxonomy(root)
self.taxonomy_year = ""
self.__set_taxonomy_year()
def set_cache(self, cache):
self._cache = cache
return self
def __reduce_ex__(self, proto):
return type(self), (self.__xbrl_dir_or_file, self.taxonomy)
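    # __set_taxonomy_year infers the taxonomy vintage from the filing's fiscal-year-end
    # date and period type, using each candidate year's March 31 cut-off and preferring
    # the newest matching year.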
def __set_taxonomy_year(self):
self.taxonomy_year = ""
date = self.xbrl.find("jpdei_cor:CurrentFiscalYearEndDateDEI").text
kind = self.xbrl.find("jpdei_cor:TypeOfCurrentPeriodDEI").text
date = datetime.strptime(date, "%Y-%m-%d")
for y in sorted(list(self.taxonomy.TAXONOMIES.keys()), reverse=True):
boarder_date = datetime(int(y[:4]), 3, 31)
if kind[0] in ("Q", "H") and date > boarder_date:
self.taxonomy_year = y
elif date >= boarder_date:
if y == 2019:
self.taxonomy_year = "2019_cg_ifrs"
else:
self.taxonomy_year = y
if self.taxonomy_year:
break
@property
def roles(self):
role_refs = self.find_all("link:roleRef")
roles = {}
for e in role_refs:
element = e.element
link = element["xlink:href"]
roles[element["roleURI"]] = {
"link": element["xlink:href"],
"name": self.read_by_link(link).element.find("link:definition").text
}
return roles
@property
def taxonomy_path(self):
return self.taxonomy.root.joinpath("taxonomy", str(self.taxonomy_year))
@property
def namespaces(self):
schema = self.xbrl.find("xbrli:xbrl")
namespaces = {}
for a in schema.attrs:
if a.startswith("xmlns:"):
namespaces[a.replace("xmlns:", "")] = schema.attrs[a]
return namespaces
@property
def xbrl(self):
if self.xbrl_dir:
path = self.xbrl_dir._find_file("xbrl", as_xml=False)
else:
path = self.xbrl_file
return self._read_from_cache(path)
def _read_from_cache(self, path):
xml = None
if path in self._cache:
xml = self._cache[path]
else:
with open(path, encoding="utf-8-sig") as f:
xml = BeautifulSoup(f, "lxml-xml")
self._cache[path] = xml
return self._cache[path]
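    # link_to_path resolves an xlink href (optionally taxonomy-prefixed and "#"-anchored)
    # to a local file path, remapping date-stamped taxonomy directories and falling back
    # to the filing's own .xsd when no taxonomy path applies.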
def link_to_path(self, link):
path = link
element = ""
if "#" in link:
path, element = link.split("#")
if self.taxonomy and path.startswith(self.taxonomy.prefix):
path = path.replace(self.taxonomy.prefix, "")
path = os.path.join(self.taxonomy_path, path)
if not os.path.exists(path):
_path = Path(path)
xbrl_date = _path.parent.name
# check namespace directory
taxonomy_date = ""
if _path.parent.parent.exists():
for d in _path.parent.parent.iterdir():
if d.is_dir():
taxonomy_date = d.name
break
if taxonomy_date and taxonomy_date != xbrl_date:
path = path.replace(xbrl_date, taxonomy_date)
if os.path.isdir(path):
_path = Path(path)
xbrl_date = _path.name
            # when only a directory is resolved, derive the schema (.xsd) file name from the element's namespace prefix
namespace = "_".join(element.split("_")[:-1])
path = _path.joinpath(f"{namespace}_{xbrl_date}.xsd")
elif self.xbrl_dir:
path = self.xbrl_dir._find_file("xsd", as_xml=False)
else:
path = os.path.dirname(self.xbrl_file)
return path
def read_by_link(self, link):
if link.startswith(self.taxonomy.prefix):
self.taxonomy.download(self.taxonomy_year)
element = ""
if "#" in link:
element = link.split("#")[-1]
path = self.link_to_path(link)
xml = self._read_from_cache(path)
if element:
xml = xml.select(f"#{element}")
# xml = xml.find("element", {"id": element})
if len(xml) > 0:
xml = xml[0]
xml = Element(element, xml, link, self)
return xml
def has_role_in_link(self, role_link, link_type):
if link_type == "presentation":
doc = self.xbrl_dir.pre
elif link_type == "calculation":
doc = self.xbrl_dir.cal
else:
return False
role = doc.find("link:roleRef", {"roleURI": role_link})
if role is not None:
return True
else:
return False
def read_schema_by_role(self, role_link, link_type="presentation",
label_kind="", label_verbose=False):
if self.xbrl_dir is None:
raise Exception("XBRL directory is required.")
doc = None
link_node = ""
arc_node = ""
if link_type == "presentation":
doc = self.xbrl_dir.pre
link_node = "link:presentationLink"
arc_node = "link:presentationArc"
elif link_type == "calculation":
doc = self.xbrl_dir.cal
link_node = "link:calculationLink"
arc_node = "link:calculationArc"
else:
raise Exception(f"Does not support {link_type}.")
schemas = []
role = doc.find(link_node, {"xlink:role": role_link})
if role is None:
return schemas
def get_name(loc):
return loc["xlink:href"].split("#")[-1]
def create(reader, reference):
return ElementSchema.create_from_reference(
reader, reference, label_kind, label_verbose)
nodes = {}
arc_role = ""
if link_type == "calculation":
arc_role = "summation-item"
else:
arc_role = "parent-child"
locs = {}
for loc in role.find_all("link:loc"):
locs[loc["xlink:label"]] = loc
for i, arc in enumerate(role.find_all(arc_node)):
if not arc["xlink:arcrole"].endswith(arc_role):
print("Unexpected arctype.")
continue
parent = locs[arc["xlink:from"]]
child = locs[arc["xlink:to"]]
if get_name(child) not in nodes:
c = create(self, child["xlink:href"]).set_alias(child["xlink:label"])
nodes[get_name(child)] = Node(c, arc["order"])
else:
nodes[get_name(child)].order = arc["order"]
if get_name(parent) not in nodes:
p = create(self, parent["xlink:href"]).set_alias(parent["xlink:label"])
nodes[get_name(parent)] = Node(p, i)
nodes[get_name(child)].add_parent(nodes[get_name(parent)])
parent_depth = -1
for name in nodes:
if parent_depth < nodes[name].depth:
parent_depth = nodes[name].depth
for name in nodes:
n = nodes[name]
item = {}
parents = n.get_parents()
parents = parents + ([""] * (parent_depth - len(parents)))
for i, p in zip(reversed(range(parent_depth)), parents):
name = p if isinstance(p, str) else p.name
order = "0" if isinstance(p, str) else p.order
item[f"parent_{i}"] = name
item[f"parent_{i}_label"] = ""
item[f"parent_{i}_order"] = order
item["order"] = n.order
item["depth"] = n.depth
item.update(n.element.to_dict())
schemas.append(item)
        schemas = pd.DataFrame(schemas)
#-*- coding: utf-8 -*-
import re
import os
from wxpy import *
import jieba
import numpy
import pandas as pd
import matplotlib.pyplot as plt
from imageio import imread  # scipy.misc.imread was removed from recent SciPy releases
from wordcloud import WordCloud, ImageColorGenerator
def remove_txt_file(path):
    '''
    Delete the txt file if it exists
    '''
if os.path.exists(path):
os.remove(path)
def write_txt_file(path, txt):
    '''
    Append text to a txt file
    '''
with open(path, 'a', encoding='utf8') as f:
f.write(txt)
def read_txt_file(path):
    '''
    Read a txt file and return its contents
    '''
with open(path, 'r', encoding='utf8') as f:
return f.read()
def login():
    # Initialize the bot and log in by scanning the QR code
bot = Bot()
    # Get all friends
my_friends = bot.friends()
write_txt_file('data.js', 'var name = "')
write_txt_file('data.js', str(bot.self.name))
write_txt_file('data.js', '";\n')
return my_friends
def show_sex_ratio(friends):
    # Use a dict to count male and female friends
sex_dict = {'male': 0, 'female': 0, 'other':0}
for friend in friends:
        # Tally by sex
if friend.sex == 1:
sex_dict['male'] += 1
elif friend.sex == 2:
sex_dict['female'] += 1
else:
sex_dict['other'] += 1
    # Build JSON-array-style data for easier presentation
data = []
for key, value in sex_dict.items():
data.append({'name': key, 'value': value})
write_txt_file('data.js', 'var sex_data = ')
write_txt_file('data.js', str(data))
write_txt_file('data.js', ';\n')
print(sex_dict)
def show_area_distribution(friends):
    # Use a dict to count friends in each province
province_dict = {'北京': 0, '上海': 0, '天津': 0, '重庆': 0,
'河北': 0, '山西': 0, '吉林': 0, '辽宁': 0, '黑龙江': 0,
'陕西': 0, '甘肃': 0, '青海': 0, '山东': 0, '福建': 0,
'浙江': 0, '台湾': 0, '河南': 0, '湖北': 0, '湖南': 0,
'江西': 0, '江苏': 0, '安徽': 0, '广东': 0, '海南': 0,
'四川': 0, '贵州': 0, '云南': 0,
'内蒙古': 0, '新疆': 0, '宁夏': 0, '广西': 0, '西藏': 0,
'香港': 0, '澳门': 0, '其他': 0}
    # Tally provinces
for friend in friends:
if friend.province in province_dict.keys():
province_dict[friend.province] += 1
else:
province_dict['其他'] += 1
    # Build JSON-array-style data for easier presentation
data = []
for key, value in province_dict.items():
data.append({'name': key, 'value': value})
write_txt_file('data.js', "var provice_data = ")
write_txt_file('data.js', str(data))
write_txt_file('data.js', ';')
print(data)
def get_signature(friends):
if os.path.exists('signatures'):
os.remove('signatures')
    # Collect signatures
for friend in friends:
        # Clean the data: strip punctuation and other characters that would skew the word-frequency counts
pattern = re.compile(r'[一-龥]+')
filterdata = re.findall(pattern, friend.signature)
write_txt_file('signatures', ''.join(filterdata))
def show_signature(friends):
    # Read the signatures file
content = read_txt_file('signatures')
segment = jieba.lcut(content)
    words_df = pd.DataFrame({'segment': segment})
# coding: utf-8
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models import HoverTool, PanTool, WheelZoomTool, BoxSelectTool, TapTool, OpenURL
from bokeh.models import GMapPlot, GMapOptions, Circle, DataRange1d, Range1d
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox, gridplot
from bokeh.models.widgets import Select, Slider, TextInput, DataTable, TableColumn, Div, Select
import itertools
import os
import logging
logger = logging.getLogger(__name__)
# data file locations
MAP_DATA_FILE = 'proteomz_stns.csv'
PROTEOMZ_DATA_FILE = 'ExampleDataset.csv'
TAXA_KEYS_DATA_FILE = 'Taxa_keys.csv'
# plot tools
TOOLS = "box_zoom, pan, xwheel_zoom, reset"
# global visual parameters
TITLE_TEXT_SIZE = '20px'
# introduction text
INTRODUCTION_HTML = """<b>Ocean Proteomics data from <i>Falkor ProteOMZ Expedition</i> in January-February of 2016.
The prototype interactive display can explore millions of protein values from over a hundred 300 liter samples
collected in the Central Pacific Oxygen Minimum Zone to depths greater than 1 kilometer. Use the sliders and
menus to enable scientific discovery within this novel dataset. <u>*NOTE: This is an example dataset containing <i>shuffled protein
annotations.</i> Public release of this dataset coming soon. </u> """
INTRODUCTION_WIDTH = 380
INTRODUCTION_HEIGHT = 130
# map visual parameters
MAP_WIDTH = 400
MAP_HEIGHT = 750
MAP_TITLE = 'ProteOMZ EXPEDITION FALKOR 2015'
MAP_LAT = 7.29
MAP_LON = -145.73
MAP_ZOOM = 4
# For map to work, uncomment this line and put your own google API key (https://developers.google.com/maps/documentation/javascript/get-api-key)
# MAP_API_KEY =
MAP_TYPE = 'hybrid'
DESELECTED_STATION_COLOR = 'white'
SELECTED_STATION_COLOR = 'red'
# profile visual parameters
PROFILE_TITLE = 'The Vertical Distribution of Microbial Proteins'
PROFILE_X_LABEL = 'Relative Abundance (Spectral Counts)'
PROFILE_Y_LABEL = 'Depth in the Ocean (meters)'
PROFILE_LINE_COLOR = 'red'
MAX_PROFILES = 1200
PROFILE_WIDTH = 600
PROFILE_HEIGHT = 1100
# histogram visual parameters
HISTOGRAM_TITLE = 'All Spectra/IDs'
HISTOGRAM_X_LABEL = 'Sum of Proteins/Spectra'
HISTOGRAM_WIDTH = 400
HISTOGRAM_HEIGHT = 1100
# bar chart visual parameters
TAXA_BAR_TITLE = 'The Diversity of Microbial Proteins'
TAXA_BAR_WIDTH = 600
TAXA_BAR_HEIGHT = 350
TAXA_BAR_COLORS = ["#e6ab02", "#1f78b4", "#b2182b", "#7570b3", "#e7298a", "#66a61e",
"#d95f02", "#666666"] #, "#1b9e77"]
#table settings
TAXON_TABLE_WIDTH=600
TAXON_TABLE_HEIGHT=750
# initial selections
ALL = 'ALL'
INIT_TAXA_GROUP = ALL
INIT_EC_GROUP = ALL
INIT_PCTILE = 95
INIT_NUT = 'N+N'
INIT_PROT = 'P1'
ST_SELECT_TITLE = 'Station'
NUT_SELECT_TITLE = 'Select Hydrographic Parameter for Correlation'
TN_SELECT_TITLE = 'Select Microbial Taxon'
EC_SELECT_TITLE = 'Major Enzyme Classes'
PERCENTILE_SLIDER_TITLE = 'Percentile (Note: be patient below 90%)'
EC_GROUPS = ['Oxidoreductases','Transferases', 'Hydrolases', 'Lyases', 'Isomerases', 'Ligases']
# computing axis ranges
def compute_profile_axis_ranges(z, station_counts):
# compute plot axis ranges for profile plot
max_z, min_z = z.max(), z.min()
min_c, max_c = 0, station_counts.max().max()
return (max_z, min_z), (min_c, max_c)
def compute_histogram_axis_ranges(histogram_datasource):
# compute plot axis ranges for histogram
min_h = 0
max_h = max(histogram_datasource.data['prot_cts']) * 1.5
return (min_h, max_h)
# main container
class Visualization(object):
def __init__(self):
"""read data and construct plot elements in their initial state"""
self.read_data()
z, station_counts, hydrography_counts, all_counts, selected_nut = self.select_initial_data(self.stations[0])
self.construct_datasources(z, station_counts, hydrography_counts, all_counts, selected_nut)
# create plots and widgets
self.make_plots(z, station_counts, hydrography_counts, selected_nut)
self.make_widgets()
def read_data(self):
"""read data and transform into dataframes"""
self._read_map_data()
self._read_proteomz_with_metadata()
def _read_map_data(self):
# second column data source for map stn/lat/long only, single point per stn
self.stn_coor = pd.read_csv(MAP_DATA_FILE, index_col=None)
def _read_proteomz_with_metadata(self):
"""read the large spreadsheet CSV, extract sections, and reorganize into
meaningful dataframes"""
df = pd.read_csv(PROTEOMZ_DATA_FILE, low_memory=False)
# extract metadata section of spreadsheet containing station and depth information
self.cruise_metadata = df[df.columns[:11]][:103]
        # station IDs are in the Station column of cruise_metadata
self.stations = self.cruise_metadata.Station.unique().astype(int)
# extract counts section of spreadsheet
all_counts = df[df.columns[21:]][:103].transpose()
self.all_counts = all_counts.dropna().astype(float)
#extract hydrographic data
hydrography = df[df.columns[4:17]][:103].transpose()
self.hydrography = hydrography.dropna().astype(float)
# extract metadata section of spreadsheet containing prot id information
data = df[103:]
data.index=data.pop('ID')
for col in data.columns[:10]:
data.pop(col)
prot_metadata = data.transpose()
### For taxonomy information we read a different file
taxa_df = pd.read_csv(TAXA_KEYS_DATA_FILE)
prot_metadata.best_hit_taxon_id = pd.to_numeric(prot_metadata['best_hit_taxon_id'], errors='coerce')
taxa_df.best_hit_taxon_id = pd.to_numeric(taxa_df['best_hit_taxon_id'], errors='coerce')
        prot_metadata_taxa = pd.merge(prot_metadata, taxa_df, how='left')
from operantanalysis import concat_lickometer_files
import numpy as np
import pandas as pd
import math
days = int(input("How many days would you like to analyze?"))
df = pd.DataFrame()
for i in range(days):
lick_df = concat_lickometer_files()
df2 = pd.DataFrame()
df3 = pd.DataFrame()
df4means = pd.DataFrame()
for colname, col in lick_df.iteritems():
bout_list = []
rate_list = []
bout = 0
lick_count = 0
x2 = 0
nan = 0
totaltime = -col[0]
lick_count_2 = 0
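        # Inter-lick intervals of <= 1000 (presumably milliseconds) count toward the same
        # bout; a longer interval closes the bout and records its lick rate.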
for x in col:
x2 += 1
if x <= 1000:
bout += x
lick_count += 1
if x2 == len(col):
bout_list += [bout]
if bout > 0:
rate_list += [lick_count / bout]
bout = 0
elif x > 1000:
bout_list += [bout]
if bout > 0:
rate_list += [lick_count / bout]
bout = 0
lick_count = 1
elif math.isnan(x) and nan < 1:
nan += 1
bout_list += [bout]
if bout > 0:
rate_list += [lick_count / bout]
bout = 0
totaltime += x
if totaltime <= 120000:
lick_count_2 += 1
if len(rate_list) == 0:
mean_bout_l = 0
mean_rate = 0
else:
mean_bout_l = sum(bout_list)/len(rate_list)
mean_rate = sum(rate_list)/len(rate_list)
lick_rate_2min = lick_count_2 / 120000
dftemp = pd.DataFrame(np.array(bout_list))
df2 = pd.concat([df2, dftemp], ignore_index=True, axis=1)
dftemp2 = pd.DataFrame(np.array(rate_list))
df3 = pd.concat([df3, dftemp2], ignore_index=True, axis=1)
dftempmeans = pd.DataFrame(np.array([np.count_nonzero(~np.isnan(col)), len(rate_list), mean_bout_l, mean_rate, lick_rate_2min]))
df4means =
|
pd.concat([df4means, dftempmeans], ignore_index=True, axis=1)
|
pandas.concat
|
# product-demand-xgbr_ho
'''
The dataset is from Kaggle: Forecasts for Product Demand.
https://www.kaggle.com/felixzhao/productdemandforecasting
The code cleans, scales, trains, fits, and predicts using the XGBoost Regressor
with hyperparameters tuned with Bayesian Optimization
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime, time, re, pickle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import XGBRegressor
from xgboost import plot_importance
from bayes_opt import BayesianOptimization
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from scipy.stats import kendalltau, spearmanr
def bracket(row):
'''
This function converts [negative] string values in bracket form to standard integers.
'''
    if re.search(r'\(', row):
return int('-' + row[1:-1])
else:
return int(row)
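# Illustrative behaviour (not part of the original script):
# bracket('(250)') -> -250, bracket('500') -> 500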
def code_split(code):
'''
    Splits the product code and returns the numerical component of the product code.
'''
z = code.split('_')
return int(z[1])
def plot_features(booster, figsize):
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=120)
return plot_importance(booster=booster, ax=ax)
def xgb_evaluate(learning_rate, max_depth, subsample, eta, gamma, colsample_bytree):
params = {
'silent': True,
'eval_metric': 'rmse',
'learning_rate': learning_rate,
'max_depth': int(max_depth),
'subsample': subsample,
'eta': eta,
'gamma': gamma,
'colsample_bytree': colsample_bytree
}
# Used 1000 boosting rounds in the full model; computationally expensive
cv_result = xgb.cv(params, dtrain, num_boost_round=200, nfold=5)
# Bayesian optimization only knows how to maximize, not minimize, so return the negative RMSE
return -1.0 * cv_result['test-rmse-mean'].iloc[-1]
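# Hedged sketch (not part of the original script): one way xgb_evaluate could be handed to
# bayes_opt's BayesianOptimization. The bounds below are illustrative assumptions, and the
# global `dtrain` must already exist (it is built in the __main__ block of the full script).
def run_bayesian_search(init_points=5, n_iter=25):
    optimizer = BayesianOptimization(
        f=xgb_evaluate,
        pbounds={
            'learning_rate': (0.01, 0.3),
            'max_depth': (3, 10),
            'subsample': (0.5, 1.0),
            'eta': (0.01, 0.3),
            'gamma': (0, 5),
            'colsample_bytree': (0.3, 1.0),
        },
        random_state=42,
    )
    optimizer.maximize(init_points=init_points, n_iter=n_iter)
    # optimizer.max holds the best observed target (negative RMSE) and its parameters
    return optimizer.max['params']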
if __name__ == '__main__':
df = pd.read_csv('Historical Product Demand.csv')
print('\nCleaning the data. Please wait...\n')
df['Date'] =
|
pd.to_datetime(df['Date'])
|
pandas.to_datetime
|
__author__ = 'Yan'
import pandas
import statistics
import numpy as np
import matplotlib.pylab as plt
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in modern scikit-learn
from sklearn.linear_model import LassoLarsCV
from sklearn import preprocessing
# bug fix for display formats to avoid run time errors
|
pandas.set_option('display.float_format', lambda x:'%.2f'%x)
|
pandas.set_option
|
import rdflib
from datetime import datetime
from nanopub import Nanopublication
import logging
import sys
import pandas as pd
import configparser
import hashlib
from .autonomic.update_change_service import UpdateChangeService
from whyis.namespace import whyis, prov, sio
class Interpreter(UpdateChangeService):
kb = ":"
cb_fn = None
timeline_fn = None
data_fn = None
prefix_fn = "prefixes.txt"
prefixes = {}
studyRef = None
unit_code_list = []
unit_uri_list = []
unit_label_list = []
explicit_entry_list = []
virtual_entry_list = []
explicit_entry_tuples = []
virtual_entry_tuples = []
cb_tuple = {}
timeline_tuple = {}
config = configparser.ConfigParser()
def __init__(self, config_fn=None): # prefixes should be
if config_fn is not None:
try:
self.config.read(config_fn)
except Exception as e:
logging.exception("Error: Unable to open configuration file: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'prefixes'):
self.prefix_fn = self.config.get('Prefixes', 'prefixes')
# prefix_file = open(self.prefix_fn,"r")
# self.prefixes = prefix_file.readlines()
prefix_file = pd.read_csv(self.prefix_fn, dtype=object)
try:
for row in prefix_file.itertuples():
self.prefixes[row.prefix] = row.url
except Exception as e:
logging.exception("Error: Something went wrong when trying to read the Prefix File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'base_uri'):
self.kb = self.config.get('Prefixes', 'base_uri')
if self.config.has_option('Source Files', 'dictionary'):
dm_fn = self.config.get('Source Files', 'dictionary')
try:
dm_file = pd.read_csv(dm_fn, dtype=object)
try: # Populate virtual and explicit entry lists
for row in dm_file.itertuples():
if pd.isnull(row.Column):
logging.exception("Error: The SDD must have a column named 'Column'")
sys.exit(1)
if row.Column.startswith("??"):
self.virtual_entry_list.append(row)
else:
self.explicit_entry_list.append(row)
except Exception as e:
logging.exception(
"Error: Something went wrong when trying to read the Dictionary Mapping File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
except Exception as e:
logging.exception("Error: The specified Dictionary Mapping file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'codebook'):
self.cb_fn = self.config.get('Source Files', 'codebook')
if self.cb_fn is not None:
try:
cb_file = pd.read_csv(self.cb_fn, dtype=object)
try:
inner_tuple_list = []
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in self.cb_tuple):
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Code"] = row.Code
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Class):
inner_tuple["Class"] = row.Class
if "Resource" in row and pd.notnull(row.Resource):
inner_tuple["Resource"] = row.Resource
inner_tuple_list.append(inner_tuple)
self.cb_tuple[row.Column] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Codebook file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Codebook file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'timeline'):
self.timeline_fn = self.config.get('Source Files', 'timeline')
if self.timeline_fn is not None:
try:
timeline_file = pd.read_csv(self.timeline_fn, dtype=object)
try:
inner_tuple_list = []
for row in timeline_file.itertuples():
if pd.notnull(row.Name) and row.Name not in self.timeline_tuple:
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Type"] = row.Type
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Start):
inner_tuple["Start"] = row.Start
if pd.notnull(row.End):
inner_tuple["End"] = row.End
if pd.notnull(row.Unit):
inner_tuple["Unit"] = row.Unit
if pd.notnull(row.inRelationTo):
inner_tuple["inRelationTo"] = row.inRelationTo
inner_tuple_list.append(inner_tuple)
self.timeline_tuple[row.Name] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Timeline file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Timeline file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'code_mappings'):
cmap_fn = self.config.get('Source Files', 'code_mappings')
code_mappings_reader = pd.read_csv(cmap_fn)
for code_row in code_mappings_reader.itertuples():
if pd.notnull(code_row.code):
self.unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
self.unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
self.unit_label_list.append(code_row.label)
if self.config.has_option('Source Files', 'data_file'):
self.data_fn = self.config.get('Source Files', 'data_file')
def getInputClass(self):
return whyis.SemanticDataDictionary
def getOutputClass(self):
return whyis.SemanticDataDictionaryInterpretation
def get_query(self):
return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n'''
def process(self, i, o):
print("Processing SDD...")
self.app.db.store.nsBindings = {}
npub = Nanopublication(store=o.graph.store)
# prefixes={}
# prefixes.update(self.prefixes)
# prefixes.update(self.app.NS.prefixes)
self.writeVirtualEntryNano(npub)
self.writeExplicitEntryNano(npub)
self.interpretData(npub)
def parseString(self, input_string, delim):
my_list = input_string.split(delim)
my_list = [element.strip() for element in my_list]
return my_list
def rdflibConverter(self, input_word):
if "http" in input_word:
return rdflib.term.URIRef(input_word)
if ':' in input_word:
word_list = input_word.split(":")
term = self.prefixes[word_list[0]] + word_list[1]
return rdflib.term.URIRef(term)
return rdflib.Literal(input_word, datatype=rdflib.XSD.string)
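    # codeMapper: translate a unit label or unit code into its URI using the parallel
    # unit_label_list / unit_code_list / unit_uri_list loaded from the code_mappings file;
    # the input is returned unchanged when no mapping exists.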
def codeMapper(self, input_word):
unitVal = input_word
for unit_label in self.unit_label_list:
if unit_label == input_word:
unit_index = self.unit_label_list.index(unit_label)
unitVal = self.unit_uri_list[unit_index]
for unit_code in self.unit_code_list:
if unit_code == input_word:
unit_index = self.unit_code_list.index(unit_code)
unitVal = self.unit_uri_list[unit_index]
return unitVal
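    # convertVirtualToKGEntry: build a knowledge-graph reference for a column name.
    # Virtual ("??"-prefixed) names and known explicit columns are mapped under the kb
    # prefix (punctuation sanitized, with an optional row suffix when a second argument
    # is given); prefixed terms (containing ':') pass through, and anything else becomes
    # an xsd:string literal.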
def convertVirtualToKGEntry(self, *args):
if args[0][:2] == "??":
if self.studyRef is not None:
if args[0] == self.studyRef:
return self.prefixes[self.kb] + args[0][2:]
if len(args) == 2:
return self.prefixes[self.kb] + args[0][2:] + "-" + args[1]
return self.prefixes[self.kb] + args[0][2:]
if ':' not in args[0]:
# Check for entry in column list
for item in self.explicit_entry_list:
if args[0] == item.Column:
if len(args) == 2:
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-") + "-" + args[1]
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")
return '"' + args[0] + "\"^^xsd:string"
return args[0]
def checkVirtual(self, input_word):
try:
if input_word[:2] == "??":
return True
return False
except Exception as e:
logging.exception("Something went wrong in Interpreter.checkVirtual(): ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
def isfloat(self, value):
try:
float(value)
return True
except ValueError:
return False
def writeVirtualEntryNano(self, nanopub):
for item in self.virtual_entry_list:
virtual_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:]))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
nanopub.assertion.add(
(term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string)))
# Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual)
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Entity"] = self.codeMapper(item.Entity)
if virtual_tuple["Entity"] == "hasco:Study":
self.studyRef = item.Column
virtual_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Attribute"] = self.codeMapper(item.Attribute)
else:
logging.warning(
"Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.")
virtual_tuple["Column"] = item.Column
# If there is a value in the inRelationTo column ...
if pd.notnull(item.inRelationTo):
virtual_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
virtual_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
virtual_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
virtual_tuple["Relation"] = item.Relation
virtual_tuple["Role"] = item.Role
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.virtual_entry_tuples.append(virtual_tuple)
if self.timeline_fn is not None:
for key in self.timeline_tuple:
tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key))
nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class))
for timeEntry in self.timeline_tuple[key]:
if 'Type' in timeEntry:
nanopub.assertion.add(
(tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((tl_term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((tl_term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((tl_term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
def writeExplicitEntryNano(self, nanopub):
for item in self.explicit_entry_list:
explicit_entry_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/",
"-").replace(
"\\", "-")))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
if pd.notnull(item.Attribute):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute)
elif pd.notnull(item.Entity):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity)
else:
nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute")
logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.")
if pd.notnull(item.attributeOf):
nanopub.assertion.add(
(term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf))))
explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf)
else:
logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.")
if pd.notnull(item.Unit):
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit)))))
explicit_entry_tuple["Unit"] = self.convertVirtualToKGEntry(self.codeMapper(item.Unit))
if pd.notnull(item.Time):
nanopub.assertion.add(
(term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time))))
explicit_entry_tuple["Time"] = item.Time
if pd.notnull(item.inRelationTo):
explicit_entry_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (
|
pd.isnull(item.Role)
|
pandas.isnull
|
import pandas as pd
# create a df from a dict: each key becomes a column name and each value becomes that column's values
data = {'state': ['Beijing', 'Shanghai', 'HeBei', 'ShanDong'],
'year': [2001, 2002, 2003, 2004],
'pop': [1.5, 1.7, 3.6, 2.4]}
df1 = pd.DataFrame(data)
print("df1:{}".format(df1))
print("head:\n{}".format(df1.head()))
print("tail:\n{}".format(df1.tail()))
# specify the column order
df2 = pd.DataFrame(data, columns=['year', 'pop', 'state'])
print("==============\ndf2:\n{}".format(df2))
# the 'size' column does not exist in the data dict, so it is filled with NaN
df3 = pd.DataFrame(data, columns=['year', 'state', 'size'])
print("=============\ndf3:\n{}".format(df3))
print("isnull:{}".format(pd.isnull(df3)))
# specify the column order, with an explicit index
idxs = ['one', 'tow', 'tree', 'four']
df4 = pd.DataFrame(data, columns=['state', 'pop', 'year'], index=idxs)
print("===============\ndf4_with_index:\n{}".format(df4))
print("df4.columns:{}".format(df4.columns))
# get rows: use loc && iloc,
# get columns: use [index] && ['column label']
# get one row of the dataframe
print("df4.loc['one']:{}".format(df4.loc['one']))
# get one column of the dataframe
print("df4.['state']:{}".format(df4['state']))
# modify the values of a column
df4['state'] = 'new-state'
print("df4.['state']:{}".format(df4['state']))
# modify the values of a row
df4.loc['one'] = 'new-row'
print("df4.loc['one']:{}".format(df4.loc['one']))
# 1.1 modify the value of a single element [x, y] (chained indexing; df.loc[x, y] = value is the safer form)
df4.loc['tow']['state'] = 'state-xxxx'
print("df4:{}".format(df4))
# 1.2 modify the value of a single element [x, y] (another chained-indexing form)
df4['state'].loc['one'] = 'state-yyyy'
print("df4:{}".format(df4))
# add new-col(Series) to DataFrame
df5 = pd.DataFrame(data, index=['one', 'tow', 'tree', 'four'])
print('df5:{}'.format(df5))
s1 = pd.Series([1, 2, 3], index=['one', 'tow', 'tree'])
print("s1:{}".format(s1))
df5['new col'] = s1
print("df5:{}".format(df5))
# del col
del df5['new col']
print("del df5['new col']:{}".format(df5))
# TODO: add a new row to the DataFrame: a dataframe is organized by columns,
# each column is a Series, so adding a row means adding a new element to every column (Series); see the sketch below
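# Sketch for the TODO above (illustrative, not original code): append a row by concatenating
# a one-row DataFrame; pd.concat aligns the columns by name.
new_row = pd.DataFrame({'state': ['GuangDong'], 'pop': [1.1], 'year': [2005]}, index=['five'])
print("append a row via concat:\n{}".format(pd.concat([df4, new_row])))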
# dataframe - nested dict
# if a nested dict is passed to DataFrame, pandas uses the outer keys as columns and the inner keys as the row index:
pop = {'Nevada': {2001: 2.4, 2002: 2.9},
'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
df6 = pd.DataFrame(pop)
print(f"===================\ndf6:\n{df6}")
# transpose rows and columns
print(f"===================\ndf6.T\n:{df6.T}")
# if the index is specified explicitly, the inner dict keys will not be sorted:
df7 = df6.copy()
df7.index = [11, 22, 33]
print(f"===================\ndf7:\n{df7}")
# construct a dataframe from a dict of Series
print("df6:{}".format(df6))
series1 = df6['Ohio'][:-1]  # [:-1]: everything up to (and including) the second-to-last element
series2 = df6['Nevada'][:2]
print("====================\nseries1:{}".format(series1))
print("====================\nseries2:{}".format(series2))
pdata = { 'Ohio' : series1, 'Nevada': series2 }
df8 =
|
pd.DataFrame(pdata)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
#from pyramid.arima import auto_arima
import numpy as np
import logging
import sys
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf
import matplotlib.pylab as plt
#from fbprophet import Prophet
#from tbats import BATS, TBATS
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
def getClosest_DateCode(ref_data_seg, current_dateCode, last_year_date):
"""
    This function returns the closest day with the same code according to last year's reference data
    :param ref_data_seg: last year's reference data segment (expected to contain a 'FECHA' date column)
    :param current_dateCode: code of the forecast date
    :param last_year_date: last year's date (reference data)
    :return: day (date): closest day
"""
i = 0
first = True
code1 = 0
code2 = 0
day_plus = None
day_minus = None
if last_year_date.year == ref_data_seg['FECHA'].iloc[0].year:
while((code1!=current_dateCode) & (code2 != current_dateCode)):
            if first: # TODO: refactor this part of the code and move it to the beginning of the function
code1 = ref_data_seg.loc[
|
pd.to_datetime(ref_data_seg['FECHA'])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import config
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def main():
origincol = ["id", "s1", "s2", "label"]
copycol2 = ['f_1dis', 'f_2word_dis', 'f_2char_dis', 'f_3word_dis', 'f_3char_dis',
'f_1dis2', 'f_2word_dis2', 'f_2char_dis2', 'f_3word_dis2', 'f_3char_dis2',
'f_1dis3', 'f_2word_dis3', 'f_2char_dis3', 'f_3word_dis3', 'f_3char_dis3',
'f_1dis4', 'f_2word_dis4', 'f_2char_dis4', 'f_3word_dis4', 'f_3char_dis4']
copycol12 = ['z3_cosine', 'z3_manhatton', 'z3_euclidean', 'z3_pearson', 'z3_spearman', 'z3_kendall']
copycol13 = ['f_total_unique_words', 'f_wc_diff', 'f_wc_ratio', 'f_wc_diff_unique',
'f_wc_ratio_unique', 'f_char_diff', 'f_char_ratio']
copycol18 = ["d_nlevenshtein_1", "d_nlevenshtein_2", "d_jaro_winkler", "d_jaccard"]
copycol19 = ["z_tfidf_cos_sim",
"z_w2v_bow_dis_cosine", "z_w2v_bow_dis_euclidean", "z_w2v_bow_dis_minkowski", "z_w2v_bow_dis_cityblock", "z_w2v_bow_dis_canberra",
"z_w2v_tfidf_dis_cosine", "z_w2v_tfidf_dis_euclidean", "z_w2v_tfidf_dis_minkowski", "z_w2v_tfidf_dis_cityblock", "z_w2v_tfidf_dis_canberra",
"z_glove_bow_dis_cosine", "z_glove_bow_dis_euclidean", "z_glove_bow_dis_minkowski", "z_glove_bow_dis_cityblock", "z_glove_bow_dis_canberra",
"z_glove_tfidf_dis_cosine", "z_glove_tfidf_dis_euclidean", "z_glove_tfidf_dis_minkowski", "z_glove_tfidf_dis_cityblock", "z_glove_tfidf_dis_canberra"]
train_raw =
|
pd.read_csv(config.path_train_raw, sep="\t", names=origincol, encoding="utf-8")
|
pandas.read_csv
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
testing.assert_frame_equal(output, expected)
def test_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.Distribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Distribution of sum(X)': [0.25, 0.75],
'Distribution of count(X)': [0.5, 0.5]
},
index=['A', 'B'],
columns=['Distribution of sum(X)', 'Distribution of count(X)'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_where(self):
metric = operations.Distribution('grp', self.sum_x, where='country == "US"')
metric_no_filter = operations.Distribution('grp', self.sum_x)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_distribution_pipeline(self):
output = self.sum_x | operations.Distribution('grp') | metrics.compute_on(
self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.Distribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_distribution_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class CumulativeDistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
metric = operations.CumulativeDistribution('grp', sum_x)
def test_cumulative_distribution(self):
output = self.metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
cum_dict = operations.CumulativeDistribution(['grp', 'platform'], sum_x)
output = cum_dict.compute_on(df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 0.5, 0.75, 1],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_melted(self):
output = self.metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.75, 1.],
'grp': ['A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 2
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby(self):
output = self.metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_melted(self):
output = self.metric.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 2, 1.5, 3],
'grp': ['B', 'B', 'A', 'A'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.metric.compute_on(df, ['grp0', 'country'])
output.sort_index(level=['grp0', 'grp', 'country'], inplace=True)
bar = self.metric.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.metric.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected = expected.sort_index(level=['grp0', 'grp', 'country'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_ascending(self):
metric = operations.CumulativeDistribution(
'grp', self.sum_x, ascending=False)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order_splitby(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 2. / 3, 1.],
'grp': ['A', 'B', 'A'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.CumulativeDistribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Cumulative Distribution of sum(X)': [0.75, 1.],
'Cumulative Distribution of count(X)': [0.5, 1.]
},
index=['A', 'B'],
columns=[
'Cumulative Distribution of sum(X)',
'Cumulative Distribution of count(X)'
])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_where(self):
metric = operations.CumulativeDistribution(
'grp', metrics.Count('X'), where='country == "US"')
metric_no_filter = operations.CumulativeDistribution(
'grp', metrics.Count('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_pipeline(self):
output = self.sum_x | operations.CumulativeDistribution(
'grp') | metrics.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.CumulativeDistribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_cumulative_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_cumulative_distribution_with_jackknife_internal_caching_cleaned_up(
self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class PercentChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_percent_change(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [150., 0.],
'Metric': ['sum(X) Percent Change', 'count(X) Percent Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_melted_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0., 150., 0., 0.],
'Metric': [
'sum(X) Percent Change', 'sum(X) Percent Change',
'count(X) Percent Change', 'count(X) Percent Change'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame(
{
'sum(X) Percent Change': [0., 100. / 3, 0., 200. / 3, np.nan],
'count(X) Percent Change': [0., -50., 0., 0., np.nan],
'Condition': [0, 1, 0, 1, 1],
'grp': ['A', 'A', 'B', 'B', 'C']
},
columns=[
'sum(X) Percent Change', 'count(X) Percent Change', 'Condition',
'grp'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [0., 100. / 3, 0., 200. / 3, np.nan, 0., -50., 0., 0., np.nan],
'Metric': ['sum(X) Percent Change'] * 5 +
['count(X) Percent Change'] * 5,
'Condition': [0, 1, 0, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'B', 'C'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6, 1.2, 2.2, 3.2, 4.2, 5.2, 6.5],
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'grp0': ['foo'] * 6 + ['bar'] * 6
})
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(df, ['grp0', 'grp'])
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline_splitby(
self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_where(self):
metric = operations.PercentChange(
'Condition', 0, metrics.Sum('X'), where='grp == "A"')
metric_no_filter = operations.PercentChange('Condition', 0,
metrics.Sum('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'A'])
testing.assert_frame_equal(output, expected)
def test_percent_change_pipeline(self):
metric = operations.PercentChange('Condition', 0)
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.PercentChange('Condition', 0, sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('Condition').X.sum(), sum_x.get_cached(42, 'Condition'))
self.assertTrue(metric.in_cache(42))
def test_percent_change_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_percent_change_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C'],
'cookie': [1, 2, 3] * 2
})
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class AbsoluteChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_absolute_change(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_include_baseline(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0, 0], [9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_melted(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [9, 0],
'Metric': ['sum(X) Absolute Change', 'count(X) Absolute Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_melted_include_baseline(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0, 9, 0, 0],
'Metric': [
'sum(X) Absolute Change', 'sum(X) Absolute Change',
'count(X) Absolute Change', 'count(X) Absolute Change'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame(
{
'sum(X) Absolute Change': [0., 1., 0., 2., np.nan],
'count(X) Absolute Change': [0., -1., 0., 0., np.nan],
'Condition': [0, 1, 0, 1, 1],
'grp': ['A', 'A', 'B', 'B', 'C']
},
columns=[
'sum(X) Absolute Change', 'count(X) Absolute Change', 'Condition',
'grp'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby_melted(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [0., 1., 0., 2., np.nan, 0., -1., 0., 0., np.nan],
'Metric': ['sum(X) Absolute Change'] * 5 +
['count(X) Absolute Change'] * 5,
'Condition': [0, 1, 0, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'B', 'C'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6, 1.2, 2.2, 3.2, 4.2, 5.2, 6.5],
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'grp0': ['foo'] * 6 + ['bar'] * 6
})
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(df, ['grp0', 'grp'])
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns(self):
df = self.df.copy()
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_include_baseline(self):
df = self.df.copy()
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_include_baseline_splitby(
self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_where(self):
metric = operations.AbsoluteChange(
'Condition', 0, metrics.Sum('X'), where='grp == "A"')
metric_no_filter = operations.AbsoluteChange('Condition', 0,
metrics.Sum('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'A'])
testing.assert_frame_equal(output, expected)
def test_absolute_change_pipeline(self):
metric = operations.AbsoluteChange('Condition', 0)
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_internal_caching(self):
sum_x = metrics.Sum('X')
pct = operations.PercentChange('Condition', 0, sum_x)
ab = operations.AbsoluteChange('Condition', 0, sum_x)
metric = metrics.MetricList((pct, ab))
with mock.patch.object(
sum_x, 'compute_through', wraps=sum_x.compute_through) as mock_fn:
metric.compute_on(self.df)
mock_fn.assert_called_once()
def test_absolute_change_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.AbsoluteChange('Condition', 0, sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('Condition').X.sum(), sum_x.get_cached(42, 'Condition'))
self.assertTrue(metric.in_cache(42))
def test_absolute_change_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.AbsoluteChange('Condition', 0, sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_absolute_change_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C'],
'cookie': [1, 2, 3] * 2
})
sum_x = metrics.Sum('X')
m = operations.AbsoluteChange('Condition', 0, sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class MHTests(unittest.TestCase):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2],
'conversions': [1, 0, 1, 2, 1, 1],
'Id': [1, 2, 3, 1, 2, 3],
'Condition': [0, 0, 0, 1, 1, 1]
})
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
cvr = metrics.Ratio('conversions', 'clicks', 'cvr')
metric_lst = metrics.MetricList((sum_conv / sum_click, cvr))
def test_mh(self):
metric = operations.MH('Condition', 0, 'Id', self.cvr)
output = metric.compute_on(self.df)
expected = pd.DataFrame([[40.]], columns=['cvr MH Ratio'], index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_mh_include_baseline(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [40., 40.]],
columns=['sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_mh_melted(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [40., 40.],
'Metric': ['sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_melted_include_baseline(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
    expected = pd.DataFrame({
'Value': [0., 40., 0., 40.],
'Metric': [
'sum(conversions) / sum(clicks) MH Ratio',
'sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio',
'cvr MH Ratio'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_splitby(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2],
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A'] * 6 + ['B'] * 6
})
metric = operations.MH('Condition', 0, 'Id', self.metric_lst)
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame([['A', 1, 40., 40.], ['B', 1, 80., 80.]],
columns=[
'grp', 'Condition',
'sum(conversions) / sum(clicks) MH Ratio',
'cvr MH Ratio'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_splitby_melted(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2],
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A'] * 6 + ['B'] * 6
})
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(
level=['Metric', 'grp'], ascending=[False, True],
inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [0., 40., 0., 80., 0., 40., 0., 80.],
'Metric': ['sum(conversions) / sum(clicks) MH Ratio'] * 4 +
['cvr MH Ratio'] * 4,
'Condition': [0, 1] * 4,
'grp': ['A', 'A', 'B', 'B'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_multiple_condition_columns(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1] * 2,
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'B'] * 6,
})
metric = operations.MH(['Condition', 'grp'], (0, 'A'), 'Id',
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.MH('Condition_and_grp', (0, 'A'), 'Id',
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_mh_multiple_condition_columns_include_baseline(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1] * 2,
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'B'] * 6,
})
metric = operations.MH(['Condition', 'grp'], (0, 'A'), 'Id',
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.MH('Condition_and_grp', (0, 'A'), 'Id',
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_mh_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1] * 2,
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'B'] * 6,
'grp2': ['foo', 'foo', 'bar'] * 4,
})
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
self.metric_lst = metrics.MetricList(
(sum_conv / sum_click, metrics.Ratio('conversions', 'clicks', 'cvr')))
metric = operations.MH(['Condition', 'grp'], (0, 'A'), 'Id',
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.MH('Condition_and_grp', (0, 'A'), 'Id',
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_mh_where(self):
metric = operations.MH(
'Condition', 0, 'Id', self.metric_lst, True, where='Id != 3')
metric_no_filter = operations.MH('Condition', 0, 'Id', self.metric_lst,
True)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.Id != 3])
testing.assert_frame_equal(output, expected)
def test_mh_splitby_multiple(self):
df = pd.DataFrame({
'clicks': np.random.random(24),
'conversions': np.random.random(24),
'Id': [1, 2, 3, 1, 2, 3] * 4,
'Condition': [0, 0, 0, 1, 1, 1] * 4,
'grp': (['A'] * 6 + ['B'] * 6) * 2,
'grp0': ['foo'] * 12 + ['bar'] * 12
})
metric = operations.MH('Condition', 0, 'Id', self.metric_lst)
output = metric.compute_on(df, ['grp0', 'grp'])
output.sort_index(level=['grp0', 'grp'], inplace=True) # For Py2
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_stratified_by_multiple(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2, 12, 31, 22, 30, 15, 23],
'conversions': [1, 0, 1, 2, 1, 1, 3, 2, 4, 6, 7, 1],
'Id': [1, 2, 3, 1, 2, 3] * 2,
'platform': ['Desktop'] * 6 + ['Mobile'] * 6,
'Condition': [0, 0, 0, 1, 1, 1] * 2
})
df['id_platform'] = df[['Id', 'platform']].apply(tuple, axis=1)
cvr = metrics.Ratio('conversions', 'clicks', 'cvr')
metric = operations.MH('Condition', 0, ['Id', 'platform'], cvr)
output = metric.compute_on(df)
expected = operations.MH('Condition', 0, 'id_platform', cvr).compute_on(df)
testing.assert_frame_equal(output, expected)
def test_mh_on_operations(self):
df = pd.DataFrame({
'clicks': np.random.random(24),
'conversions': np.random.random(24),
'Id': [1, 2, 1, 2] * 6,
'Condition': [0, 0, 0, 1, 1, 1] * 4,
'grp': list('AABBCCBC') * 3,
})
sum_clicks = metrics.Sum('clicks')
ab = operations.AbsoluteChange('grp', 'A', sum_clicks)
pct = operations.PercentChange('grp', 'A', sum_clicks)
metric = operations.MH('Condition', 0, 'Id', ab / pct)
output = metric.compute_on(df)
d = (metrics.MetricList(
(ab, pct))).compute_on(df, ['Condition', 'Id']).reset_index()
m = metric(metrics.Sum(ab.name) / metrics.Sum(pct.name))
expected = m.compute_on(d, 'grp')
expected.columns = output.columns
expected = expected.reorder_levels(output.index.names)
testing.assert_frame_equal(output, expected)
def test_mh_fail_on_nonratio_metric(self):
with self.assertRaisesRegex(ValueError,
'MH only makes sense on ratio Metrics.'):
operations.MH('Condition', 0, 'Id', self.sum_click).compute_on(self.df)
def test_mh_pipeline(self):
metric = operations.MH('Condition', 0, 'Id')
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[40., 40.]],
columns=['sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_internal_caching(self):
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
m = metrics.MetricList((sum_conv / sum_click, sum_conv / sum_conv))
with mock.patch.object(
sum_conv, 'compute_through', return_value=1, autospec=True) as mock_fn:
m.compute_on(self.df, return_dataframe=False)
mock_fn.assert_called_once()
def test_cache_key(self):
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
metric_lst = metrics.MetricList([sum_conv / sum_click])
metric = operations.MH('Condition', 0, 'Id', metric_lst)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby(['Condition', 'Id']).clicks.sum(),
sum_click.get_cached(42, ['Condition', 'Id']))
testing.assert_series_equal(
self.df.groupby(['Condition', 'Id']).conversions.sum(),
sum_conv.get_cached(42, ['Condition', 'Id']))
self.assertTrue(metric.in_cache(42))
def test_internal_caching_cleaned_up(self):
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
metric_lst = metrics.MetricList([sum_conv / sum_click])
m = operations.MH('Condition', 0, 'Id', metric_lst)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(metric_lst.cache, {})
self.assertEqual(sum_click.cache, {})
self.assertEqual(sum_conv.cache, {})
self.assertIsNone(m.cache_key)
self.assertIsNone(metric_lst.cache_key)
self.assertIsNone(sum_click.cache_key)
self.assertIsNone(sum_conv.cache_key)
def test_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2],
'conversions': [1, 0, 1, 2, 1, 1],
'Id': [1, 2, 3, 1, 2, 3],
'Condition': [0, 0, 0, 1, 1, 1],
'cookie': [1, 2, 3] * 2
})
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
metric_lst = metrics.MetricList([sum_conv / sum_click])
m = operations.MH('Condition', 0, 'Id', metric_lst)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(metric_lst.cache, {})
self.assertEqual(sum_click.cache, {})
self.assertEqual(sum_conv.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(metric_lst.cache_key)
self.assertIsNone(sum_click.cache_key)
self.assertIsNone(sum_conv.cache_key)
class JackknifeTests(unittest.TestCase):
count_x0 = metrics.Sum('X') / metrics.Mean('X')
count_x1 = metrics.Count('X')
count_x2 = metrics.Metric('count_ground_truth', compute=lambda x: x.X.count())
dot1 = metrics.Dot('X', 'X')
dot2 = metrics.Dot('X', 'X', True)
metric = metrics.MetricList((count_x0, count_x1, count_x2, dot1, dot2))
change = operations.AbsoluteChange('condition', 'foo', metric)
jk = operations.Jackknife('cookie', metric)
jk_change = operations.Jackknife('cookie', change)
def test_jackknife(self):
df = pd.DataFrame({'X': np.arange(0, 3, 0.5), 'cookie': [1, 2, 2, 1, 2, 2]})
unmelted = self.jk.compute_on(df)
expected = pd.DataFrame(
[[6., 1.] * 3 + [(df.X**2).sum(), 4.625, (df.X**2).mean(), 0.875]],
columns=pd.MultiIndex.from_product([[
'sum(X) / mean(X)', 'count(X)', 'count_ground_truth', 'sum(X * X)',
'mean(X * X)'
], ['Value', 'Jackknife SE']],
names=['Metric', None]))
testing.assert_frame_equal(unmelted, expected)
melted = self.jk.compute_on(df, melted=True)
expected = pd.DataFrame(
data={
'Value': [6.] * 3 + [(df.X**2).sum(), (df.X**2).mean()],
'Jackknife SE': [1, 1, 1, 4.625, 0.875]
},
columns=['Value', 'Jackknife SE'],
index=[
'sum(X) / mean(X)', 'count(X)', 'count_ground_truth', 'sum(X * X)',
'mean(X * X)'
])
expected.index.name = 'Metric'
testing.assert_frame_equal(melted, expected)
def test_jackknife_with_weighted_mean(self):
df = pd.DataFrame({'X': [1, 2, 2], 'W': [1, 2, 2], 'cookie': [1, 2, 2]})
mean = metrics.Mean('X', 'W')
jk = operations.Jackknife('cookie', mean)
output = jk.compute_on(df)
expected = pd.DataFrame(
[[1.8, 0.5]],
columns=pd.MultiIndex.from_product(
[['W-weighted mean(X)'], ['Value', 'Jackknife SE']],
names=['Metric', None]))
testing.assert_frame_equal(output, expected)
def test_jackknife_too_few_buckets(self):
df = pd.DataFrame({'X': range(2), 'cookie': [1, 1]})
with self.assertRaises(ValueError) as cm:
self.jk.compute_on(df)
self.assertEqual(str(cm.exception), 'Too few cookie to jackknife.')
def test_jackknife_one_metric_fail_on_one_unit(self):
df = pd.DataFrame({
'X': range(1, 7),
'cookie': [1, 2, 2, 1, 2, 3],
'grp': ['B'] * 3 + ['A'] * 3
})
sum1 = metrics.Sum('X', where='X > 2')
sum2 = metrics.Sum('X', 'foo', where='X > 4')
ms = metrics.MetricList((sum1, sum2))
m = operations.Jackknife('cookie', ms)
output = m.compute_on(df)
expected = pd.concat((m(sum1).compute_on(df), m(sum2).compute_on(df)), 1)
testing.assert_frame_equal(output, expected)
def test_jackknife_splitby_partial_overlap(self):
df = pd.DataFrame({
'X': range(1, 7),
'cookie': [1, 2, 2, 1, 2, 3],
'grp': ['B'] * 3 + ['A'] * 3
})
unmelted = self.jk.compute_on(df, 'grp')
expected = []
for g in ['A', 'B']:
expected.append(self.jk.compute_on(df[df.grp == g]))
expected =
|
pd.concat(expected, keys=['A', 'B'], names=['grp'])
|
pandas.concat
|
import requests
import bs4
import pandas as pd
import numpy as np
import datetime
import os
import time
def getSoup(baseUrl):
url = baseUrl
while (True):
page = requests.get(url)
if (page.status_code == 200):
return bs4.BeautifulSoup(page.content, 'lxml')
time.sleep(5)
print('slept')
def getTable(soup):
table = soup.find(name='div', attrs={'id': 'cpm'}).find(name='table')
if (table is not None):
return table.find_all('tr')
return None
def getIndexesToSplit(table):
indexes = []
for tr in table:
if (tr.find('th') is not None):
indexes.append(table.index(tr))
return indexes
def splitTables(table, indexes):
tables = []
for i in range(0, len(indexes)):
if (i < len(indexes) - 1):
tables.append(table[indexes[i]:indexes[i + 1]])
else:
tables.append(table[indexes[i]:])
return tables
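# Illustrative sketch (editor's addition, not part of the original scraper): the two
# helpers above cut a flat list of <tr> rows into one sub-table per league, slicing
# at every row that contains a <th> header. Plain strings stand in for bs4 rows here
# purely to show the slicing behaviour of splitTables.
def _demo_split_tables():
    rows = ['HEADER', 'r1', 'r2', 'HEADER', 'r3']
    indexes = [i for i, r in enumerate(rows) if r == 'HEADER']  # -> [0, 3]
    return splitTables(rows, indexes)  # -> [['HEADER', 'r1', 'r2'], ['HEADER', 'r3']]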
def getData(table):
df = pd.DataFrame([[td.text for td in row.findAll('td')] for row in table],
columns=['A', 'B', 'C', 'Home Team', 'Score', 'Away Team', 'D'])
df.drop(['A', 'B', 'C', 'D'], axis=1, inplace=True)
return df
def transformData(df, date, leagueName):
df.loc[df['Score'] == 'v', 'Score'] = '-'
df['Home Goals'] = df['Score'].str.split('-').str.get(0).str.strip()
df['Away Goals'] = df['Score'].str.split('-').str.get(1).str.strip()
df.drop(['Score'], axis=1, inplace=True)
df['Date'] = date
if (leagueName == 'Premiership' or leagueName == 'English Div 1 (old)'):
df['Tier'] = '1'
elif (leagueName == 'Football League Div 1' or leagueName == 'English Div 2 (old)'):
df['Tier'] = '2'
elif (leagueName == 'Football League Div 2' or leagueName == 'English Div 3 (old)'):
df['Tier'] = '3'
elif (leagueName == 'English Div 3 (north)'):
df['Tier'] = '3N'
elif (leagueName == 'English Div 3 (south)'):
df['Tier'] = '3S'
elif (leagueName == 'Football League Div 3' or leagueName == 'English Division 4'):
df['Tier'] = '4'
else:
df['Tier'] = np.NaN
df.drop(df[
|
pd.isnull(df.Tier)
|
pandas.isnull
|
import numpy as np
import pandas as pd
import fasttext
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.model_selection import IterativeStratification, \
iterative_train_test_split
from functools import reduce
CIP_TAGS = list(map(lambda x: x.strip(),
"gratis, mat, musik, kurs, casino, dans, musuem, inlines, "
"båt, barn, film, språk, hockey, bowling, fika, sport, "
"biljard, bingo, bio, opera, kultur, grilla, kubb, "
"festival, cykel, brännboll, picknick, konsert, pub, "
"frisbeegolf, mc, gokart, svamp, bangolf, teater, "
"afterwork, promenad, humor, utmaning, fest, shopping, "
"resa, sällskapsspel, träna, pubquiz, poker, bok, foto, "
"hund, skridskor, karaoke, dart, bada, diskussion, "
"badminton, pyssel, golf, klättring, loppis, boule, mässa, "
"flytthjälp, yoga, innebandy, pingis, handboll, jogga, "
"tennis, högtid, astronomi, fiske, beachvolleyboll, "
"friluftsliv, volleyboll, geocaching, vindsurfing, "
"shuffleboard, SUP, standup, paddel".split(',')))
def load_raw_normalized_dataset(path, drop_missing):
"""Load raw CiP dataset.
Args:
path: Path to raw CSV file
drop_missing: If true, drop events with missing titles or descriptions
Returns:
events_df, tags_df: Event and tag dataframes as tuple
"""
# FIXME: Import 'id' as integer
cip_df = pd.read_csv(path,
header=None,
names=['id', 'weekday', 'time', 'title', 'description',
'tag_status', 'tag'],
na_values=['-01:00:00'])
# Drop any events with missing titles or descriptions
cip_df.dropna(subset=['title', 'description'], inplace=True)
# Convert time strings to actual times
cip_df['time'] = pd.to_datetime(cip_df['time']).dt.time
events_df = cip_df.groupby('id').first().drop(
columns=['tag_status', 'tag']).reset_index()
tags_df = pd.DataFrame({
'id': cip_df['id'],
'tag': cip_df['tag'],
'verified': cip_df['tag_status'] == 1,
'removed': cip_df['tag_status'] == 2
})
# Ignore verified and remove 'removed' tags
tags_df = tags_df[~tags_df['removed']]
tags_df.drop(columns=['verified', 'removed'], inplace=True)
return events_df, tags_df
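# Illustrative sketch (editor's addition): the raw CSV has one row per (event, tag)
# pair, so the groupby('id').first() above collapses it to one row per event while
# tags_df keeps the long (id, tag) form. The two-tag event below is hypothetical.
def _demo_normalization():
    raw = pd.DataFrame({'id': [1, 1],
                        'title': ['Fika', 'Fika'],
                        'tag': ['fika', 'kaffe']})
    events = raw.groupby('id').first().drop(columns=['tag']).reset_index()  # 1 row
    tags = raw[['id', 'tag']]                                               # 2 rows
    return events, tags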
def calculate_top_tags(tags_df, n_tags, use_cip_tags=True):
"""Calculate top tags from tags dataset
Args:
tags_df: Dataset to extract top tags from
n_tags: Number of topmost tags to get if generating
use_cip_tags: Use pre-defined tags from CiP (ignores `n_tags`)
Returns:
List of topmost tags
"""
tag_counts = tags_df['tag'].value_counts()
if use_cip_tags:
# Not all CiP tags are necessarily present in the dataset
# and not necessarily in sufficient amounts
present_tags = set(tag_counts[tag_counts > 5].index)
return list(filter(lambda t: t in present_tags, CIP_TAGS))
else:
return tag_counts.index[:n_tags]
def tags_to_matrix(events_df, tags_df, top_tags):
"""Converts tags to feature matrix
Args:
events_df: Events dataset
tags_df: Tags dataset
top_tags: Tags to include
Returns:
Feature matrix for tags
"""
# Combine tags into lists
tags = tags_df.groupby('id')['tag'].agg(lambda x: list(x)).reset_index()
# Handle events with no top tags
# TODO: Kludge, write nicer
missing_tags = pd.DataFrame({
'id': events_df[~events_df['id'].isin(tags['id'])]['id'].unique()
})
missing_tags['tag'] = [[] for _ in range(len(missing_tags))]
tags = pd.concat([tags, missing_tags])
# Align tags with events
aligned_tags = events_df.merge(tags, on='id')
# Convert aligned tags to matrix
mlb = MultiLabelBinarizer(classes=top_tags)
return mlb.fit_transform(aligned_tags['tag'])
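# Illustrative sketch (editor's addition): MultiLabelBinarizer with a fixed `classes`
# list is what maps each event's tag list onto the 0/1 matrix returned above. The
# three tags here are hypothetical and only demonstrate the column ordering.
def _demo_tag_matrix():
    mlb = MultiLabelBinarizer(classes=['mat', 'musik', 'kurs'])
    # rows follow the input order; columns follow the `classes` order
    return mlb.fit_transform([['mat'], ['musik', 'kurs'], []])
    # -> [[1, 0, 0], [0, 1, 1], [0, 0, 0]]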
def matrix_to_tags(tags, top_tags):
top_array = np.array(top_tags)
joined_tags = []
for row in tags:
joined_tags.append(reduce(lambda a, b: a + "," + b, top_array[row > 0]))
return np.array(joined_tags)
def load_datasets(path, drop_missing=True, n_tags=72,
test_size=0.2, random_state=42):
"""Load and split dataset from raw CiP data.
Args:
path: Path to raw CiP dataset
drop_missing: Drop events with no description or title
n_tags: Number of top tags to use (ignored)
test_size: Fraction of events to include in test set
random_state: Random state for the split
Returns:
(events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
"""
events_df, tags_df = load_raw_normalized_dataset(path,
drop_missing=drop_missing)
top_tags = calculate_top_tags(tags_df, n_tags=n_tags)
# Only keep top tags
tags_df = tags_df[tags_df['tag'].isin(top_tags)]
tag_matrix = tags_to_matrix(events_df, tags_df, top_tags)
# Split data into public training set and private test set
stratifier = IterativeStratification(
n_splits=2, order=2,
sample_distribution_per_fold=[test_size, 1.0 - test_size],
random_state=random_state)
train_indices, test_indices = next(stratifier.split(events_df, tag_matrix))
events_train, tags_train = events_df.iloc[train_indices], \
tag_matrix[train_indices, :]
events_test, tags_test = events_df.iloc[test_indices], \
tag_matrix[test_indices, :]
tags_train_stats = pd.DataFrame({
'tag': top_tags,
'count': tags_train.sum(axis=0)
}).sort_values('count', ascending=False)
return (events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
def extract_corpus(events_df):
"""Extract text corpus from event descriptions.
Args:
events_df: Event dataset
Returns:
List of event descriptions as raw text
"""
from tagger._preprocessing.html import HTMLToText
from tagger._preprocessing.characterset import CharacterSet
from tagger._preprocessing.lowercase import Lowercase
from sklearn.pipeline import Pipeline
cleaning_pipeline = Pipeline([
('html', HTMLToText()),
('cset', CharacterSet(punctuation=False)),
('lcase', Lowercase())
])
return list(cleaning_pipeline.fit_transform(events_df['description']))
def fasttext_wordvectors(corpus_path, model_path):
"""Compute word vectors using FastText.
Args:
corpus_path: Path to corpus
model_path: Path for storing FastText model
Returns:
FastText model
"""
model = fasttext.train_unsupervised(corpus_path)
model.save_model(model_path)
return model
def save_corpus(events_df, path):
"""Extract and store corpus for events.
Args:
events_df: Events dataset
path: Path for storing corpus
"""
corpus = extract_corpus(events_df)
with open(path, 'w') as f:
for doc in corpus:
f.write(doc + '\n')
if __name__ == '__main__':
# Generate static datasets and wordvectors for local dev
import os
print("Current working directory:", os.getcwd())
# Compute word vectors
events_df, tags_df = load_raw_normalized_dataset(
"../../../data/raw/citypolarna_public_events_out.csv",
drop_missing=True)
CORPUS_PATH = "../../../data/corpus.txt"
MODEL_PATH = "../../../data/wordvectors.bin"
save_corpus(events_df, CORPUS_PATH)
model = fasttext_wordvectors(CORPUS_PATH, MODEL_PATH)
# Split datasets
events_train, tags_train, events_test, tags_test, top_tags, tags_train_stats = load_datasets(
"../../../data/raw/citypolarna_public_events_out.csv"
)
print(f"Number of train events: {len(events_train)}")
print(f"Number of test events: {len(events_test)}")
# TODO: Proper path handling
DATA_PATH = "../../../data/"
events_train.to_csv(DATA_PATH + "events_train.csv", index=False)
events_test.to_csv(DATA_PATH + "events_test.csv", index=False)
    # A kludge, but convenient: pandas can load from URLs
pd.DataFrame(tags_train).to_csv(DATA_PATH + "tags_train.csv", index=False)
|
pd.DataFrame(tags_test)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on 2017-9-5
@author: cheng.li
"""
import math
import pandas as pd
import numpy as np
from PyFin.api import *
from alphamind.api import *
factor = 'ROE'
universe = Universe('custom', ['zz800'])
start_date = '2010-01-01'
end_date = '2018-04-26'
freq = '10b'
category = 'sw_adj'
level = 1
horizon = map_freq(freq)
ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')
def factor_analysis(factor):
engine = SqlEngine()
factors = {
'f1': CSQuantiles(factor),
'f2': CSQuantiles(factor, groups='sw1_adj'),
'f3': LAST(factor)
}
total_factor = engine.fetch_factor_range(universe, factors, dates=ref_dates)
_, risk_exp = engine.fetch_risk_model_range(universe, dates=ref_dates)
industry = engine.fetch_industry_range(universe, dates=ref_dates, category=category, level=level)
rets = engine.fetch_dx_return_range(universe, horizon=horizon, offset=1, dates=ref_dates)
total_factor = pd.merge(total_factor, industry[['trade_date', 'code', 'industry']], on=['trade_date', 'code'])
total_factor = pd.merge(total_factor, risk_exp, on=['trade_date', 'code'])
total_factor = pd.merge(total_factor, rets, on=['trade_date', 'code']).dropna()
df_ret = pd.DataFrame(columns=['f1', 'f2', 'f3'])
df_ic = pd.DataFrame(columns=['f1', 'f2', 'f3'])
total_factor_groups = total_factor.groupby('trade_date')
for date, this_factors in total_factor_groups:
raw_factors = this_factors['f3'].values
industry_exp = this_factors[industry_styles + ['COUNTRY']].values.astype(float)
processed_values = factor_processing(raw_factors, pre_process=[], risk_factors=industry_exp,
post_process=[percentile])
this_factors['f3'] = processed_values
factor_values = this_factors[['f1', 'f2', 'f3']].values
positions = (factor_values >= 0.8) * 1.
positions[factor_values <= 0.2] = -1
positions /= np.abs(positions).sum(axis=0)
ret_values = this_factors.dx.values @ positions
df_ret.loc[date] = ret_values
ic_values = this_factors[['dx', 'f1', 'f2', 'f3']].corr().values[0, 1:]
df_ic.loc[date] = ic_values
print(f"{factor} is finished")
return {'ic': (df_ic.mean(axis=0), df_ic.std(axis=0) / math.sqrt(len(df_ic))),
'ret': (df_ret.mean(axis=0), df_ret.std(axis=0) / math.sqrt(len(df_ic))),
'factor': factor}
if __name__ == '__main__':
from dask.distributed import Client
try:
client = Client("10.63.6.176:8786")
cols =
|
pd.MultiIndex.from_product([['mean', 'std'], ['raw', 'peer', 'neutralized']])
|
pandas.MultiIndex.from_product
|
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import time
import random
import numpy as np
import pandas as pd
from .init_positions import Initializer
from .progress_bar import ProgressBarLVL0, ProgressBarLVL1
from .times_tracker import TimesTracker
from .memory import Memory
from .print_info import print_info
def time_exceeded(start_time, max_time):
run_time = time.time() - start_time
return max_time and run_time > max_time
def score_exceeded(score_best, max_score):
return max_score and score_best >= max_score
def set_random_seed(nth_process, random_state):
"""
Sets the random seed separately for each thread
(to avoid getting the same results in each thread)
"""
if nth_process is None:
nth_process = 0
if random_state is None:
random_state = np.random.randint(0, high=2 ** 32 - 2)
random.seed(random_state + nth_process)
np.random.seed(random_state + nth_process)
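# Illustrative sketch (editor's addition): with a fixed random_state, each worker
# seeds with random_state + nth_process, so parallel runs almost surely draw
# different sequences. The helper below is only a demonstration of that offset.
def _demo_seed_offsets():
    set_random_seed(0, 42)   # seeds numpy/random with 42
    a = np.random.rand()
    set_random_seed(1, 42)   # seeds numpy/random with 43
    b = np.random.rand()
    return a != b            # almost surely True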
class Search(TimesTracker):
def __init__(self):
super().__init__()
self.optimizers = []
self.new_results_list = []
self.all_results_list = []
@TimesTracker.eval_time
def _score(self, pos):
return self.score(pos)
@TimesTracker.iter_time
def _initialization(self, init_pos, nth_iter):
self.init_pos(init_pos)
score_new = self._score(init_pos)
self.evaluate(score_new)
self.p_bar.update(score_new, init_pos, nth_iter)
@TimesTracker.iter_time
def _iteration(self, nth_iter):
pos_new = self.iterate()
score_new = self._score(pos_new)
self.evaluate(score_new)
self.p_bar.update(score_new, pos_new, nth_iter)
def _init_search(self):
if "progress_bar" in self.verbosity:
self.p_bar = ProgressBarLVL1(
self.nth_process, self.n_iter, self.objective_function
)
else:
self.p_bar = ProgressBarLVL0(
self.nth_process, self.n_iter, self.objective_function
)
set_random_seed(self.nth_process, self.random_state)
# get init positions
init = Initializer(self.conv)
init_positions = init.set_pos(self.initialize)
return init_positions
def _early_stop(self):
if time_exceeded(self.start_time, self.max_time):
return True
elif score_exceeded(self.p_bar.score_best, self.max_score):
return True
else:
return False
def print_info(self, *args):
print_info(*args)
def search(
self,
objective_function,
n_iter,
max_time=None,
max_score=None,
memory=True,
memory_warm_start=None,
verbosity=["progress_bar", "print_results", "print_times"],
random_state=None,
nth_process=None,
):
self.start_time = time.time()
if verbosity is False:
verbosity = []
self.objective_function = objective_function
self.n_iter = n_iter
self.max_time = max_time
self.max_score = max_score
self.memory = memory
self.memory_warm_start = memory_warm_start
self.verbosity = verbosity
self.random_state = random_state
self.nth_process = nth_process
init_positions = self._init_search()
if memory is True:
mem = Memory(memory_warm_start, self.conv)
self.score = self.results_mang.score(mem.memory(objective_function))
else:
self.score = self.results_mang.score(objective_function)
# loop to initialize N positions
for init_pos, nth_iter in zip(init_positions, range(n_iter)):
if self._early_stop():
break
self._initialization(init_pos, nth_iter)
# loop to do the iterations
for nth_iter in range(len(init_positions), n_iter):
if self._early_stop():
break
self._iteration(nth_iter)
self.results =
|
pd.DataFrame(self.results_mang.results_list)
|
pandas.DataFrame
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
index = md.Index(md.Index(pi))
result = index.execute().fetch()
pd.testing.assert_index_equal(pi, result)
def test_index_only(setup):
df = md.DataFrame(index=[1, 2, 3])
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=[1, 2, 3])
pd.testing.assert_series_equal(s.execute().fetch(), pd.Series(index=[1, 2, 3]))
df = md.DataFrame(index=md.Index([1, 2, 3]))
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=md.Index([1, 2, 3]), dtype=object)
pd.testing.assert_series_equal(
s.execute().fetch(), pd.Series(index=[1, 2, 3], dtype=object)
)
def test_series_from_tensor(setup):
data = np.random.rand(10)
series = md.Series(mt.tensor(data), name="a")
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data, name="a"))
series = md.Series(mt.tensor(data, chunk_size=3))
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data))
series = md.Series(mt.ones((10,), chunk_size=4))
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(np.ones(10)),
)
index_data = np.random.rand(10)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=mt.tensor(index_data, chunk_size=4),
)
pd.testing.assert_series_equal(
series.execute().fetch(), pd.Series(data, name="a", index=index_data)
)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=md.date_range("2020-1-1", periods=10),
)
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(data, name="a", index=pd.date_range("2020-1-1", periods=10)),
)
def test_from_tensor_execution(setup):
tensor = mt.random.rand(10, 10, chunk_size=5)
df = dataframe_from_tensor(tensor)
tensor_res = tensor.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
df_result = df.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))
pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))
pd.testing.assert_frame_equal(df_result, pdf_expected)
# test from tensor with unknown shape
tensor2 = tensor[tensor[:, 0] < 0.9]
df = dataframe_from_tensor(tensor2)
df_result = df.execute().fetch()
tensor_res = tensor2.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)
# test converted with specified index_value and columns
tensor2 = mt.random.rand(2, 2, chunk_size=1)
df2 = dataframe_from_tensor(
tensor2, index=pd.Index(["a", "b"]), columns=pd.Index([3, 4])
)
df_result = df2.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.Index(["a", "b"]))
pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))
# test converted from 1-d tensor
tensor3 = mt.array([1, 2, 3])
df3 = dataframe_from_tensor(tensor3)
result3 = df3.execute().fetch()
pdf_expected = pd.DataFrame(np.array([1, 2, 3]))
pd.testing.assert_frame_equal(pdf_expected, result3)
# test converted from identical chunks
tensor4 = mt.ones((10, 10), chunk_size=3)
df4 = dataframe_from_tensor(tensor4)
result4 = df4.execute().fetch()
pdf_expected = pd.DataFrame(tensor4.execute().fetch())
pd.testing.assert_frame_equal(pdf_expected, result4)
# from tensor with given index
tensor5 = mt.ones((10, 10), chunk_size=3)
df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))
result5 = df5.execute().fetch()
pdf_expected = pd.DataFrame(tensor5.execute().fetch(), index=np.arange(0, 20, 2))
pd.testing.assert_frame_equal(pdf_expected, result5)
# from tensor with given index that is a tensor
raw7 = np.random.rand(10, 10)
tensor7 = mt.tensor(raw7, chunk_size=3)
index_raw7 = np.random.rand(10)
index7 = mt.tensor(index_raw7, chunk_size=4)
df7 = dataframe_from_tensor(tensor7, index=index7)
result7 = df7.execute().fetch()
pdf_expected = pd.DataFrame(raw7, index=index_raw7)
pd.testing.assert_frame_equal(pdf_expected, result7)
# from tensor with given index is a md.Index
raw10 = np.random.rand(10, 10)
tensor10 = mt.tensor(raw10, chunk_size=3)
index10 = md.date_range("2020-1-1", periods=10, chunk_size=3)
df10 = dataframe_from_tensor(tensor10, index=index10)
result10 = df10.execute().fetch()
pdf_expected = pd.DataFrame(raw10, index=
|
pd.date_range("2020-1-1", periods=10)
|
pandas.date_range
|
from sklearn.datasets import make_blobs
from sklearn import tree
from dtreeviz.trees import *
import pandas as pd
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=1.0)
X = pd.DataFrame(X)
X.columns = ['x_1', 'x_2']
y =
|
pd.DataFrame(y)
|
pandas.DataFrame
|
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 =
|
Categorical(['a', 'b'], categories=['a', 'b', 'c'])
|
pandas.Categorical
|
'''
Beforehand, pandas_table was created inside pandas_db in PostgreSQL and fkubota was granted privileges on it.
Apparently the table cannot be overwritten when using psycopg2.
'''
import pandas as pd
# import psycopg2
from sqlalchemy import create_engine
# Database connection information
connection_config = {
'user': 'fkubota',
'password': '<PASSWORD>',
'host': 'localhost',
'port': '5432',
'database': 'pandas_db'
}
# Connect to PostgreSQL
engine = create_engine(
    'postgresql://{user}:{password}@{host}:{port}/{database}'.format(
        **connection_config))
# pandas
df = pd.read_sql(sql='SELECT * FROM pandas_table;', con=engine)
name = 'jiro'
age = 9
df_2 =
|
pd.DataFrame([[name, age]], columns=['name', 'age'])
|
pandas.DataFrame
|
import unittest
import pandas as pd
from mavedbconvert import validators, constants, exceptions
class TestHGVSPatternsBackend(unittest.TestCase):
def setUp(self):
self.backend = validators.HGVSPatternsBackend()
def test_validate_hgvs_raise_HGVSValidationError(self):
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("p.1102A>G")
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("x.102A>G")
def test_validate_passes_on_special(self):
self.backend.validate(constants.enrich2_wildtype)
self.backend.validate(constants.enrich2_synonymous)
def test_returns_str_variant(self):
self.assertIsInstance(self.backend.validate("c.1A>G"), str)
class TestValidateHGVS(unittest.TestCase):
def test_uses_patterns_backend_as_default(self):
result = validators.validate_variants(["c.[1A>G;2A>G]"], n_jobs=2, verbose=0)
self.assertIsInstance(result[0], str)
def test_can_specify_backend(self):
backend = validators.HGVSPatternsBackend()
result = validators.validate_variants(
["c.[1A>G;2A>G]"], n_jobs=2, verbose=0, validation_backend=backend
)
self.assertIsInstance(result[0], str)
class TestDfValidators(unittest.TestCase):
def test_validate_column_raise_keyerror_column_not_exist(self):
df = pd.DataFrame({"a": [1]})
with self.assertRaises(KeyError):
validators.validate_has_column(df, "b")
def test_validate_column_passes_when_column_exists(self):
df = pd.DataFrame({"a": [1]})
validators.validate_has_column(df, "a")
def test_error_some_values_non_numeric(self):
df =
|
pd.DataFrame({"A": ["a", 1, 2]})
|
pandas.DataFrame
|
#predictTNA.py
#AUTHOR: <NAME>, 1425458
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp
import pandas as pd
from sklearn.linear_model import Lasso
#importing the data
probeA = pd.read_csv("../probeA.csv",header=0)
probeB = pd.read_csv("../probeB.csv",header=0)
#utility functions needed for preprocessing phase
# some values are "swapped" between columns, so we need to reorder:
def reorder(df):
    # From the initial data exploration it was clear that there was some corruption in the form of a permutation
    # for each of the 4 proteins. This code reorders it.
copydf=df.copy()
for letter in ["c","m","n","p"]:
old_c = copydf[[letter+"1",letter+"2",letter+"3"]]
c = old_c.values
c.sort(axis=1)
c_df = pd.DataFrame(c,columns=old_c.columns)
copydf[old_c.columns] = c_df
return copydf
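# Illustrative sketch (editor's addition): reorder() undoes the per-row permutation
# by sorting each protein's three replicate columns. The single-row frame below is
# hypothetical; c1, c2, c3 = 5, 1, 3 comes back as 1, 3, 5.
def _demo_reorder():
    toy = pd.DataFrame({'c1': [5.0], 'c2': [1.0], 'c3': [3.0],
                        'm1': [0.0], 'm2': [0.0], 'm3': [0.0],
                        'n1': [0.0], 'n2': [0.0], 'n3': [0.0],
                        'p1': [0.0], 'p2': [0.0], 'p3': [0.0]})
    return reorder(toy)  # c1, c2, c3 become 1.0, 3.0, 5.0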
#scale data in standard way
def scale_data(dataFrame):
df = dataFrame.copy()
for var in df:
mean = df[var].mean()
std = df[var].std()
assert(std != 0)
df[var] = (df[var]-mean)/std
return df
#reorder data
probeA = reorder(probeA)
probeB = reorder(probeB)
#we define probeA_data to be the data of probeA with class and tna removed
probeA_data =probeA.drop('tna',1).drop('class',1)
#standardisation
probeA_data_std = scale_data(probeA_data)
#we define probeB_data to be the data of probeB with class and tna removed
probeB_data =probeB
#standardisation
probeB_data_std = scale_data(probeB_data)
tna_target = probeA['tna']
class_target = probeA['class']
#feature expansion
def polynomial_feature_ord(X,n):
poly = pp.PolynomialFeatures(n)
out = poly.fit_transform(X)
feature_names = poly.get_feature_names(X.columns)
X_new = pd.DataFrame(out,columns =feature_names)
return X_new
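# Illustrative sketch (editor's addition): a degree-2 expansion of two hypothetical
# columns yields interaction names such as 'c1 n3', which is where the feature
# names used in the selection list below come from. Exact names depend on the
# installed scikit-learn version, since this reuses the module's own helper.
def _demo_expansion():
    toy = pd.DataFrame({'c1': [1.0, 2.0], 'n3': [3.0, 4.0]})
    return list(polynomial_feature_ord(toy, 2).columns)
    # -> ['1', 'c1', 'n3', 'c1^2', 'c1 n3', 'n3^2']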
#we save various polynomial expansions in case useful
data_ord_2 = polynomial_feature_ord(probeA_data_std,2)
data_ord_3 = polynomial_feature_ord(probeA_data_std,3)
data_ord_4 = polynomial_feature_ord(probeA_data_std,4)
data_ord_2_B = polynomial_feature_ord(probeB_data_std,2)
data_ord_3_B = polynomial_feature_ord(probeB_data_std,3)
data_ord_4_B = polynomial_feature_ord(probeB_data_std,4)
#feature selection (these were found by extensive search)
features =[u'c3', u'm1 n3', u'm3 n3', u'n1 n3', u'p1^2']
data = data_ord_2[features]
#model selection (hyperparameters fine-tuned by extensive search)
m = Lasso(alpha=0.0043)
fitted = m.fit(data,tna_target)
predict = m.predict(data_ord_2_B[features])
#saves prediction
df =
|
pd.DataFrame(predict)
|
pandas.DataFrame
|
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
        # this is allowed, but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
        # test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
        # test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
        # same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
# non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
df = DataFrame([[123,'as<PASSWORD>'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
# non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# number of rows deleted (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non-empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
# selecting a non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-
|
Index(date3)
|
pandas.Index
|
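# A minimal sketch (not part of the test suite above) of the HDFStore append/select/
# remove pattern those tests exercise, written against the modern string-based
# `where` syntax; assumes PyTables is installed and 'store.h5' is writable.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(20, 4), columns=list('ABCD'),
                  index=pd.date_range('2000-01-01', periods=20))
with pd.HDFStore('store.h5', mode='w') as store:
    store.append('df', df[:10])      # create the table node with the first chunk
    store.append('df', df[10:])      # append the remaining rows to the same table
    store.create_table_index('df')   # build a PyTables index for faster queries
    subset = store.select('df', where="index > pd.Timestamp('2000-01-10') & columns=['A', 'B']")
    n_removed = store.remove('df', where="index > pd.Timestamp('2000-01-15')")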
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ts_charting.formatter as formatter
plot_index = pd.date_range(start="2000-1-1", freq="B", periods=10000)
class TestTimestampLocator(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_inferred_freq(self):
"""
inferred freqs are based off of min_ticks
"""
plot_index = pd.date_range(start="2000-1-1", freq="B", periods=10000)
tl = formatter.TimestampLocator(plot_index)
# showing only the first 10 should give us days
xticks = tl._process(1, 10)
assert tl.gen_freq == 'D'
# showing only the first 70 should give us weeks
xticks = tl._process(1, 6 * 7 + 1)
assert tl.gen_freq == 'W'
# months should trigger at around 6 * 31
xticks = tl._process(1, 6 * 31 )
assert tl.gen_freq == 'MS'
# year should trigger at around 6 *366
xticks = tl._process(1, 6 * 366 + 1)
assert tl.gen_freq == 'AS'
def test_fixed_freq(self):
"""
Test passing in a fixed freq. This will allow len(xticks)
less than min_ticks
"""
plot_index = pd.date_range(start="2000-1-1", freq="D", periods=10000)
tl = formatter.TimestampLocator(plot_index, 'MS')
xticks = tl._process(0, 30*3)
assert len(xticks) == 3
tl = formatter.TimestampLocator(plot_index, 'MS')
xticks = tl._process(0, 30*6)
assert len(xticks) == 6
tl = formatter.TimestampLocator(plot_index, 'W')
xticks = tl._process(0, 10*7)
assert len(xticks) == 10
tl = formatter.TimestampLocator(plot_index, 'AS')
xticks = tl._process(0, 10 * 365)
assert len(xticks) == 10
def test_bool_xticks(self):
"""
ability to set ticks with a bool series where True == tick
"""
plot_index = pd.date_range(start="2000-1-1", freq="D", periods=10000)
freq = 'M'
ds = pd.Series(1, index=plot_index)
# True when the freq marker is hit
bool_ticks = ds.resample(freq).reindex(plot_index).fillna(0).astype(bool)
tl = formatter.TimestampLocator(plot_index, xticks=bool_ticks)
xticks = tl._process(0, 90)
tl = formatter.TimestampLocator(plot_index, freq=freq)
correct = tl._process(0, 90)
tm.assert_almost_equal(xticks, correct)
freq = 'MS'
ds = pd.Series(1, index=plot_index)
# True when the freq marker is hit
bool_ticks = ds.resample(freq).reindex(plot_index).fillna(0).astype(bool)
tl = formatter.TimestampLocator(plot_index, xticks=bool_ticks)
xticks = tl._process(3, 94)
tl = formatter.TimestampLocator(plot_index, freq=freq)
correct = tl._process(3, 94)
tm.assert_almost_equal(xticks, correct)
freq = 'W'
ds = pd.Series(1, index=plot_index)
# True when the freq marker is hit
bool_ticks = ds.resample(freq).reindex(plot_index).fillna(0).astype(bool)
tl = formatter.TimestampLocator(plot_index, xticks=bool_ticks)
xticks = tl._process(3, 94)
tl = formatter.TimestampLocator(plot_index, freq=freq)
correct = tl._process(3, 94)
tm.assert_almost_equal(xticks, correct)
def test_list_of_datetimes(self):
"""
The other xticks option is sending in a DatetimeIndex of the dates you want
"""
plot_index = pd.date_range(start="2000-1-1", freq="D", periods=10000)
freq = 'M'
dates = pd.Series(1, index=plot_index).resample(freq).index
tl = formatter.TimestampLocator(plot_index, xticks=dates)
test = tl._process(3, 900)
tl = formatter.TimestampLocator(plot_index, freq=freq)
correct = tl._process(3, 900)
tm.assert_almost_equal(test, correct)
freq = 'MS'
dates = pd.Series(1, index=plot_index).resample(freq).index
tl = formatter.TimestampLocator(plot_index, xticks=dates)
test = tl._process(3, 900)
tl = formatter.TimestampLocator(plot_index, freq=freq)
correct = tl._process(3, 900)
|
tm.assert_almost_equal(test, correct)
|
pandas.util.testing.assert_almost_equal
|
import numpy as np
import json
import sys
import os
import argparse
from scipy.io import netcdf
from ipdb import set_trace as stop
import keras.backend as K
import tensorflow as tf
from keras.callbacks import CSVLogger, LearningRateScheduler, ModelCheckpoint
from keras.layers import Input, Lambda, Dense, Flatten, BatchNormalization, Activation, Conv1D, add, concatenate, GaussianNoise
from keras.models import Model, load_model
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Adam
from keras.utils import plot_model
from sklearn.cluster import KMeans, AgglomerativeClustering
import pandas as pd
from contextlib import redirect_stdout
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def flush_file(f):
f.flush()
os.fsync(f.fileno())
def residual(inputs, n_filters, activation, strides):
x0 = Conv1D(n_filters, 1, padding='same', kernel_initializer='he_normal', strides=strides)(inputs)
x = Conv1D(n_filters, 3, padding='same', kernel_initializer='he_normal', strides=strides)(inputs)
x = BatchNormalization()(x)
if (activation == 'prelu'):
x = PReLU()(x)
else:
x = Activation(activation)(x)
x = Conv1D(n_filters, 3, padding='same', kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = add([x0, x])
return x
def sample_center_points(y, method='all', k=100, keep_edges=False):
"""
function to define kernel centers with various downsampling alternatives
"""
# make sure y is 1D
y = y.ravel()
# keep all points as kernel centers
if method == 'all':
return y
# retain outer points to ensure expressiveness at the target borders
if keep_edges:
y = np.sort(y)
centers = np.array([y[0], y[-1]])
y = y[1:-1]
# adjust k such that the final output has size k
k -= 2
else:
centers = np.empty(0)
if method == 'random':
cluster_centers = np.random.choice(y, k, replace=False)
# iteratively drop one member of the closest pairs until all remaining points are at least 'd' apart
elif method == 'distance':
raise NotImplementedError
# use 1-D k-means clustering
elif method == 'k_means':
model = KMeans(n_clusters=k, n_jobs=-2)
model.fit(y.reshape(-1, 1))
cluster_centers = model.cluster_centers_
# use agglomerative clustering
elif method == 'agglomerative':
model = AgglomerativeClustering(n_clusters=k, linkage='complete')
model.fit(y.reshape(-1, 1))
labels = pd.Series(model.labels_, name='label')
y_s = pd.Series(y, name='y')
df =
|
pd.concat([y_s, labels], axis=1)
|
pandas.concat
|
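# Illustrative sketch (the model layout is an assumption, not taken from the script
# above): stacking the residual() 1-D conv block defined above into a small Keras model.
from keras.layers import Input, Flatten, Dense
from keras.models import Model

inputs = Input(shape=(128, 1))
x = residual(inputs, n_filters=32, activation='prelu', strides=1)  # same-length block
x = residual(x, n_filters=64, activation='relu', strides=2)        # downsampling block
x = Flatten()(x)
outputs = Dense(1)(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mse')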
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset =
|
to_offset(freq_str)
|
pandas.tseries.frequencies.to_offset
|
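# Illustrative sketch (the frequency-to-feature lookup that closes
# time_features_from_frequency_str is not shown above, so the feature classes are
# applied directly here): encode an hourly DatetimeIndex into the [-0.5, 0.5] features.
import numpy as np
import pandas as pd

index = pd.date_range("2021-01-01", periods=48, freq="H")
feats = [HourOfDay(), DayOfWeek(), DayOfMonth(), DayOfYear()]
encoded = np.vstack([feat(index) for feat in feats])  # shape (4, 48), values in [-0.5, 0.5]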
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(
|
pd.Series(dtype="object")
|
pandas.Series
|
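# Illustrative sketch of the behaviour the parametrizations above pin down (assumes a
# CUDA environment with cudf installed): the predicates accept dtypes, scalars,
# arrays and Series alike.
import cudf
from cudf.api import types

types.is_categorical_dtype(cudf.Series(dtype="category"))  # True
types.is_numeric_dtype(cudf.Decimal64Dtype(5, 2))          # True
types.is_integer(5)                                        # True (5.0 would be False)
types.is_string_dtype(cudf.Series(dtype="str"))            # True
types.is_list_dtype(cudf.Series([[1, 2], [3, 4, 5]]))      # True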
# Debit card data compilation
import pandas as pd
cols_list = ['UNI_PT_KEY', 'CIF', 'CARD_CLASS_CODE', 'CARD_NUM', 'PRODUCT',
'PRIMARY_ACCOUNT', 'CARD_SEGMENT', 'CARD_BIN', 'CARD_RANGE', 'EMBLEM_ID',
'ACCOUNT_OPEN_DATE', 'CARD_ISSUE_DATE', 'CARD_EXPIRY_DATE', 'CARD_ACTIVATION_DATE',
'FIRST_TRN_DATE', 'CARD_ACT_FLAG','IS_CARD_WITH_TOKEN']
debit = pd.read_csv("debitcards.csv", usecols=cols_list, dtype=str, sep=";", error_bad_lines=False, low_memory=False)
a = debit["CARD_NUM"].nunique()
b = debit["UNI_PT_KEY"].nunique()
c = debit["CIF"].nunique()
print("# of UNI_PT_KEY = " +str(b))
print("# of CARD_NUM = " + str(a))
print("# of CIF = " + str(c))
#other products
other_products = pd.read_csv("other_metrics.csv", sep=";", dtype=str)
other_products["OTHER_PRODUCTS"] = 1
dc_other_products = debit.merge(other_products, how="left", on="UNI_PT_KEY")
dc_other_products["OTHER_PRODUCTS"] = dc_other_products["OTHER_PRODUCTS"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_other_products["OTHER_PRODUCTS"].astype(int).sum()))
#mobile banking
mobile_banking = pd.read_csv("mobile_banking.csv", sep=";", dtype=str)
mobile_banking["MOBILE_BANKING"] = 1
mobile_banking = pd.DataFrame(mobile_banking)
dc_mobile_banking = dc_other_products.merge(mobile_banking, how="left", on="UNI_PT_KEY")
dc_mobile_banking["MOBILE_BANKING"] = dc_mobile_banking["MOBILE_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_mobile_banking["MOBILE_BANKING"].astype(int).sum()))
#internet banking
internet_banking = pd.read_csv("internet_banking.csv", sep=";", dtype=str)
internet_banking["INTERNET_BANKING"] = 1
dc_internet_banking = dc_mobile_banking.merge(internet_banking, how="left", on="UNI_PT_KEY")
dc_internet_banking["INTERNET_BANKING"] = dc_internet_banking["INTERNET_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_internet_banking["INTERNET_BANKING"].astype(int).sum()))
#branch delivery
branch_delivery = pd.read_csv("branch_delivery.csv", sep=";", dtype=str)
branch_delivery["BRANCH_DELIVERY"] = 1
dc_branch_delivery = dc_internet_banking.merge(branch_delivery, how="left", on="CARD_NUM")
dc_branch_delivery["BRANCH_DELIVERY"] = dc_branch_delivery["BRANCH_DELIVERY"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_branch_delivery["BRANCH_DELIVERY"].astype(int).sum()))
#staff
staff = pd.read_csv("staff_flag.csv", sep=";", dtype=str)
staff["STAFF_FLAG"] = 1
dc_staff_flag = dc_branch_delivery.merge(staff, how="left", on="UNI_PT_KEY")
dc_staff_flag["STAFF_FLAG"] = dc_staff_flag["STAFF_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_staff_flag["STAFF_FLAG"].astype(int).sum()))
#email phone
email_phone = pd.read_csv("contact_email_phone.csv", sep=";", dtype=str, error_bad_lines=False, low_memory=False)
dc_email_phone = dc_staff_flag.merge(email_phone, how="left", on ="UNI_PT_KEY")
#contact address
contact_address = pd.read_csv("customer_address.csv", sep=";", dtype=str)
dc_contact_address = dc_email_phone.merge(contact_address, how="left", on="CARD_NUM")
# owner vs holder
owner_vs_holder =
|
pd.read_csv("card_ownervsholder_dc.csv", sep=";")
|
pandas.read_csv
|
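# Minimal sketch of the flag-merge pattern repeated above, on hypothetical frames
# (not the bank's real extracts): left-join a product table on the customer key,
# then turn the match into a "0"/"1" string flag.
import pandas as pd

base = pd.DataFrame({"UNI_PT_KEY": ["1", "2", "3"]})
product = pd.DataFrame({"UNI_PT_KEY": ["2"]})
product["HAS_PRODUCT"] = 1
flagged = base.merge(product, how="left", on="UNI_PT_KEY")
flagged["HAS_PRODUCT"] = flagged["HAS_PRODUCT"].fillna(0).astype(int).astype(str)
print("matched records = " + str(flagged["HAS_PRODUCT"].astype(int).sum()))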
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from workalendar.europe import Greece
from scipy.interpolate import interp2d
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
def stack_2d(X, sample, compress):
if compress:
sample = rescale(sample, 8, 8)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape)!=len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
class dataset_creator_LV():
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1, test=False):
self.projects = projects
self.isfortest = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.create_logger()
self.check_dates()
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = data_variables
def create_logger(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp), 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
def check_empty_nwp(self, nwp, variables):
flag = True
for var in variables:
if nwp[var].shape[0] == 0:
flag = False
break
return flag
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
X = dict()
fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
'%d%m%y%H%M')
for project in projects:
X[project['_id']] = pd.DataFrame()
areas = project['static_data']['areas']
x =
|
pd.DataFrame()
|
pandas.DataFrame
|
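# Quick illustrative check of the rescale() helper defined above: bilinearly resample
# a 25x25 NWP grid down to the 8x8 grid used when compress=True. Assumes a SciPy
# version that still ships interp2d.
import numpy as np

grid = np.random.rand(25, 25)
small = rescale(grid, 8, 8)
assert small.shape == (8, 8)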
import gc
import eli5
import lightgbm as lgb
import numpy as np
import pandas as pd
import xgboost as xgb
from catboost import CatBoost, Pool
from IPython.display import display
from sklearn import metrics
from sklearn.metrics import (
make_scorer,
roc_auc_score,
mean_squared_error,
f1_score,
)
import src.common.com_util as util
__all__ = [
"cat_train_validate_on_holdout",
"xgb_train_validate_on_holdout",
"lgb_train_validate_on_holdout",
"xgb_train_validate_on_cv",
"lgb_train_validate_on_cv",
"cat_train_validate_on_cv",
"sklearn_train_validate_on_cv",
"lgb_train_perm_importance_on_cv",
"evaluate_macroF1_xgb",
"evaluate_macroF1_lgb",
"_calculate_perf_metric",
"f1_score_weighted",
"evaluate_macroF1_lgb_sklearn_api",
"_get_X_Y_from_CV",
"lgb_train_validate_on_cv_mean_encoding",
]
def evaluate_macroF1_lgb(y_hat, data):
"""
Custom F1 Score to be used for multiclass classification using lightgbm.
This function should be passed as a value to the parameter feval.
weighted average takes care of imbalance
https://stackoverflow.com/questions/57222667/light-gbm-early-stopping-does-not-work-for-custom-metric
https://stackoverflow.com/questions/52420409/lightgbm-manual-scoring-function-f1-score
https://stackoverflow.com/questions/51139150/how-to-write-custom-f1-score-metric-in-light-gbm-python-in-multiclass-classifica
"""
y = data.get_label()
y_hat = y_hat.reshape(-1, len(np.unique(y))).argmax(axis=1)
f1 = f1_score(y_true=y, y_pred=y_hat, average="weighted")
return ("weightedF1", f1, True)
def evaluate_macroF1_lgb_sklearn_api(y, y_hat):
"""
Custom F1 Score to be used for multiclass classification using lightgbm.
This function should be passed as a value to the parameter eval_metric for
the LGBM sklearn API.
weighted average takes care of imbalance
https://github.com/Microsoft/LightGBM/issues/1453
"""
y_hat = y_hat.reshape(-1, len(np.unique(y))).argmax(axis=1)
f1 = f1_score(y_true=y, y_pred=y_hat, average="weighted")
return ("weightedF1", f1, True)
def evaluate_macroF1_xgb(y_hat, data):
"""
Custom F1 Score to be used for multiclass classification using xgboost.
This function should be passed as a value to the parameter feval.
weighted average takes care of imbalance
https://stackoverflow.com/questions/51587535/custom-evaluation-function-based-on-f1-for-use-in-xgboost-python-api
https://www.kaggle.com/c/expedia-hotel-recommendations/discussion/21439
"""
y = data.get_label()
y_hat = y_hat.reshape(-1, len(np.unique(y))).argmax(axis=1)
f1 = f1_score(y_true=y, y_pred=y_hat, average="weighted")
return ("weightedF1", f1)
def f1_score_weighted(y, y_hat):
"""
It's assumed that y_hat consists of a two dimensional array.
Each array in the first dimension has probabilities for all the
classes, i.e. if there are 43 classes and 1000 rows of data, the y_hat
has a dimension (1000, 43)
"""
y_hat = y_hat.reshape(-1, len(np.unique(y))).argmax(axis=1)
return f1_score(y_true=y, y_pred=y_hat, average="weighted")
def roc_auc(y, y_hat):
return roc_auc_score(y, y_hat)
def log_loss(y, y_hat):
return metrics.log_loss(y_true=y, y_pred=y_hat)
def rmse(y, y_hat):
return np.sqrt(mean_squared_error(y, y_hat))
def rmsle(y, y_hat):
return np.sqrt(np.mean(np.power(np.log1p(y_hat) - np.log1p(y), 2)))
def precision_weighted(y, y_hat):
return metrics.precision_score(y_true=y, y_pred=y_hat, average="weighted")
def recall_weighted(y, y_hat):
return metrics.recall_score(y_true=y, y_pred=y_hat, average="weighted")
def _calculate_perf_metric(metric_name, y, y_hat):
"""Returns the performance metrics
Args:
y: Real value
y_hat: predicted value
Returns:
Metrics computed
"""
if metric_name == "rmse":
return rmse(y, y_hat)
elif metric_name == "rmsle":
return rmsle(y, y_hat)
elif metric_name == "roc_auc":
return roc_auc(y, y_hat)
elif metric_name == "log_loss":
return log_loss(y, y_hat)
elif metric_name == "f1_score_weighted":
return f1_score_weighted(y, y_hat)
elif metric_name == "precision_weighted":
return precision_weighted(y, y_hat)
elif metric_name == "recall_weighted":
return recall_weighted(y, y_hat)
else:
raise ValueError(
"Invalid value for metric_name. Only rmse, rmsle, roc_auc, log_loss allowed"
)
def _get_scorer(metric_name):
if metric_name == "roc_auc":
return metrics.roc_auc_score
elif metric_name == "log_loss":
return metrics.log_loss
else:
raise ValueError(
"Invalid value for metric_name. Only rmse, rmsle, roc_auc, log_loss allowed"
)
def _get_random_seeds(i):
"""
Returns the i-th of 10 predefined seeds (1-indexed)
"""
seed_list = [42, 103, 13, 31, 17, 23, 46, 57, 83, 93]
return seed_list[i - 1]
def _get_x_y_from_data(logger, df, predictors, target):
"""Returns X & Y from a DataFrame"""
if df is not None:
df_X = df[predictors]
df_Y = df[target]
return df_X, df_Y
def _get_x_y_from_training_validation(logger, training, validation, predictors, target):
"""Returns X & Y for training & validation data"""
if training is not None:
training_X, training_Y = _get_x_y_from_data(
logger, training, predictors, target
)
if validation is not None:
validation_X, validation_Y = _get_x_y_from_data(
logger, validation, predictors, target
)
return training_X, training_Y, validation_X, validation_Y
def cat_train_validate_on_holdout(
logger,
run_id,
training,
validation,
predictors,
target,
cat_features,
params,
test_X=None,
label_name="",
log_target=False,
):
"""Train a CatBoost model, validate on holdout data.
If `test_X` has a valid value, creates a new model with number of best iteration
found during holdout phase using training as well as validation data.
Args:
logger: Logger to be used
training: Training DataFrame
validation: Validation DataFrame
predictors: List of names of features
target: Name of target variable
params: Parameters for CatBoost
test_X: Test DataFrame
Returns:
result_dict: Dictionary capturing the validation score, best iteration,
predictions generated on `test_X` and the feature importance
"""
result_dict = {}
logger.info("Training using CatBoost and validating on holdout")
train_X, train_Y, validation_X, validation_Y = _get_x_y_from_training_validation(
logger, training, validation, predictors, target
)
logger.info(
(
f"Shape of train_X, train_Y, validation_X, validation_Y: "
f"{train_X.shape}, {train_Y.shape}, {validation_X.shape}, {validation_Y.shape}"
)
)
if log_target:
train_pool = Pool(
data=train_X,
label=np.log1p(train_Y),
feature_names=predictors,
cat_features=cat_features,
)
valid_pool = Pool(
data=validation_X,
label=np.log1p(validation_Y),
feature_names=predictors,
cat_features=cat_features,
)
else:
train_pool = Pool(
data=train_X,
label=train_Y,
feature_names=predictors,
cat_features=cat_features,
)
valid_pool = Pool(
data=validation_X,
label=validation_Y,
feature_names=predictors,
cat_features=cat_features,
)
model = CatBoost(params=params)
# List of categorical features have already been passed as a part of Pool
# above. No need to pass via the argument of fit()
model.fit(X=train_pool, eval_set=[train_pool, valid_pool], use_best_model=True)
best_iteration = model.get_best_iteration()
if log_target:
valid_prediction = np.expm1(model.predict(valid_pool))
else:
valid_prediction = model.predict(valid_pool)
valid_score = _calculate_perf_metric(validation_Y, valid_prediction)
logger.info(f"Validation Score {valid_score}")
logger.info(f"Best Iteration {best_iteration}")
del train_pool, valid_pool, train_X, train_Y, validation_X, validation_Y
gc.collect()
if test_X is not None:
logger.info("Retraining on the entire data including validation")
training = pd.concat([training, validation])
train_X, train_Y = _get_x_y_from_data(logger, training, predictors, target)
logger.info(
(f"Shape of train_X, train_Y: " f"{train_X.shape}, {train_Y.shape}")
)
if log_target:
train_pool = Pool(
data=train_X,
label=np.log1p(train_Y),
feature_names=predictors,
cat_features=cat_features,
)
else:
train_pool = Pool(
data=train_X,
label=train_Y,
feature_names=predictors,
cat_features=cat_features,
)
test_pool = Pool(
data=test_X, feature_names=predictors, cat_features=cat_features
)
# Drop the validation-only parameters: the final model is fit on the full data
# without an eval set, so eval_metric, early_stopping_rounds and use_best_model
# no longer apply; n_estimators is pinned to the best iteration found above.
params.pop("eval_metric")
params.pop("early_stopping_rounds")
params.pop("use_best_model")
params["n_estimators"] = best_iteration
logger.info(f"Modified parameters for final model training.. {params}")
model = CatBoost(params=params)
model.fit(X=train_pool)
logger.info(f"Predicting on test data: {test_X.shape}")
if log_target:
prediction = np.expm1(model.predict(test_pool))
else:
prediction = model.predict(test_pool)
result_dict = _evaluate_and_log_for_holdout(
logger=logger,
run_id=run_id,
valid_prediction=valid_prediction,
valid_score=valid_score,
y_predicted=prediction,
result_dict=result_dict,
best_iteration=best_iteration,
label_name=label_name,
)
feature_importance = model.get_feature_importance()
result_dict = _capture_feature_importance_for_holdout(
feature_importance=feature_importance,
features=predictors,
result_dict=result_dict,
)
logger.info("Training/Prediction completed!")
return result_dict
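# A minimal usage sketch for the holdout trainer above. Everything here is
# illustrative: `logger`, the DataFrames and the parameter values are assumptions,
# not taken from the original project.
#
#     params = {
#         "loss_function": "RMSE",
#         "eval_metric": "RMSE",
#         "n_estimators": 10000,
#         "early_stopping_rounds": 100,
#         "use_best_model": True,
#     }
#     result = cat_train_validate_on_holdout(
#         logger=logger,
#         run_id="run_001",
#         training=train_df,
#         validation=valid_df,
#         predictors=["f1", "f2", "cat_1"],
#         target="target",
#         cat_features=["cat_1"],
#         params=params,
#         test_X=test_df[["f1", "f2", "cat_1"]],
#     )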
def xgb_train_validate_on_holdout(
logger,
run_id,
training,
validation,
predictors,
target,
params,
test_X=None,
n_estimators=10000,
early_stopping_rounds=100,
verbose_eval=100,
label_name="",
log_target=False,
):
"""Train a XGBoost model, validate on holdout data. If `test_X`
has a valid value, creates a new model with number of best iteration
found during holdout phase using training as well as validation data.
Args:
logger: Logger to be used
training: Training DataFrame
validation: Validation DataFrame
predictors: List of names of features
target: Name of target variable
params: Parameters for XGBoost
test_X: Test DataFrame
Returns:
result_dict: Dictionary with the validation prediction and score, the test
prediction (if `test_X` is given), the best iteration and the feature
importance captured for this run.
"""
result_dict = {}
logger.info("Training using XGBoost and validating on holdout")
train_X, train_Y, validation_X, validation_Y = _get_x_y_from_training_validation(
logger, training, validation, predictors, target
)
logger.info(
(
f"Shape of train_X, train_Y, validation_X, validation_Y: "
f"{train_X.shape}, {train_Y.shape}, {validation_X.shape}, {validation_Y.shape}"
)
)
if log_target:
dtrain = xgb.DMatrix(
data=train_X, label=np.log1p(train_Y), feature_names=predictors
)
dvalid = xgb.DMatrix(
data=validation_X, label=np.log1p(validation_Y), feature_names=predictors
)
else:
dtrain = xgb.DMatrix(data=train_X, label=train_Y, feature_names=predictors)
dvalid = xgb.DMatrix(
data=validation_X, label=validation_Y, feature_names=predictors
)
watchlist = [(dtrain, "train"), (dvalid, "valid_data")]
bst = xgb.train(
dtrain=dtrain,
num_boost_round=n_estimators,
evals=watchlist,
early_stopping_rounds=early_stopping_rounds,
params=params,
verbose_eval=verbose_eval,
)
if log_target:
valid_prediction = np.expm1(
bst.predict(
xgb.DMatrix(validation_X, feature_names=predictors),
ntree_limit=bst.best_ntree_limit,
)
)
else:
valid_prediction = bst.predict(
xgb.DMatrix(validation_X, feature_names=predictors),
ntree_limit=bst.best_ntree_limit,
)
# Get best iteration
best_iteration = bst.best_ntree_limit
valid_score = _calculate_perf_metric(validation_Y, valid_prediction)
logger.info(f"Validation Score {valid_score}")
logger.info(f"Best Iteration {best_iteration}")
del watchlist, dtrain, dvalid, train_X, train_Y, validation_X, validation_Y
gc.collect()
if test_X is not None:
logger.info("Retraining on the entire data including validation")
training = pd.concat([training, validation])
train_X, train_Y = _get_x_y_from_data(logger, training, predictors, target)
logger.info(
(f"Shape of train_X, train_Y: " f"{train_X.shape}, {train_Y.shape}")
)
if log_target:
dtrain = xgb.DMatrix(
data=train_X, label=np.log1p(train_Y), feature_names=predictors
)
dtest = xgb.DMatrix(data=test_X, feature_names=predictors)
else:
dtrain = xgb.DMatrix(data=train_X, label=train_Y, feature_names=predictors)
dtest = xgb.DMatrix(data=test_X, feature_names=predictors)
bst = xgb.train(
dtrain=dtrain,
num_boost_round=best_iteration,
params=params,
verbose_eval=verbose_eval,
)
logger.info(f"Predicting on test data: {test_X.shape}")
if log_target:
prediction = np.expm1(bst.predict(dtest, ntree_limit=best_iteration))
else:
prediction = bst.predict(dtest, ntree_limit=best_iteration)
result_dict = _evaluate_and_log_for_holdout(
logger=logger,
run_id=run_id,
valid_prediction=valid_prediction,
valid_score=valid_score,
y_predicted=prediction,
result_dict=result_dict,
best_iteration=best_iteration,
label_name=label_name,
)
# XGBoost reports importance only for features that were actually used in the
# model, so take both the names and the values from get_score().
feature_importance_values = bst.get_score().values()
feature_importance_features = bst.get_score().keys()
result_dict = _capture_feature_importance_for_holdout(
feature_importance=feature_importance_values,
features=feature_importance_features,
result_dict=result_dict,
)
logger.info("Training/Prediction completed!")
return result_dict
def lgb_train_validate_on_holdout(
logger,
run_id,
training,
validation,
test_X,
predictors,
target,
params,
n_estimators=10000,
early_stopping_rounds=100,
cat_features="auto",
verbose_eval=100,
label_name="",
log_target=False,
):
"""Train a LGB model and validate on holdout data.
Args:
logger: Logger to be used
training: Training DataFrame
validation: Validation DataFrame
predictors: List of names of features
target: Name of target variable
params: Parameters for LGBM
test_X: Test DataFrame
Returns:
result_dict: Dictionary with the validation prediction and score, the test
prediction (if `test_X` is given), the best iteration and the feature
importance captured for this run.
"""
result_dict = {}
logger.info("Training using LGB and validating on holdout")
train_X, train_Y, validation_X, validation_Y = _get_x_y_from_training_validation(
logger, training, validation, predictors, target
)
logger.info(
(
f"Shape of train_X, train_Y, validation_X, validation_Y: "
f"{train_X.shape}, {train_Y.shape}, {validation_X.shape}, {validation_Y.shape}"
)
)
if log_target:
dtrain = lgb.Dataset(train_X, label=np.log1p(train_Y))
dvalid = lgb.Dataset(validation_X, np.log1p(validation_Y))
else:
dtrain = lgb.Dataset(train_X, label=train_Y)
dvalid = lgb.Dataset(validation_X, validation_Y)
bst = lgb.train(
params,
dtrain,
valid_sets=[dtrain, dvalid],
verbose_eval=verbose_eval,
num_boost_round=n_estimators,
early_stopping_rounds=early_stopping_rounds,
feature_name=predictors,
categorical_feature=cat_features,
)
best_iteration = bst.best_iteration
if log_target:
valid_prediction = np.expm1(
bst.predict(validation_X, num_iteration=best_iteration)
)
else:
valid_prediction = bst.predict(validation_X, num_iteration=best_iteration)
valid_score = _calculate_perf_metric(validation_Y, valid_prediction)
logger.info(f"Validation Score {valid_score}")
logger.info(f"Best Iteration {best_iteration}")
del dtrain, dvalid, train_X, train_Y, validation_X, validation_Y
gc.collect()
if test_X is not None:
logger.info("Retraining on the entire data including validation")
training = pd.concat([training, validation])
# target API: pandas.concat
print("Importing libraries...")
# import libraries
from bs4 import BeautifulSoup
import urllib.request
import pickle
import pandas as pd
print("Done importing.")
# scrape synonyms from sinonimkata.com
with open('pickle/words.pkl', 'rb') as f:
words = pickle.load(f)
print("Generating thesaurus..")
thesaurus = {}
words = list(set(words))
for x in words:
name = x
data = { "q": name }
encoded_data = urllib.parse.urlencode(data).encode("utf-8")
content = urllib.request.urlopen("http://www.sinonimkata.com/search.php", encoded_data)
soup = BeautifulSoup(content, 'html.parser')
try:
synonym = soup.find('td', attrs={'width': '90%'}).find_all('a')
synonym = [x.getText() for x in synonym]
thesaurus[x] = [x] + synonym
except:
thesaurus[x] = [name]
print("Thesaurus for '" +x+ "' done.")
print("Done generating.")
# save results to 'corpus/thesaurus.xlsx'
thesaurus_list = [[x, thesaurus[x]] for x in thesaurus]
print("Saving data to corpus/thesaurus.xlsx..")
df = pd.DataFrame(thesaurus_list)
# target API: pandas.DataFrame
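# Assumed continuation of the script above (the original is truncated here): the
# "Saving data to corpus/thesaurus.xlsx.." message suggests the DataFrame is then
# written out with pandas' Excel writer (requires openpyxl or xlsxwriter). The
# column names below are illustrative assumptions:
#
#     df.columns = ["word", "synonyms"]
#     df.to_excel("corpus/thesaurus.xlsx", index=False)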
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import typing
import unittest
import pandas as pd
from past.builtins import unicode
import apache_beam as beam
from apache_beam import coders
from apache_beam.dataframe import convert
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import transforms
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
def sort_by_value_and_drop_index(df):
if isinstance(df, pd.DataFrame):
sorted_df = df.sort_values(by=list(df.columns))
else:
sorted_df = df.sort_values()
return sorted_df.reset_index(drop=True)
def check_correct(expected, actual, check_index=False):
if actual is None:
raise AssertionError('Empty frame but expected: \n\n%s' % (expected))
if isinstance(expected, pd.core.generic.NDFrame):
sorted_actual = sort_by_value_and_drop_index(actual)
sorted_expected = sort_by_value_and_drop_index(expected)
if not sorted_actual.equals(sorted_expected):
raise AssertionError(
'Dataframes not equal: \n\n%s\n\n%s' %
(sorted_actual, sorted_expected))
else:
if actual != expected:
raise AssertionError('Scalars not equal: %s != %s' % (actual, expected))
def concat(parts):
if len(parts) > 1:
return pd.concat(parts)
# target API: pandas.concat
import glob
import math
import brewer2mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
SPINE_COLOR = 'gray'
#####################################################
# Process average from files #
#####################################################
def process_average(folder, scenarios, labels, header):
columns = ['property']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=header, names=columns)['property']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result = pd.concat(dfs1, axis=1)
return result
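# Illustrative call of process_average (the folder and scenario names are
# assumptions, not taken from the original experiments): read the first "a1*"
# file of each scenario and collect the per-scenario series side by side.
#
#     labels = ["1 hop", "2 hops", "3 hops"]
#     averages = process_average(
#         folder="results/sourcey",
#         scenarios=["s1", "s2", "s3"],
#         labels=labels,
#         header=None,
#     )
#     print(averages.mean())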
class Plotter():
#####################################################
# Latexify #
#####################################################
@staticmethod
def latexify(fig_width=None, fig_height=None, columns=1, fullwidth=False):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
if fullwidth:
fig_width = 3.39*2 if columns==1 else 6.9 # width in inches
else:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio
if fullwidth:
fig_height = fig_width*golden_mean/2.0 # height in inches
else:
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {
'backend': 'ps',
'text.latex.preamble': ['\\usepackage{amssymb}'],
'axes.labelsize': 5, # fontsize for x and y labels (was 10)
'axes.titlesize': 5,
'lines.markersize' : 3,
'lines.markeredgewidth': 0.3,
'legend.fontsize': 4, # was 10
'text.usetex': True,
'legend.edgecolor': 'w',
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif',
'grid.linestyle': 'dashed',
'grid.color': 'grey',
'lines.dashed_pattern' : [150, 150],
'xtick.color': 'k',
'ytick.color': 'k',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.minor.width': 0.05,
'ytick.minor.width': 0.05,
'xtick.major.width': 0.1,
'ytick.major.width': 0.1,
'xtick.labelsize': 4,
'ytick.labelsize': 4,
'lines.linewidth' : 0.2,
'grid.linewidth': 0.01,
'axes.linewidth': 0.2,
'errorbar.capsize' : 1,
'xtick.minor.visible': False, # visibility of minor ticks on x-axis
# 'ytick.minor.visible': False, # visibility of minor ticks on x-axis
'boxplot.notch': False,
'boxplot.vertical': True,
'boxplot.whiskers': 1.5,
'boxplot.bootstrap': None,
'boxplot.patchartist': False,
'boxplot.showmeans': False,
'boxplot.showcaps': True,
'boxplot.showbox': True,
'boxplot.showfliers': True,
'boxplot.meanline': False,
'boxplot.flierprops.color': 'lightgrey',
'boxplot.flierprops.marker': 'o',
'boxplot.flierprops.markerfacecolor': 'none',
'boxplot.flierprops.markeredgecolor': 'lightgrey',
'boxplot.flierprops.markersize': 1,
'boxplot.flierprops.linestyle': 'none',
'boxplot.flierprops.linewidth': 0.1,
'boxplot.boxprops.color': 'C2',
'boxplot.boxprops.linewidth': 0.2,
'boxplot.boxprops.linestyle': '-',
'boxplot.whiskerprops.color': 'C2',
'boxplot.whiskerprops.linewidth': 0.2,
'boxplot.whiskerprops.linestyle': '-',
'boxplot.capprops.color': 'C2',
'boxplot.capprops.linewidth': 0.2,
'boxplot.capprops.linestyle': '-',
'boxplot.medianprops.color': 'C2',
'boxplot.medianprops.linewidth': 0.20,
'boxplot.medianprops.linestyle': '-',
'boxplot.meanprops.color': 'C2',
'boxplot.meanprops.marker': '^',
'boxplot.meanprops.markerfacecolor': 'C2',
'boxplot.meanprops.markeredgecolor': 'C2',
'boxplot.meanprops.markersize': 6,
'boxplot.meanprops.linestyle': 'none',
'boxplot.meanprops.linewidth': 0.20,
}
matplotlib.rcParams.update(params)
# for spine in ['top', 'right']:
# ax.spines[spine].set_visible(False)
# for spine in ['left', 'bottom']:
# ax.spines[spine].set_color(SPINE_COLOR)
# ax.spines[spine].set_linewidth(0.1)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# # Or if you want different settings for the grids:
# ax.grid(which='minor', alpha=0.2)
# ax.grid(which='major', alpha=0.5)
# for axis in [ax.xaxis, ax.yaxis]:
# axis.set_tick_params(direction='out', color=SPINE_COLOR)
# return ax
#####################################################
# Latency - Mean - 4 methods #
#####################################################
@staticmethod
def latency_avg_4methods(folder1, folder2, folder3, folder4, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
#print('result1\n', result1.describe())
#print('result1\n', result1.to_string())
std1 = result1.std()
ax1 = result1.mean().plot(label="Sourcey", legend = True, yerr=std1, color="red")
ax1.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs2 += [df]
result2 = pd.concat(dfs2, axis=1)
#print('result2\n', result2.describe())
#print('result2\n', result2.to_string())
std2 = result2.std()
ax2 = result2.mean().plot(label="Sourcey Fabric", legend = True, yerr=std2, color="orange")
ax2.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs3 = []
for scenario in scenarios:
file = glob.glob(folder3 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs3 += [df]
result3 = pd.concat(dfs3, axis=1)
#print('result3\n', result3.describe())
#print('result3\n', result3.to_string())
std3 = result3.std()
ax3 = result3.mean().plot(label="PolKA", legend = True, yerr=std3, color="blue")
ax3.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs4 = []
for scenario in scenarios:
file = glob.glob(folder4 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs4 += [df]
result4 = pd.concat(dfs4, axis=1)
#print('result4\n', result4.describe())
#print('result4\n', result4.to_string())
std4 = result4.std()
ax4 = result4.mean().plot(label="PolKA Fabric", legend = True, yerr=std4, color="green")
ax4.set_ylim(0, ylim)
ax4.tick_params(axis='both', which='major', labelsize=5)
ax4.grid(b=True, which='major', linestyle='dashed', axis='x')
ax4.grid(b=True, which='major', linestyle='dashed', axis='y')
ax4.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
plt.title(title)
plt.ylabel('RTT Latency (s)')
plt.xlabel('Number of Hops')
plt.tight_layout()
plt.savefig(output)
#####################################################
# Latency - Mean - 4 methods #
#####################################################
@staticmethod
def latency_avg_4methods_bar(folder1, folder2, folder3, folder4, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
std1 = result1.std()
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)
# target API: pandas.read_csv
import numpy as np
import pandas as pd
def to_bin_lat(y):
lat_step = 5.
binned_lat = np.floor(y / lat_step) * lat_step
return(binned_lat)
def to_bin_lon(x):
lon_step = 10.
binned_lon = np.floor(x / lon_step) * lon_step
return(binned_lon)
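# Quick sanity check for the binning helpers (values chosen for illustration):
# latitudes are floored to 5-degree bins and longitudes to 10-degree bins, e.g.
#
#     to_bin_lat(47.3)    # -> 45.0
#     to_bin_lat(-2.1)    # -> -5.0
#     to_bin_lon(-173.9)  # -> -180.0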
def monthly_mean_map(season_of_cosmic_data):
"""
Description
===========
Find the diurnal mean of the TLS temperatures in each 5x10 degree box
Arguments
=========
season_of_cosmic_data
--------
Pandas dataframe of GPS-RO TLS temperatures after ERA-5 daily mean has been subtracted
returns
=======
monthly_mean_map
----------------
Diurnal mean for one month of TLS temperature in every 5x10 box
"""
#Group cosmic data by lats and lons
season_of_cosmic_data["latbin"] = season_of_cosmic_data.Lat.map(to_bin_lat)
season_of_cosmic_data["lonbin"] = season_of_cosmic_data.Lon.map(to_bin_lon)
grid_groups = season_of_cosmic_data.groupby(["latbin", "lonbin"])
# Set lats and lons
lats = np.arange(-90, 90., 5.)
lons = np.arange(-180, 180., 10.)
monthly_mean_map = []
for i in range(0, len(lats)):
monthly_mean_lat = []
for j in range(0, len(lons)):
try:
#Subset monthly data by lat lon and drop NaN values
key = (lats[i], lons[j])
cosmic_grid_cell = grid_groups.get_group(key)
cosmic_grid_cell.dropna(subset=["Temp"], inplace=True)
#Find monthly average temperatures
#NOTE: Because we are looking for deviations from daily means
# we must carefully remove the true daily average by averaging
# over all 8 local time hour bins
daily_average_bins = []
for hour_bin in np.linspace(0, 21, 8):
hour_bin_1 = cosmic_grid_cell[cosmic_grid_cell['Hour'] == int(hour_bin)]
hour_bin_2 = cosmic_grid_cell[cosmic_grid_cell['Hour'] == (int(hour_bin)+1)]
hour_bin_3 = cosmic_grid_cell[cosmic_grid_cell['Hour'] == (int(hour_bin)+2)]
hour_bin_total = pd.concat([hour_bin_1, hour_bin_2, hour_bin_3])
cosmic_temp_hour_bin = hour_bin_total['Temp'].mean()
daily_average_bins.append(cosmic_temp_hour_bin)
daily_mean_temp = np.nanmean(daily_average_bins)
monthly_mean_map.append([lats[i], lons[j], daily_mean_temp])
except:
t_list = [lats[i], lons[j], np.NaN]
monthly_mean_map.append(t_list)
return monthly_mean_map
def mean_map_removal(month_of_data_df, mean_TLS_map):
"""
Description
===========
Radio Occultation data differs from ERA-5 data because of bias inherent to ERA-5 in lower stratosphere.
mean_map_removal attempts to remove any bias in the diurnal cycles after the daily ERA-5 mean has been
removed so that this error is not aliased into the diurnal cycles.
Arguments
=========
month_of_data_df
--------
Pandas dataframe of GPS-RO TLS temperatures after ERA-5 daily mean has been subtracted
mean_TLS_map
------------
a 36x36 map of diurnal means to be removed from month_of_data_df
returns
=======
era_5_df_new
------------
Pandas dataframe with correctly wrapped longitude
"""
# Set lats and lons
lats = np.arange(-90, 90., 5.)
lons = np.arange(-180, 180., 10.)
mean_TLS_list = np.reshape(mean_TLS_map, (1296, 3)) # our map is 5degx10deg so 36binsx36bins=1296 globally
mean_TLS_df = pd.DataFrame(mean_TLS_list, columns=['Lat', 'Lon', 'Temp'])
# Make sure data is properly binned
month_of_data_df["latbin"] = month_of_data_df.Lat.map(to_bin_lat)
month_of_data_df["lonbin"] = month_of_data_df.Lon.map(to_bin_lon)
occultations_mean_removed_roladex = []
for lat_idx in range(0, len(lats)):
lat_key = lat_idx*5. - 90.
occultations_at_lat = month_of_data_df[month_of_data_df['latbin'] == lat_key]
monthly_mean_at_lat = mean_TLS_df[mean_TLS_df['Lat'] == lat_key]
for lon_idx in range(0, len(lons)):
lon_key = lon_idx*10. - 180.
occultations_in_box = occultations_at_lat[occultations_at_lat['lonbin'] == lon_key]
monthly_mean_in_box = monthly_mean_at_lat[monthly_mean_at_lat['Lon'] == lon_key]
occultations_in_box['Temp'] = occultations_in_box['Temp'] - monthly_mean_in_box['Temp'].iloc[0]
occultations_mean_removed_roladex.append(occultations_in_box)
mean_removed_df = pd.concat(occultations_mean_removed_roladex)
return mean_removed_df
def era5_df_switcher(era_5_np):
"""
Description
===========
DJF data was initially processed without recognizing that the longitudinal wrapping was off by 180 degrees. Instead
of reprocessing all data, just realign the data if needed.
Arguments
=========
era_5_np
--------
Numpy array of post processed era 5 data with incorrect longitudinal wrapping.
returns
=======
era_5_df_new
------------
Pandas dataframe with correctly wrapped longitude
"""
era_5_df = pd.DataFrame(era_5_np, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp'])
era_5_df_pos_lons = era_5_df[era_5_df['Lon'] >= 0]
era_5_df_neg_lons = era_5_df[era_5_df['Lon'] < 0]
era_5_df_pos_lons['Lon'] = era_5_df_pos_lons['Lon'] - 180.
era_5_df_neg_lons['Lon'] = era_5_df_neg_lons['Lon'] + 180
era_5_df_new = pd.concat([era_5_df_pos_lons, era_5_df_neg_lons])
return era_5_df_new
def background_and_bias_remover(month_of_occultation_df, month_of_era5_df):
"""
Description
===========
background_and_bias_remover is a function used to remove both high frequency synoptic variability and low
frequency bias from GPS-RO data in an effort to create a composite of TLS diurnal cycles. The function
returns a pandas DataFrame with processed data to be used for the composite.
Arguments
=========
month_of_occultation_df
-----------------------
month_of_occultation_df is a pandas DataFrame filled with TLS data from GPS-RO occultations for one month,
spanning 2006-2020. TLS data has information of year, day, local time hour, latitude, longitude, and the TLS
temperature
month_of_era5_df
----------------
month_of_era5_df is a dataframe with TLS data derived from interpolated ERA-5 reanalysis data. Like
month_of_occultation_df, month_of_era5_df is just data from 2006-2020 for one month. Data has information
of year, day, local time hour, latitude, longitude, and the TLS temperature
returns
=======
TLS temperatures after both the ERA-5 daily mean has been removed, and the monthly mean of the diurnal
cycles has been removed
"""
# Set lats and lons
lat_bins = np.arange(-90, 90., 5.)
lon_bins = np.arange(-180, 180., 10.)
bias_and_ERA5_removed_data = []
#ERA5_removed_data = []
for year in range(2006, 2021):
print(year)
year_of_occultations = month_of_occultation_df[month_of_occultation_df['Year'] == year]
year_of_occultations["latbin"] = year_of_occultations.Lat.map(to_bin_lat)
year_of_occultations['lonbin'] = year_of_occultations.Lon.map(to_bin_lon)
year_of_era5 = month_of_era5_df[month_of_era5_df['Year'] == str(year)]
month_of_daily_era_5_removed = []
if year_of_era5.Temp.size > 0:
for day in year_of_occultations['Day'].unique():
day_of_occultations = year_of_occultations[year_of_occultations['Day'] == day]
day_of_era5 = year_of_era5[year_of_era5['Day'] == day]
for lat in lat_bins:
occultations_at_lat = day_of_occultations[day_of_occultations['latbin'] == lat]
era5_at_lat = day_of_era5[day_of_era5['Lat'] == lat]
for lon in lon_bins:
occultations_at_lon = occultations_at_lat[occultations_at_lat['lonbin'] == lon]
era5_at_lon = era5_at_lat[era5_at_lat['Lon'] == lon]
era5_daily_temps = era5_at_lon.Temp.to_list()
daily_mean = np.nanmean(era5_daily_temps)
if occultations_at_lon.Temp.size > 0:
occultations_at_lon.Temp = occultations_at_lon.Temp - daily_mean
month_of_daily_era_5_removed.append(occultations_at_lon)
else:
continue
month_of_daily_era_5_removed_df = pd.concat(month_of_daily_era_5_removed)
monthly_mean_bias_by_map = monthly_mean_map(month_of_daily_era_5_removed_df)
monthly_mean_bias_removed = mean_map_removal(month_of_daily_era_5_removed_df,
monthly_mean_bias_by_map)
bias_and_ERA5_removed_data.append(monthly_mean_bias_removed)
else:
continue
bias_and_era_5_removed_df = pd.concat(bias_and_ERA5_removed_data)
return(bias_and_era_5_removed_df)
def box_mean_remover(season_of_cosmic_data):
"""
Description
===========
removes diurnal mean from each 5x10 degree box after all data processing. Really just a final centering step
Arguments
=========
season_of_cosmic_data
-----------------------
Data after all daily and monthly mean removal processing
returns
=======
pandas data frame where mean in every 5x10 box is zero
"""
lats = np.arange(-90, 90., 5)
lons = np.arange(-180, 180, 10)
hour_map = []
for i in range(0, len(lats)):
lat_band_df = []
for j in range(0, len(lons)):
lat_band = season_of_cosmic_data[season_of_cosmic_data['latbin'] == lats[i]]
lat_lon_box = lat_band[lat_band['lonbin'] == lons[j]]
lat_lon_box.dropna(subset = ["Temp"], inplace=True)
lat_lon_box['Temp'] = lat_lon_box['Temp'] - lat_lon_box.Temp.mean()
lat_band_df.append(lat_lon_box)
lat_band_df_concat = pd.concat(lat_band_df)
lat_band_df_concat['Temp'] = lat_band_df_concat['Temp'] - lat_band_df_concat.Temp.mean()
hour_map.append(lat_band_df_concat)
final_hour_map = pd.concat(hour_map)
# target API: pandas.concat
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
df = DataFrame(d)
d["a"] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
# target API: pandas.DataFrame
import datetime
import re
import warnings
from os.path import dirname, join
import numpy as np
import pandas as pd
from techminer.core import explode
from techminer.core.extract_country_name import extract_country_name
from techminer.core.extract_words import extract_words
from techminer.core.map import map_
from techminer.core.text import remove_accents
from techminer.core.thesaurus import load_file_as_dict
warnings.filterwarnings("ignore")
from nltk import word_tokenize
class ScopusImporter:
def __init__(
self,
input_file="scopus.csv",
output_file="techminer.csv",
article=True,
article_in_press=True,
book=True,
book_chapter=True,
business_article=True,
conference_paper=True,
conference_review=False,
data_paper=True,
editorial=False,
letter=False,
note=False,
review=True,
short_survey=True,
erratum=False,
report=False,
retracted=False,
abstract_report=False,
undefined=False,
):
self.input_file = input_file
self.output_file = output_file
self.data = None
self.article = article
self.article_in_press = article_in_press
self.book = book
self.book_chapter = book_chapter
self.business_article = business_article
self.conference_paper = conference_paper
self.conference_review = conference_review
self.data_paper = data_paper
self.editorial = editorial
self.letter = letter
self.note = note
self.review = review
self.short_survey = short_survey
self.erratum = erratum
self.report = report
self.retracted = retracted
self.abstract_report = abstract_report
self.undefined = undefined
def run(self):
##
## Load data
##
self.data = pd.read_csv(self.input_file)
##
## Remove blank spaces
##
self.data = self.data.applymap(lambda w: w.strip() if isinstance(w, str) else w)
##
## Document ID
##
self.data["ID"] = range(len(self.data))
##
## Steps
##
self.rename_columns()
self.select_documents()
self.remove_accents()
self.remove_no_author_name_available()
self.format_author_names()
self.count_number_of_authors_per_document()
self.calculate_frac_number_of_documents_per_author()
self.remove_no_author_id_available()
self.disambiguate_author_names()
self.remove_text_in_foreing_languages()
self.extract_country_names()
self.extract_country_first_author()
self.reduce_list_of_countries()
self.transform_author_keywords_to_lower_case()
self.transform_index_keywords_to_lower_case()
self.remove_copyright_mark_from_abstracts()
self.transform_global_citations_NA_to_zero()
self.format_abb_source_title()
self.create_historiograph_id()
self.create_local_references()
self.transform_abstract_to_lower_case()
self.british_to_amerian()
self.keywords_in_abstract()
# self.extract_title_keywords()
# self.extract_title_words()
# self.extract_abstract_phrases_and_words()
# self.highlight_author_keywords_in_titles()
# self.highlight_author_keywords_in_abstracts()
self.compute_bradford_law_zones()
##
## Replace blanks by pd.NA
##
self.data = self.data.applymap(
lambda w: pd.NA if isinstance(w, str) and w == "" else w
)
self.data = self.data.applymap(
lambda w: w.replace(chr(8211), "-") if isinstance(w, str) else w
)
##
## Transformer output
##
if self.output_file is None:
return self.data
self.data.to_csv(self.output_file, index=False)
self.logging_info("Finished!!!")
def logging_info(self, msg):
print(
"{} - INFO - {}".format(
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), msg
)
)
# def extract_title_keywords(self):
# self.logging_info("Keywords extraction from title ...")
# author_keywords = self.data.Author_Keywords.dropna()
# author_keywords = author_keywords.map(lambda w: w.lower().split(";"))
# author_keywords = author_keywords.explode().tolist()
# author_keywords = set(author_keywords)
# index_keywords = self.data.Index_Keywords.dropna()
# index_keywords = index_keywords.map(lambda w: w.lower().split(";"))
# index_keywords = index_keywords.explode().tolist()
# index_keywords = set(index_keywords)
# keywords = author_keywords | index_keywords
# self.data["Title_Keywords"] = self.data.Title.copy()
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: word_tokenize(w.lower()), na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: set(w), na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: keywords & w, na_action="ignore"
# )
# self.data["Title_Keywords"] = self.data.Title_Keywords.map(
# lambda w: ";".join(w), na_action="ignore"
# )
def select_documents(self):
document_types = []
if self.article is True:
document_types.append("Article")
if self.article_in_press is True:
document_types.append("Article-in-Press")
if self.book is True:
document_types.append("Book")
if self.book_chapter is True:
document_types.append("Book Chapter")
if self.business_article is True:
document_types.append("Business Article")
if self.conference_paper is True:
document_types.append("Conference Paper")
if self.conference_review is True:
document_types.append("Conference Review")
if self.data_paper is True:
document_types.append("Data Paper")
if self.editorial is True:
document_types.append("Editorial")
if self.letter is True:
document_types.append("Letter")
if self.note is True:
document_types.append("Note")
if self.review is True:
document_types.append("Review")
if self.short_survey is True:
document_types.append("Short Survey")
if self.erratum is True:
document_types.append("Erratum")
if self.report is True:
document_types.append("Report")
if self.retracted is True:
document_types.append("Retracted")
if self.abstract_report is True:
document_types.append("Abstract Report")
if self.undefined is True:
document_types.append("Undefined")
self.data = self.data[
self.data.Document_Type.map(
lambda w: w in document_types, na_action="ignore"
)
]
self.data.index = range(len(self.data))
def keywords_in_abstract(self):
self.logging_info("Extracting Keywords from abstracts ...")
author_keywords = self.data.Author_Keywords.dropna()
author_keywords = author_keywords.map(lambda w: w.lower().split(";"))
author_keywords = author_keywords.explode().tolist()
author_keywords = set(author_keywords)
index_keywords = self.data.Index_Keywords.dropna()
index_keywords = index_keywords.map(lambda w: w.lower().split(";"))
index_keywords = index_keywords.explode().tolist()
index_keywords = set(index_keywords)
keywords = author_keywords | index_keywords
##
## Prepare compound keywords
##
compound_keywords = [w for w in keywords if len(w.split()) > 1]
compound_keywords = sorted(compound_keywords, key=len, reverse=True)
##
## Preserve compound keywords in the abstract
##
phrases = self.data.Abstract.copy()
for k in compound_keywords:
pattern = re.compile(re.escape(k), re.IGNORECASE)
phrases = phrases.map(
lambda w: pattern.sub(k.replace(" ", "_"), w), na_action="ignore"
)
##
## Tokenize words
##
phrases = phrases.map(
lambda w: set(word_tokenize(w.lower())),
na_action="ignore",
)
##
## Restore compound words
##
phrases = phrases.map(
lambda w: [m.replace("_", " ") for m in w],
na_action="ignore",
)
##
## Extracts keywords from text
###
self.data["Abstract_Keywords"] = phrases.map(
lambda w: ";".join(sorted(keywords & set(w))), na_action="ignore"
)
self.data["Abstract_Author_Keywords"] = phrases.map(
lambda w: ";".join(sorted(author_keywords & set(w))), na_action="ignore"
)
self.data["Abstract_Index_Keywords"] = phrases.map(
lambda w: ";".join(sorted(index_keywords & set(w))), na_action="ignore"
)
def british_to_amerian(self):
self.logging_info("Translate british spelling to american spelling ...")
module_path = dirname(__file__)
filename = join(module_path, "data/bg2am.data")
bg2am = load_file_as_dict(filename)
for british_word in bg2am:
self.data = self.data.applymap(
lambda w: w.replace(british_word, bg2am[british_word][0])
if isinstance(w, str)
else w
)
def rename_columns(self):
for column_to_delete in [
"Abstract HL",
# "Abstract",
"Access Type",
# "Affiliations",
"Art. No.",
# "Authors_ID",
"Authors with affiliations",
# "Bradford_Law_Zone",
"CODEN",
"Correspondence Address",
"DOI",
"Editors",
"EID",
# "Global_References",
# "ID",
"ISBN",
"ISSN",
"Issue",
"Language of Original Document",
"Link",
# "Local_References",
# "Num_Authors",
"Page count",
"Page end",
"Page start",
"Publication Stage",
"Publisher",
"PubMed ID",
"Source",
# "Global_Citations",
# "Local_Citations",
"Title HL",
# "Title",
"Volume",
# "Year",
]:
if column_to_delete in self.data.columns:
self.data.pop(column_to_delete)
scopus2tags = {
"Abbreviated Source Title": "Abb_Source_Title",
"Abstract": "Abstract",
"Access Type": "Access_Type",
"Affiliations": "Affiliations",
"Art. No.": "Art_No",
"Author Keywords": "Author_Keywords",
"Author(s) ID": "Authors_ID",
"Authors with affiliations": "Authors_with_affiliations",
"Authors": "Authors",
"Cited by": "Global_Citations",
"CODEN": "CODEN",
"Correspondence Address": "Correspondence_Address",
"Document Type": "Document_Type",
"DOI": "DOI",
"Editors": "Editors",
"EID": "EID",
"Index Keywords": "Index_Keywords",
"ISBN": "ISBN",
"ISSN": "ISSN",
"Issue": "Issue",
"Language of Original Document": "Language_of_Original_Document",
"Link": "Link",
"Page count": "Page_count",
"Page end": "Page_end",
"Page start": "Page_start",
"Publication Stage": "Publication_Stage",
"Publisher": "Publisher",
"PubMed ID": "PubMed_ID",
"References": "Global_References",
"Source title": "Source_title",
"Source": "Source",
"Title": "Title",
"Volume": "Volume",
"Year": "Year",
}
self.logging_info("Renaming and selecting columns ...")
self.data = self.data.rename(columns=scopus2tags)
def remove_accents(self):
self.logging_info("Removing accents ...")
self.data = self.data.applymap(
lambda w: remove_accents(w) if isinstance(w, str) else w
)
def remove_no_author_name_available(self):
if "Authors" not in self.data.columns:
return
self.logging_info('Removing "[No author name available]" ...')
self.data["Authors"] = self.data.Authors.map(
lambda w: pd.NA if w == "[No author name available]" else w
)
def format_author_names(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Formatting author names ...")
self.data["Authors"] = self.data.Authors.map(
lambda w: w.replace(",", ";").replace(".", "") if pd.isna(w) is False else w
)
def count_number_of_authors_per_document(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Counting number of authors per document...")
self.data["Num_Authors"] = self.data.Authors.map(
lambda w: len(w.split(";")) if not pd.isna(w) else 0
)
def calculate_frac_number_of_documents_per_author(self):
if "Authors" not in self.data.columns:
return
self.logging_info("Counting frac number of documents per author...")
self.data["Frac_Num_Documents"] = self.data.Authors.map(
lambda w: 1.0 / len(w.split(";")) if not pd.isna(w) else 0
)  # `else 0` assumed by analogy with count_number_of_authors_per_document above
# target API: pandas.isna
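# A minimal usage sketch for the importer above (illustrative only; assumes the
# remaining ScopusImporter helper methods are available as in the full module):
#
#     importer = ScopusImporter(input_file="scopus.csv", output_file="techminer.csv")
#     importer.run()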
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
@pytest.fixture
def idx_expected(self):
idx = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B").tz_localize(
"US/Pacific"
)
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
assert expected.dtype == idx.dtype
return idx, expected
def test_to_series_keep_tz_deprecated_true(self, idx_expected):
# convert to series while keeping the timezone
idx, expected = idx_expected
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
assert msg in str(m[0].message)
tm.assert_series_equal(result, expected)
def test_to_series_keep_tz_deprecated_false(self, idx_expected):
idx, expected = idx_expected
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
def test_setitem_dt64series(self, idx_expected):
# convert to utc
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
def test_constructor_from_tzaware_datetimeindex(self, idx_expected):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx, expected = idx_expected
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# target API: pandas._testing.assert_series_equal
import copy
import re
import warnings
import numpy as np
import pandas as pd
import xarray
import scipy.stats as st
import numba
try:
import arviz as az
import arviz.plots.plot_utils
except:
warnings.warn(
"Could not import ArviZ. Perhaps it is not installed."
" Some functionality in the viz submodule will not be available."
)
import scipy.ndimage
import matplotlib._contour
from matplotlib.pyplot import get_cmap as mpl_get_cmap
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import colorcet
try:
import holoviews as hv
import holoviews.operation.datashader
hv.extension("bokeh")
except ImportError as e:
warnings.warn(
f"""DataShader import failed with error "{e}".
Features requiring DataShader will not work and you will get exceptions."""
)
from . import utils
from . import image
try:
from . import stan
except:
warnings.warn(
"Could not import `stan` submodule. Perhaps pystan or cmdstanpy is not properly installed."
)
def confints(
summaries, p=None, marker_kwargs={}, line_kwargs={}, palette=None, **kwargs
):
"""Make a horizontal plot of centers/conf ints with error bars.
Parameters
----------
summaries : list of dicts
Each entry in `summaries` is a dictionary containing minimally
keys 'estimate', 'conf_int', and 'label'. The 'estimate' value
is the point estimate, a single scalar. The 'conf_int' value is
a two-tuple, two-list, or two-numpy array containing the low and
high end of the confidence interval for the estimate. The
'label' value is the name of the variable. This gives the label
of the y-ticks.
p : bokeh.plotting.Figure instance or None, default None
If not None, a figure to be populated with confidence interval
plot. If specified, it is important that `p.y_range` be set to
contain all of the values in the labels provided in the
`summaries` input. If `p` is None, then a new figure is created.
marker_kwargs : dict, default {}
Kwargs to be passed to p.circle() for plotting estimates.
line_kwargs : dict, default {}
Kwargs passsed to p.line() to plot the confidence interval.
palette : list, str, or None
If None, default colors (or those given in `marker_kwargs` and
`line_kwargs` are used). If a str, all glyphs are colored
accordingly, e.g., 'black'. Otherwise a list of colors is used.
kwargs : dict
Any additional kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Plot of error bars.
"""
n = len(summaries)
labels = [summary["label"] for summary in summaries]
estimates = [summary["estimate"] for summary in summaries]
conf_intervals = [summary["conf_int"] for summary in summaries]
if palette is None:
use_palette = False
else:
if (
"color" in marker_kwargs
or "line_color" in marker_kwargs
or "fill_color" in marker_kwargs
or "color" in line_kwargs
or "line_color" in line_kwargs
):
raise RuntimeError(
"`palette` must be None if color is specified in "
"`marker_kwargs` or `line_kwargs`"
)
if type(palette) == str:
marker_kwargs["color"] = palette
line_kwargs["color"] = palette
use_palette = False
elif type(palette) == list or type(palette) == tuple:
palette = palette[:n][::-1]
use_palette = True
line_width = kwargs.pop("line_width", 3)
size = marker_kwargs.pop("size", 5)
if p is None:
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 50 * n
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
toolbar_location = kwargs.pop("toolbar_location", "above")
p = bokeh.plotting.figure(
y_range=labels[::-1], toolbar_location=toolbar_location, **kwargs
)
for i, (estimate, conf, label) in enumerate(zip(estimates, conf_intervals, labels)):
if use_palette:
marker_kwargs["color"] = palette[i % len(palette)]
line_kwargs["color"] = palette[i % len(palette)]
p.circle(x=[estimate], y=[label], size=size, **marker_kwargs)
p.line(x=conf, y=[label, label], line_width=line_width, **line_kwargs)
return p
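# Illustrative example for confints() (the numbers are made up): each summary
# provides a point estimate, a confidence interval and a y-axis label.
#
#     summaries = [
#         {"estimate": 1.2, "conf_int": (0.9, 1.6), "label": "strain A"},
#         {"estimate": 0.8, "conf_int": (0.5, 1.1), "label": "strain B"},
#     ]
#     p = confints(summaries, palette="black")
#     bokeh.io.show(p)   # assumes `import bokeh.io`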
def fill_between(
x1=None,
y1=None,
x2=None,
y2=None,
show_line=True,
patch_kwargs={},
line_kwargs={},
p=None,
**kwargs,
):
"""
Create a filled region between two curves.
Parameters
----------
x1 : array_like
Array of x-values for first curve
y1 : array_like
Array of y-values for first curve
x2 : array_like
Array of x-values for second curve
y2 : array_like
Array of y-values for second curve
show_line : bool, default True
If True, show the lines on the edges of the fill.
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure() in
creating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with fill-between.
"""
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 350
if p is None:
p = bokeh.plotting.figure(**kwargs)
line_width = patch_kwargs.pop("line_width", 0)
line_alpha = patch_kwargs.pop("line_alpha", 0)
p.patch(
x=np.concatenate((x1, x2[::-1])),
y=np.concatenate((y1, y2[::-1])),
line_width=line_width,
line_alpha=line_alpha,
**patch_kwargs,
)
if show_line:
line_width = line_kwargs.pop("line_width", 2)
p.line(x1, y1, line_width=line_width, **line_kwargs)
p.line(x2, y2, line_width=line_width, **line_kwargs)
return p
def qqplot(
samples,
data,
percentile=95,
patch_kwargs={},
line_kwargs={},
diag_kwargs={},
p=None,
**kwargs,
):
"""
Generate a Q-Q plot.
Parameters
----------
samples : Numpy array or xarray, shape (n_samples, n) or xarray DataArray
A Numpy array containing predictive samples.
data : Numpy array, shape (n,) or xarray DataArray
One-dimensional data set to use in Q-Q plot.
percentile : int or float, default 95
Which percentile to use in displaying the Q-Q plot.
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the filled
region of the Q-Q plot.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
diag_kwargs : dict
Any kwargs to be passed into p.line() in generating diagonal
reference line of Q-Q plot.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure() in
creating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with Q-Q plot.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("`samples` can only be a Numpy array or xarray.")
if samples.ndim != 2:
raise RuntimeError(
"`samples` must be a 2D array, with each row being a sample."
)
if len(samples) < 100:
warnings.warn(
"`samples` has very few samples. Predictive percentiles may be poor."
)
if data is not None and len(data) != samples.shape[1]:
raise RuntimeError("Mismatch in shape of `data` and `samples`.")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 275
if "fill_alpha" not in patch_kwargs:
patch_kwargs["fill_alpha"] = 0.5
x = np.sort(data)
samples = np.sort(samples)
# Upper and lower bounds
low_theor, up_theor = np.percentile(
samples, (50 - percentile / 2, 50 + percentile / 2), axis=0
)
x_range = [data.min(), data.max()]
if "x_range" not in kwargs:
kwargs["x_range"] = x_range
if p is None:
p = bokeh.plotting.figure(**kwargs)
p = fill_between(
x,
up_theor,
x,
low_theor,
patch_kwargs=patch_kwargs,
line_kwargs=line_kwargs,
show_line=True,
p=p,
)
# Plot 45 degree line
color = diag_kwargs.pop("color", "black")
alpha = diag_kwargs.pop("alpha", 0.5)
line_width = diag_kwargs.pop("line_width", 4)
p.line(x_range, x_range, line_width=line_width, color=color, alpha=alpha)
return p
def _ecdf(
data=None,
p=None,
x_axis_label=None,
y_axis_label="ECDF",
title=None,
plot_height=300,
plot_width=450,
staircase=False,
complementary=False,
x_axis_type="linear",
y_axis_type="linear",
**kwargs,
):
"""
Create a plot of an ECDF.
Parameters
----------
data : array_like
One-dimensional array of data. Nan's are ignored.
conf_int : bool, default False
If True, display a confidence interval on the ECDF.
ptiles : list, default [2.5, 97.5]
The percentiles to use for the confidence interval. Ignored if
`conf_int` is False.
n_bs_reps : int, default 1000
Number of bootstrap replicates to do to compute confidence
interval. Ignored if `conf_int` is False.
fill_color : str, default 'lightgray'
Color of the confidence interval. Ignored if `conf_int` is
False.
fill_alpha : float, default 1
Opacity of confidence interval. Ignored if `conf_int` is False.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
x_axis_label : str, default None
Label for the x-axis. Ignored if `p` is not None.
y_axis_label : str, default 'ECDF' or 'ECCDF'
Label for the y-axis. Ignored if `p` is not None.
title : str, default None
Title of the plot. Ignored if `p` is not None.
plot_height : int, default 300
Height of plot, in pixels. Ignored if `p` is not None.
plot_width : int, default 450
Width of plot, in pixels. Ignored if `p` is not None.
staircase : bool, default False
If True, make a plot of a staircase ECDF (staircase). If False,
plot the ECDF as dots.
complementary : bool, default False
If True, plot the empirical complementary cumulative
distribution function.
x_axis_type : str, default 'linear'
Either 'linear' or 'log'.
y_axis_type : str, default 'linear'
Either 'linear' or 'log'.
kwargs
Any kwargs to be passed to either p.circle or p.line, for
`staircase` being False or True, respectively.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with ECDF.
"""
# Check data to make sure legit
data = utils._convert_data(data)
# Data points on ECDF
x, y = _ecdf_vals(data, staircase, complementary)
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "ECCDF" if complementary else "ECDF")
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_axis_type=x_axis_type,
y_axis_type=y_axis_type,
title=title,
)
if staircase:
# Line of steps
p.line(x, y, **kwargs)
# Rays for ends
if complementary:
p.ray(x=x[0], y=1, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=0, length=0, angle=0, **kwargs)
else:
p.ray(x=x[0], y=0, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=1, length=0, angle=0, **kwargs)
else:
p.circle(x, y, **kwargs)
return p
def _histogram(
data=None,
bins="freedman-diaconis",
p=None,
density=False,
kind="step",
line_kwargs={},
patch_kwargs={},
**kwargs,
):
"""
Make a plot of a histogram of a data set.
Parameters
----------
data : array_like
1D array of data to make a histogram out of
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
density : bool, default False
If True, normalized the histogram. Otherwise, base the histogram
on counts.
kind : str, default 'step'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
line_kwargs : dict
Any kwargs to be passed to p.line() in making the line of the
histogram.
patch_kwargs : dict
Any kwargs to be passed to p.patch() in making the fill of the
histogram.
kwargs : dict
All other kwargs are passed to bokeh.plotting.figure()
Returns
-------
output : Bokeh figure
Figure populated with histogram.
"""
if data is None:
raise RuntimeError("Input `data` must be specified.")
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)
# Compute histogram
bins = _bins_to_np(data, bins)
e0, f0 = _compute_histogram(data, bins, density)
if kind == "step":
p.line(e0, f0, **line_kwargs)
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)
return p
def _bins_to_np(data, bins):
"""Compute a Numpy array to pass to np.histogram() as bins."""
if type(bins) == str and bins not in [
"integer",
"exact",
"sqrt",
"freedman-diaconis",
]:
raise RuntimeError("Invalid bins specification.")
if type(bins) == str and bins == "exact":
a = np.unique(data)
if len(a) == 1:
bins = np.array([a[0] - 0.5, a[0] + 0.5])
else:
bins = np.concatenate(
(
(a[0] - (a[1] - a[0]) / 2,),
(a[1:] + a[:-1]) / 2,
(a[-1] + (a[-1] - a[-2]) / 2,),
)
)
elif type(bins) == str and bins == "integer":
if np.any(data != np.round(data)):
raise RuntimeError("'integer' bins chosen, but data are not integer.")
bins = np.arange(data.min() - 1, data.max() + 1) + 0.5
elif type(bins) == str and bins == "sqrt":
bins = int(np.ceil(np.sqrt(len(data))))
elif type(bins) == str and bins == "freedman-diaconis":
h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(len(data))
if h == 0.0:
bins = 3
else:
bins = int(np.ceil((data.max() - data.min()) / h))
return bins
def _compute_histogram(data, bins, density):
"""Compute values of histogram for plotting."""
f, e = np.histogram(data, bins=bins, density=density)
e0 = np.empty(2 * len(e))
f0 = np.empty(2 * len(e))
e0[::2] = e
e0[1::2] = e
f0[0] = 0
f0[-1] = 0
f0[1:-1:2] = f
f0[2:-1:2] = f
return e0, f0
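# Illustrative sketch, not part of the library API: how _bins_to_np() and
# _compute_histogram() combine. The data below are made up; any 1D numeric
# array would do.
def _sketch_histogram_staircase():
    rng = np.random.default_rng(0)
    vals = rng.normal(size=500)
    edges = _bins_to_np(vals, "freedman-diaconis")
    e0, f0 = _compute_histogram(vals, edges, density=True)
    # Each bin edge appears twice in e0 and each count twice in the interior
    # of f0, so p.line(e0, f0) traces the histogram outline as a staircase.
    return e0, f0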
def predictive_ecdf(
samples,
data=None,
diff=None,
percentiles=(95, 68),
color="blue",
data_color="orange",
data_staircase=True,
data_size=2,
x=None,
discrete=False,
p=None,
**kwargs,
):
"""Plot a predictive ECDF from samples.
Parameters
----------
samples : Numpy array or xarray, shape (n_samples, n) or xarray DataArray
A Numpy array containing predictive samples.
data : Numpy array, shape (n,) or xarray DataArray, or None
If not None, ECDF of measured data is overlaid with predictive
ECDF.
diff : 'ecdf', 'iecdf', 'eppf', or None, default None
Referring to the variable as x, if `diff` is 'iecdf' or 'eppf',
for each value of the ECDF, plot the value of x minus the median
x. If 'ecdf', plot the value of the ECDF minus the median ECDF
value. If None, just plot the ECDFs.
percentiles : list or tuple, default (95, 68)
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme for shading the
percentiles.
data_color : str, default 'orange'
String representing the color of the data to be plotted over the
confidence interval envelopes.
data_staircase : bool, default True
If True, plot the ECDF of the data as a staircase.
Otherwise plot it as dots.
data_size : int, default 2
        Size of marker (if `data_staircase` is False) or thickness of
        line (if `data_staircase` is True) used to plot the data.
x : Numpy array, default None
Points at which to evaluate the ECDF. If None, points are
automatically generated based on the data range.
discrete : bool, default False
If True, the samples take on discrete values.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Figure populated with glyphs describing range of values for the
ECDF of the samples. The shading goes according to percentiles
of samples of the ECDF, with the median ECDF plotted as line in
the middle.
"""
if diff == True:
diff = "ecdf"
warnings.warn(
"`diff` as a Boolean is deprecated. Use 'ecdf', 'iecdf', or None."
" Using `diff = 'ecdf'`.",
DeprecationWarning,
)
elif diff == False:
diff = None
warnings.warn(
"`diff` as a Boolean is deprecated. Use 'ecdf', 'iecdf', or None."
" Using `diff = None`.",
DeprecationWarning,
)
if diff is not None:
diff = diff.lower()
if diff == "eppf":
diff = "iecdf"
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("`samples` can only be a Numpy array or xarray.")
if samples.ndim != 2:
raise RuntimeError(
"`samples` must be a 2D array, with each row being a sample."
)
if len(samples) < 100:
warnings.warn(
"`samples` has very few samples. Predictive percentiles may be poor."
)
if data is not None and len(data) != samples.shape[1]:
raise RuntimeError("Mismatch in shape of `data` and `samples`.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
raise RuntimeError(
"Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', 'orange'"
)
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
samples = np.sort(samples)
n = samples.shape[1]
if data is not None:
data_plot = np.sort(np.array(data))
# y-values for ECDFs
y = np.arange(1, n + 1) / n
df_ecdf = pd.DataFrame(dict(y=y))
for ptile in ptiles:
df_ecdf[str(ptile)] = np.percentile(samples, ptile, axis=0)
# Set up plot
if p is None:
x_axis_label = kwargs.pop(
"x_axis_label", "x difference" if diff == "iecdf" else "x"
)
y_axis_label = kwargs.pop(
"y_axis_label", "ECDF difference" if diff == "ecdf" else "ECDF"
)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
# Plot the predictive intervals
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if diff == "ecdf":
med = df_ecdf["50"].values
x1_val = df_ecdf[ptile].values
x1_post = med[med > x1_val.max()]
x2_val = df_ecdf[ptiles_str[-i - 1]].values
x2_pre = med[med < x2_val.min()]
x1 = np.concatenate((x1_val, x1_post))
y1 = np.concatenate((y, np.ones_like(x1_post)))
y1 -= _ecdf_arbitrary_points(df_ecdf["50"].values, x1)
x2 = np.concatenate((x2_pre, x2_val))
y2 = np.concatenate((np.zeros_like(x2_pre), y))
y2 -= _ecdf_arbitrary_points(df_ecdf["50"].values, x2)
x1, y1 = cdf_to_staircase(x1, y1)
x2, y2 = cdf_to_staircase(x2, y2)
else:
if diff == "iecdf":
df_ecdf[ptile] -= df_ecdf["50"]
df_ecdf[ptiles_str[-i - 1]] -= df_ecdf["50"]
x1, y1 = cdf_to_staircase(df_ecdf[ptile].values, y)
x2, y2 = cdf_to_staircase(df_ecdf[ptiles_str[-i - 1]].values, y)
fill_between(
x1,
y1,
x2,
y2,
p=p,
show_line=False,
patch_kwargs=dict(color=colors[color][i], alpha=0.5),
)
# The median as a solid line
if diff == "ecdf":
p.ray(
x=df_ecdf["50"].min(),
y=0.0,
length=0,
angle=np.pi,
line_width=2,
color=colors[color][-1],
)
p.ray(
x=df_ecdf["50"].min(),
y=0.0,
length=0,
angle=0,
line_width=2,
color=colors[color][-1],
)
elif diff == "iecdf":
p.line([0.0, 0.0], [0.0, 1.0], line_width=2, color=colors[color][-1])
else:
x, y_median = cdf_to_staircase(df_ecdf["50"], y)
p.line(x, y_median, line_width=2, color=colors[color][-1])
p.ray(
x=x.min(),
y=0.0,
length=0,
angle=np.pi,
line_width=2,
color=colors[color][-1],
)
p.ray(
x=x.max(),
y=int(diff is None),
length=0,
angle=0,
line_width=2,
color=colors[color][-1],
)
# Overlay data set
if data is not None:
if data_staircase:
if diff == "iecdf":
data_plot -= df_ecdf["50"]
x_data, y_data = cdf_to_staircase(data_plot, y)
elif diff == "ecdf":
med = df_ecdf["50"].values
x_data = np.sort(np.unique(np.concatenate((data_plot, med))))
data_ecdf = _ecdf_arbitrary_points(data_plot, x_data)
med_ecdf = _ecdf_arbitrary_points(med, x_data)
x_data, y_data = cdf_to_staircase(x_data, data_ecdf - med_ecdf)
else:
x_data, y_data = cdf_to_staircase(data_plot, y)
p.line(x_data, y_data, color=data_color, line_width=data_size)
# Extend to infinity
if diff != "iecdf":
p.ray(
x=x_data.min(),
y=0.0,
length=0,
angle=np.pi,
line_width=data_size,
color=data_color,
)
p.ray(
x=x_data.max(),
y=int(diff is None),
length=0,
angle=0,
line_width=data_size,
color=data_color,
)
else:
if diff == "iecdf":
p.circle(data_plot - df_ecdf["50"], y, color=data_color, size=data_size)
elif diff == "ecdf":
p.circle(
data_plot,
y - _ecdf_arbitrary_points(df_ecdf["50"].values, data_plot),
color=data_color,
size=data_size,
)
else:
p.circle(data_plot, y, color=data_color, size=data_size)
return p
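# Illustrative usage sketch, not part of the library API: calling
# predictive_ecdf() with synthetic Normal draws. Only the array shapes matter;
# the numbers themselves are hypothetical.
def _sketch_predictive_ecdf():
    rng = np.random.default_rng(1)
    samples = rng.normal(size=(200, 50))  # 200 predictive draws of 50 points each
    data = rng.normal(size=50)  # "measured" data of matching length
    return predictive_ecdf(samples, data=data, percentiles=(95, 68))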
def predictive_regression(
samples,
samples_x,
data=None,
diff=False,
percentiles=[95, 68],
color="blue",
data_kwargs={},
p=None,
**kwargs,
):
"""Plot a predictive regression plot from samples.
Parameters
----------
samples : Numpy array, shape (n_samples, n_x) or xarray DataArray
Numpy array containing predictive samples of y-values.
    samples_x : Numpy array, shape (n_x,)
        x-values corresponding to the columns of `samples`.
data : Numpy array, shape (n, 2) or xarray DataArray
If not None, the measured data. The first column is the x-data,
and the second the y-data. These are plotted as points over the
predictive plot.
    diff : bool, default False
If True, the predictive y-values minus the median of the
predictive y-values are plotted.
percentiles : list, default [95, 68]
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme for shading the
percentiles.
data_kwargs : dict
Any kwargs to be passed to p.circle() when plotting the data
points.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Figure populated with glyphs describing range of values for the
the samples. The shading goes according to percentiles of
samples, with the median plotted as line in the middle.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("Samples can only be Numpy arrays and xarrays.")
if type(samples_x) != np.ndarray:
if type(samples_x) == xarray.core.dataarray.DataArray:
samples_x = samples_x.squeeze().values
else:
raise RuntimeError("`samples_x` can only be Numpy array or xarray.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
raise RuntimeError(
"Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', 'orange'"
)
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
if samples.shape[1] != len(samples_x):
        raise ValueError(
            "`samples_x` must have the same number of entries as `samples` has columns."
        )
# It's useful to have data as a data frame
if data is not None:
if type(data) == tuple and len(data) == 2 and len(data[0]) == len(data[1]):
data = np.vstack(data).transpose()
df_data = pd.DataFrame(data=data, columns=["__data_x", "__data_y"])
df_data = df_data.sort_values(by="__data_x")
# Make sure all entries in x-data in samples_x
if diff:
if len(samples_x) != len(df_data) or not np.allclose(
np.sort(samples_x), df_data["__data_x"].values
):
raise ValueError(
"If `diff == True`, then samples_x must match the x-values of `data`."
)
df_pred = pd.DataFrame(
data=np.percentile(samples, ptiles, axis=0).transpose(),
columns=[str(ptile) for ptile in ptiles],
)
df_pred["__x"] = samples_x
df_pred = df_pred.sort_values(by="__x")
if p is None:
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "y difference" if diff else "y")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if diff:
y1 = df_pred[ptile] - df_pred["50"]
y2 = df_pred[ptiles_str[-i - 1]] - df_pred["50"]
else:
y1 = df_pred[ptile]
y2 = df_pred[ptiles_str[-i - 1]]
fill_between(
x1=df_pred["__x"],
x2=df_pred["__x"],
y1=y1,
y2=y2,
p=p,
show_line=False,
patch_kwargs=dict(fill_color=colors[color][i]),
)
# The median as a solid line
if diff:
p.line(
df_pred["__x"],
np.zeros_like(samples_x),
line_width=2,
color=colors[color][-1],
)
else:
p.line(df_pred["__x"], df_pred["50"], line_width=2, color=colors[color][-1])
# Overlay data set
if data is not None:
data_color = data_kwargs.pop("color", "orange")
data_alpha = data_kwargs.pop("alpha", 1.0)
data_size = data_kwargs.pop("size", 2)
if diff:
p.circle(
df_data["__data_x"],
df_data["__data_y"] - df_pred["50"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
else:
p.circle(
df_data["__data_x"],
df_data["__data_y"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
return p
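# Illustrative usage sketch, not part of the library API: predictive samples
# of a noisy line evaluated at 20 x-points, with matching fabricated "data".
def _sketch_predictive_regression():
    rng = np.random.default_rng(2)
    samples_x = np.linspace(0.0, 10.0, 20)
    samples = samples_x + rng.normal(size=(300, 20))  # broadcasts to (300, 20)
    data = np.column_stack((samples_x, samples_x + rng.normal(size=20)))
    return predictive_regression(samples, samples_x, data=data)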
def sbc_rank_ecdf(
sbc_output=None,
parameters=None,
diff=True,
ptile=99.0,
bootstrap_envelope=False,
n_bs_reps=None,
show_envelope=True,
show_envelope_line=True,
color_by_warning_code=False,
staircase=False,
p=None,
marker_kwargs={},
envelope_patch_kwargs={},
envelope_line_kwargs={},
palette=None,
show_legend=True,
**kwargs,
):
"""Make a rank ECDF plot from simulation-based calibration.
Parameters
----------
sbc_output : DataFrame
Output of bebi103.stan.sbc() containing results from an SBC
calculation.
parameters : list of str, or None (default)
List of parameters to include in the SBC rank ECDF plot. If
None, use all parameters. For multidimensional parameters, each
entry must be given separately, e.g.,
`['alpha[0]', 'alpha[1]', 'beta[0,1]']`.
diff : bool, default True
If True, plot the ECDF minus the ECDF of a Uniform distribution.
Otherwise, plot the ECDF of the rank statistic from SBC.
ptile : float, default 99
        Which percentile to use as the envelope in the plot.
bootstrap_envelope : bool, default False
If True, use bootstrapping on the appropriate Uniform
distribution to compute the envelope. Otherwise, use the
Gaussian approximation for the envelope.
n_bs_reps : bool, default None
Number of bootstrap replicates to use when computing the
envelope. If None, n_bs_reps is determined from the formula
int(max(n, max(L+1, 100/(100-ptile))) * 100), where n is the
number of simulations used in the SBC calculation.
show_envelope : bool, default True
If True, display the envelope encompassing the ptile percent
confidence interval for the SBC ECDF.
show_envelope_line : bool, default True
If True, and `show_envelope` is also True, plot a line around
the envelope.
color_by_warning_code : bool, default False
If True, color glyphs by diagnostics warning code instead of
        coloring the glyphs by parameter.
staircase : bool, default False
If True, plot the ECDF as a staircase. Otherwise, plot with
dots.
p : bokeh.plotting.Figure instance, default None
Plot to which to add the SBC rank ECDF plot. If None, create a
new figure.
marker_kwargs : dict, default {}
Dictionary of kwargs to pass to `p.circle()` or `p.line()` when
plotting the SBC ECDF.
envelope_patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill of
the envelope.
envelope_line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill of the envelope.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
show_legend : bool, default True
If True, show legend.
kwargs : dict
Any kwargs passed to `bokeh.plotting.figure()` when creating the
plot.
Returns
-------
output : bokeh.plotting.Figure instance
A plot containing the SBC plot.
Notes
-----
You can see example SBC ECDF plots in Fig. 14 b and c in this
paper: https://arxiv.org/abs/1804.06788
"""
if sbc_output is None:
raise RuntimeError("Argument `sbc_output` must be specified.")
# Defaults
if palette is None:
palette = colorcet.b_glasbey_category10
    elif type(palette) not in [list, tuple]:
palette = [palette]
if "x_axis_label" not in kwargs:
kwargs["x_axis_label"] = "rank statistic"
if "y_axis_label" not in kwargs:
kwargs["y_axis_label"] = "ECDF difference" if diff else "ECDF"
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
toolbar_location = kwargs.pop("toolbar_location", "above")
if "fill_color" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_color"] = "gray"
if "fill_alpha" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_alpha"] = 0.5
if "line_color" not in envelope_line_kwargs:
envelope_line_kwargs["line_color"] = "gray"
    if "color" in marker_kwargs and color_by_warning_code:
raise RuntimeError(
"Cannot specify marker color when `color_by_warning_code` is True."
)
if staircase and color_by_warning_code:
raise RuntimeError("Cannot color by warning code for staircase ECDFs.")
if parameters is None:
parameters = list(sbc_output["parameter"].unique())
elif type(parameters) not in [list, tuple]:
parameters = [parameters]
L = sbc_output["L"].iloc[0]
df = sbc_output.loc[
sbc_output["parameter"].isin(parameters),
["parameter", "rank_statistic", "warning_code"],
]
n = (df["parameter"] == df["parameter"].unique()[0]).sum()
if show_envelope:
x, y_low, y_high = _sbc_rank_envelope(
L,
n,
ptile=ptile,
diff=diff,
bootstrap=bootstrap_envelope,
n_bs_reps=n_bs_reps,
)
p = fill_between(
x1=x,
x2=x,
y1=y_high,
y2=y_low,
patch_kwargs=envelope_patch_kwargs,
line_kwargs=envelope_line_kwargs,
show_line=show_envelope_line,
p=p,
toolbar_location=toolbar_location,
**kwargs,
)
else:
p = bokeh.plotting.figure(toolbar_location=toolbar_location, **kwargs)
if staircase:
dfs = []
for param in parameters:
if diff:
x_data, y_data = _ecdf_diff(
df.loc[df["parameter"] == param, "rank_statistic"],
L,
staircase=True,
)
else:
x_data, y_data = _ecdf_vals(
df.loc[df["parameter"] == param, "rank_statistic"], staircase=True
)
dfs.append(
pd.DataFrame(
data=dict(rank_statistic=x_data, __ECDF=y_data, parameter=param)
)
)
df = pd.concat(dfs, ignore_index=True)
else:
df["__ECDF"] = df.groupby("parameter")["rank_statistic"].transform(_ecdf_y)
if diff:
df["__ECDF"] -= (df["rank_statistic"] + 1) / L
if staircase:
color = marker_kwargs.pop("color", palette)
if type(color) == str:
color = [color] * len(parameters)
elif "color" not in marker_kwargs:
color = palette
else:
color = [marker_kwargs.pop("color")] * len(parameters)
if color_by_warning_code:
if len(color) < len(df["warning_code"].unique()):
raise RuntimeError(
"Not enough colors in palette to cover all warning codes."
)
elif len(color) < len(parameters):
raise RuntimeError("Not enough colors in palette to cover all parameters.")
if staircase:
plot_cmd = p.line
else:
plot_cmd = p.circle
if show_legend:
legend_items = []
if color_by_warning_code:
for i, (warning_code, g) in enumerate(df.groupby("warning_code")):
if show_legend:
legend_items.append(
(
str(warning_code),
[
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
],
)
)
else:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
else:
for i, (param, g) in enumerate(df.groupby("parameter")):
if show_legend:
legend_items.append(
(
param,
[
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
],
)
)
else:
plot_cmd(
source=g,
x="rank_statistic",
y="__ECDF",
color=color[i],
**marker_kwargs,
)
if show_legend:
legend = bokeh.models.Legend(items=legend_items)
p.add_layout(legend, "right")
p.legend.click_policy = "hide"
return p
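# Illustrative usage sketch, not part of the library API. Real input comes
# from bebi103.stan.sbc(); here a fabricated DataFrame supplies only the
# columns this function reads ('parameter', 'rank_statistic', 'warning_code',
# 'L').
def _sketch_sbc_rank_ecdf():
    rng = np.random.default_rng(3)
    L, n_sim = 100, 400
    df_fake = pd.DataFrame(
        dict(
            parameter="alpha",
            rank_statistic=rng.integers(0, L + 1, size=n_sim),
            warning_code=0,
            L=L,
        )
    )
    return sbc_rank_ecdf(df_fake, diff=True)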
def parcoord(
samples=None,
parameters=None,
palette=None,
omit=None,
include_ppc=False,
include_log_lik=False,
transformation=None,
color_by_chain=False,
line_kwargs={},
divergence_kwargs={},
xtick_label_orientation=0.7853981633974483,
**kwargs,
):
"""
Make a parallel coordinate plot of MCMC samples. The x-axis is the
parameter name and the y-axis is the value of the parameter,
    possibly transformed so that the scales of all parameters are similar.
Parameters
----------
samples : ArviZ InferenceData instance or xarray Dataset instance
Result of MCMC sampling.
parameters : list of str, or None (default)
Names of parameters to include in the plot. If None, use all
parameters. For multidimensional parameters, each entry must be
given separately, e.g., `['alpha[0]', 'alpha[1]', 'beta[0,1]']`.
If a given entry is a 2-tuple, the first entry is the variable
name, and the second entry is the label for the parameter in
plots.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
omit : str, re.Pattern, or list or tuple of str and re.Pattern
If `parameters` is not provided, all parameters are used in the
parallel coordinate plot. We often want to ignore samples of
some variables. For each string entry in `omit`, the variable
given by the string is omitted. For each entry that is a
        compiled regular expression pattern (`re.Pattern`), any variable
name matching the pattern is omitted.
include_ppc : bool, default False
If True, include variables ending in _ppc, which denotes
posterior predictive checks, in the plot.
include_log_lik: bool, default False
If True, include variables starting with log_lik or loglik.
These denote log-likelihood contributions.
transformation : function, str, or dict, default None
A transformation to apply to each set of samples. The function
        must take a single array as input and return an array of the
        same size. If None, no transformation is done. If a dictionary,
each key is the variable name and the corresponding value is a
function for the transformation of that variable. Alternatively,
if `transformation` is `'minmax'`, the data are scaled to range
from zero to one, or if `transformation` is `'rank'`, the rank
        of each data point is used.
color_by_chain : bool, default False
If True, color the lines by chain.
line_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of non-divergent samples.
divergence_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of divergent samples.
xtick_label_orientation : str or float, default π/4.
Orientation of x tick labels. In some plots, horizontally
labeled ticks will have label clashes, and this can fix that.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()` when
instantiating the figure.
Returns
-------
output : Bokeh plot
Parallel coordinates plot.
"""
if parameters is not None and omit is not None:
raise RuntimeError("At least one of `parameters` and `omit` must be None.")
omit = _parse_omit(omit, include_ppc, include_log_lik)
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
line_width = line_kwargs.pop("line_width", 0.5)
alpha = line_kwargs.pop("alpha", 0.02)
line_join = line_kwargs.pop("line_join", "bevel")
if "color" in line_kwargs and color_by_chain:
raise RuntimeError(
"Cannot specify line color and also color by chain. If coloring by chain, use `palette` kwarg to specify color scheme."
)
color = line_kwargs.pop("color", "black")
divergence_line_join = divergence_kwargs.pop("line_join", "bevel")
divergence_line_width = divergence_kwargs.pop("line_width", 1)
divergence_color = divergence_kwargs.pop("color", "orange")
divergence_alpha = divergence_kwargs.pop("alpha", 1)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 175
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 600
toolbar_location = kwargs.pop("toolbar_location", "above")
if "x_range" in kwargs:
raise RuntimeError("Cannot specify x_range; this is inferred.")
if not color_by_chain:
palette = [color] * len(palette)
if type(samples) != az.data.inference_data.InferenceData:
raise RuntimeError("Input must be an ArviZ InferenceData instance.")
if not hasattr(samples, "posterior"):
raise RuntimeError("Input samples do not have 'posterior' group.")
if not (
hasattr(samples, "sample_stats") and hasattr(samples.sample_stats, "diverging")
):
warnings.warn("No divergence information available.")
parameters, df = stan._samples_parameters_to_df(samples, parameters, omit)
parameters, labels = _parse_parameters(parameters)
    df = df.rename(columns={param: label for param, label in zip(parameters, labels)})
if transformation == "minmax":
transformation = {
param: lambda x: (x - x.min()) / (x.max() - x.min())
if x.min() < x.max()
else 0.0
for param in labels
}
elif transformation == "rank":
transformation = {param: lambda x: st.rankdata(x) for param in labels}
if transformation is None:
transformation = {param: lambda x: x for param in labels}
if callable(transformation) or transformation is None:
transformation = {param: transformation for param in labels}
for col, trans in transformation.items():
df[col] = trans(df[col])
df = df.melt(id_vars=["diverging__", "chain__", "draw__"])
p = bokeh.plotting.figure(
x_range=bokeh.models.FactorRange(*labels),
toolbar_location=toolbar_location,
**kwargs,
)
# Plots for samples that were not divergent
ys = np.array(
[
group["value"].values
for _, group in df.loc[~df["diverging__"]].groupby(["chain__", "draw__"])
]
)
if len(ys) > 0:
ys = [y for y in ys]
xs = [list(df["variable"].unique())] * len(ys)
p.multi_line(
xs,
ys,
line_width=line_width,
alpha=alpha,
line_join=line_join,
color=[palette[i % len(palette)] for i in range(len(ys))],
**line_kwargs,
)
# Plots for samples that were divergent
ys = np.array(
[
group["value"].values
for _, group in df.loc[df["diverging__"]].groupby(["chain__", "draw__"])
]
)
if len(ys) > 0:
ys = [y for y in ys]
xs = [list(df["variable"].unique())] * len(ys)
p.multi_line(
xs,
ys,
alpha=divergence_alpha,
line_join=line_join,
color=divergence_color,
line_width=divergence_line_width,
**divergence_kwargs,
)
p.xaxis.major_label_orientation = xtick_label_orientation
return p
def trace(
samples=None,
parameters=None,
palette=None,
omit=None,
include_ppc=False,
include_log_lik=False,
line_kwargs={},
**kwargs,
):
"""
Make a trace plot of MCMC samples.
Parameters
----------
samples : ArviZ InferenceData instance or xarray Dataset instance
Result of MCMC sampling.
parameters : list of str, or None (default)
Names of parameters to include in the plot. If None, use all
parameters. For multidimensional parameters, each entry must be
given separately, e.g., `['alpha[0]', 'alpha[1]', 'beta[0,1]']`.
If a given entry is a 2-tuple, the first entry is the variable
name, and the second entry is the label for the parameter in
plots.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
omit : str, re.Pattern, or list or tuple of str and re.Pattern
If `parameters` is not provided, all parameters are used in the
parallel coordinate plot. We often want to ignore samples of
some variables. For each string entry in `omit`, the variable
given by the string is omitted. For each entry that is a
        compiled regular expression pattern (`re.Pattern`), any variable
name matching the pattern is omitted.
include_ppc : bool, default False
If True, include variables ending in _ppc, which denotes
posterior predictive checks, in the plot.
include_log_lik: bool, default False
If True, include variables starting with log_lik or loglik.
These denote log-likelihood contributions.
line_kwargs: dict
Dictionary of kwargs to be passed to `p.multi_line()` in making
the plot of non-divergent samples.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()`.
Returns
-------
output : Bokeh gridplot
Set of chain traces as a Bokeh gridplot.
"""
if parameters is not None and omit is not None:
raise RuntimeError("At least one of `parameters` and `omit` must be None.")
omit = _parse_omit(omit, include_ppc, include_log_lik)
if type(samples) != az.data.inference_data.InferenceData:
raise RuntimeError("Input must be an ArviZ InferenceData instance.")
if not hasattr(samples, "posterior"):
raise RuntimeError("Input samples do not have 'posterior' group.")
parameters, df = stan._samples_parameters_to_df(samples, parameters, omit)
parameters, labels = _parse_parameters(parameters)
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
line_width = line_kwargs.pop("line_width", 0.5)
alpha = line_kwargs.pop("alpha", 0.5)
line_join = line_kwargs.pop("line_join", "bevel")
if "color" in line_kwargs:
raise RuntimeError(
"Cannot specify line color. Specify color scheme with `palette` kwarg."
)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 150
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 600
x_axis_label = kwargs.pop("x_axis_label", "step")
if "y_axis_label" in kwargs:
raise RuntimeError(
"`y_axis_label` cannot be specified; it is inferred from samples."
)
if "x_range" not in kwargs:
kwargs["x_range"] = [df["draw__"].min(), df["draw__"].max()]
plots = []
grouped = df.groupby("chain__")
for i, (var, label) in enumerate(zip(parameters, labels)):
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=label, **kwargs
)
for i, (chain, group) in enumerate(grouped):
p.line(
group["draw__"],
group[var],
line_width=line_width,
line_join=line_join,
color=palette[i],
                **line_kwargs,
)
plots.append(p)
if len(plots) == 1:
return plots[0]
# Link ranges
for i, p in enumerate(plots[:-1]):
plots[i].x_range = plots[-1].x_range
return bokeh.layouts.gridplot(plots, ncols=1)
def corner(
samples=None,
parameters=None,
palette=None,
omit=None,
include_ppc=False,
include_log_lik=False,
max_plotted=10000,
datashade=False,
frame_width=None,
frame_height=None,
plot_ecdf=False,
ecdf_staircase=False,
cmap="black",
color_by_chain=False,
divergence_color="orange",
alpha=None,
single_var_color="black",
bins="freedman-diaconis",
show_contours=False,
contour_color="black",
bins_2d=50,
levels=None,
weights=None,
smooth=0.02,
extend_contour_domain=False,
xtick_label_orientation="horizontal",
):
"""
Make a corner plot of sampling results. Heavily influenced by the
corner package by <NAME>.
Parameters
----------
samples : Numpy array, Pandas DataFrame, or ArviZ InferenceData
Results of sampling. If a Numpy array or Pandas DataFrame, each
row is a sample and each column corresponds to a variable.
parameters : list
List of variables as strings included in `samples` to construct
corner plot. If None, use all parameters. The entries correspond
to column headings if `samples` is in a Pandas DataFrame. If the
input is a Numpy array, `parameters` is a list of indices of
columns to use in the plot. If `samples` is an ArviZ
InferenceData instance, `parameters` contains the names of
parameters to include in the plot. For multidimensional
parameters, each entry must be given separately, e.g.,
`['alpha[0]', 'alpha[1]', 'beta[0,1]']`. If a given entry is a
2-tuple, the first entry is the variable name, and the second
entry is the label for the parameter in plots.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
        colorcet.b_glasbey_category10 from the colorcet package. Ignored if
        `color_by_chain` is False.
omit : str, re.Pattern, or list or tuple of str and re.Pattern
If `parameters` is not provided, all parameters are used in the
parallel coordinate plot. We often want to ignore samples of
some variables. For each string entry in `omit`, the variable
given by the string is omitted. For each entry that is a
        compiled regular expression pattern (`re.Pattern`), any variable
name matching the pattern is omitted.
include_ppc : bool, default False
If True, include variables ending in _ppc, which denotes
posterior predictive checks, in the plot.
include_log_lik: bool, default False
If True, include variables starting with log_lik or loglik.
These denote log-likelihood contributions.
max_plotted : int, default 10000
Maximum number of points to be plotted.
datashade : bool, default False
Whether or not to convert sampled points to a raster image using
Datashader.
frame_width : int or None, default None
Width of each plot in the corner plot in pixels. Default is set
based on number of parameters plotted. If None and
        `frame_height` is specified, `frame_width` is set to
`frame_height`.
frame_height : int or None, default None
Height of each plot in the corner plot in pixels. Default is set
based on number of parameters plotted. If None and `frame_width`
        is specified, `frame_height` is set to `frame_width`.
plot_ecdf : bool, default False
If True, plot ECDFs of samples on the diagonal of the corner
plot. If False, histograms are plotted.
ecdf_staircase : bool, default False
If True, plot the ECDF in "staircase" style. Otherwise, plot as
dots. Ignored if `plot_ecdf` is False.
cmap : str, default 'black'
Valid colormap string for DataShader or for coloring Bokeh
glyphs.
color_by_chain : bool, default False
If True, color the glyphs by chain index.
divergence_color : str, default 'orange'
Color to use for showing points where the sampler experienced a
divergence.
alpha : float or None, default None
Opacity of glyphs. If None, inferred.
single_var_color : str, default 'black'
Color of histogram or ECDF lines.
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins. Ignored if `plot_ecdf` is True.
show_contours : bool, default False
If True, show contour plot on top of samples.
contour_color : str, default 'black'
Color of contour lines
bins_2d : int, default 50
Number of bins in each direction for binning 2D histograms when
computing contours.
levels : list of floats, default None
Levels to use when constructing contours. By default, these are
chosen according to this principle from <NAME>:
http://corner.readthedocs.io/en/latest/pages/sigmas.html
weights : default None
Value to pass as `weights` kwarg to np.histogram2d(), used in
constructing contours.
    smooth : float or None, default 0.02
Width of smoothing kernel for making contours.
extend_contour_domain : bool, default False
If True, extend the domain of the contours a little bit beyond
        the extent of the samples. This is done in the corner package,
but I prefer not to do it.
xtick_label_orientation : str or float, default 'horizontal'.
Orientation of x tick labels. In some plots, horizontally
labeled ticks will have label clashes, and this can fix that.
A preferred alternative to 'horizontal' is `np.pi/4`. Be aware,
though, that non-horizontal tick labels may disrupt alignment
of some of the plots in the corner plot.
Returns
-------
output : Bokeh gridplot
Corner plot as a Bokeh gridplot.
"""
if parameters is not None and omit is not None:
raise RuntimeError("At least one of `parameters` and `omit` must be None.")
omit = _parse_omit(omit, include_ppc, include_log_lik)
# Tools, also allowing linked brushing
tools = "pan,box_zoom,wheel_zoom,box_select,lasso_select,save,reset"
# Default properties
if palette is None:
palette = colorcet.b_glasbey_category10
if color_by_chain:
if cmap not in ["black", None]:
warnings.warn("Ignoring cmap values to color by chain.")
if divergence_color is None:
divergence_color = cmap
if type(samples) == pd.core.frame.DataFrame:
df = samples
if parameters is None:
parameters = [
col
for col in df.columns
if stan._screen_param(col, omit) and (len(col) < 2 or col[-2:] != "__")
]
elif type(samples) == np.ndarray:
if parameters is None:
parameters = list(range(samples.shape[1]))
for param in parameters:
            if not (
                type(param) == int
                or (type(param) in [tuple, list] and type(param[0]) == int)
            ):
raise RuntimeError(
"When `samples` is inputted as a Numpy array, `parameters` must be"
" a list of indices."
)
new_parameters = [
str(param) if type(param) == int else param[1] for param in parameters
]
df = pd.DataFrame(samples[:, parameters], columns=new_parameters)
parameters = new_parameters
else:
parameters, df = stan._samples_parameters_to_df(samples, parameters, omit)
parameters, labels = _parse_parameters(parameters)
# Set default frame width and height.
if frame_height is None:
if frame_width is None:
default_dim = 25 * (9 - len(parameters)) if len(parameters) < 5 else 100
frame_height = default_dim
frame_width = default_dim
else:
frame_height = frame_width
elif frame_width is None:
frame_width = frame_height
if len(parameters) > 6:
raise RuntimeError("For space purposes, can show only six parameters.")
if color_by_chain:
# Have to convert datatype to string to play nice with Bokeh
df["chain__"] = df["chain__"].astype(str)
factors = tuple(df["chain__"].unique())
cmap = bokeh.transform.factor_cmap("chain__", palette=palette, factors=factors)
# Add dummy divergent column if no divergence information is given
if "diverging__" not in df.columns:
df = df.copy()
df["diverging__"] = 0
    # Add dummy chain column if no chain information is given
if "chain__" not in df.columns:
df = df.copy()
df["chain__"] = 0
for col in parameters:
if col not in df.columns:
raise RuntimeError("Column " + col + " not in the columns of DataFrame.")
if labels is None:
labels = parameters
elif len(labels) != len(parameters):
raise RuntimeError("len(parameters) must equal len(labels)")
if plot_ecdf and not ecdf_staircase:
for param in parameters:
df[f"__ECDF_{param}"] = df[param].rank(method="first") / len(df)
n_nondivergent = np.sum(df["diverging__"] == 0)
if n_nondivergent > max_plotted:
inds = np.random.choice(
np.arange(n_nondivergent), replace=False, size=max_plotted
)
else:
inds = np.arange(np.sum(df["diverging__"] == 0))
if alpha is None:
if len(inds) < 100:
alpha = 1
elif len(inds) > 10000:
alpha = 0.02
else:
alpha = np.exp(-(np.log10(len(inds)) - 2) / (-2 / np.log(0.02)))
# Set up column data sources to allow linked brushing
cds = bokeh.models.ColumnDataSource(df.loc[df["diverging__"] == 0, :].iloc[inds, :])
cds_div = bokeh.models.ColumnDataSource(df.loc[df["diverging__"] == 1, :])
# Set up contour settings
contour_lines_kwargs = dict(
smooth=smooth,
levels=levels,
weights=weights,
extend_domain=extend_contour_domain,
bins=bins_2d,
)
# Just make a single plot if only one parameter
if len(parameters) == 1:
x = parameters[0]
if plot_ecdf:
p = bokeh.plotting.figure(
frame_width=frame_width,
frame_height=frame_height,
x_axis_label=labels[0],
y_axis_label="ECDF",
)
if ecdf_staircase:
p = _ecdf(
df[x].iloc[inds],
staircase=True,
line_width=2,
line_color=single_var_color,
p=p,
)
else:
p.circle(source=cds, x=x, y=f"__ECDF_{x}", color=single_var_color)
p.circle(source=cds_div, x=x, y=f"__ECDF_{x}", color=divergence_color)
else:
p = bokeh.plotting.figure(
frame_width=frame_width,
frame_height=frame_height,
x_axis_label=labels[0],
y_axis_label="density",
)
p = _histogram(
df[x].values,
bins=bins,
density=True,
line_kwargs=dict(line_width=2, line_color=single_var_color),
p=p,
)
if frame_height < 200:
p.toolbar_location = "above"
p.xaxis.major_label_orientation = xtick_label_orientation
return p
plots = [[None for _ in range(len(parameters))] for _ in range(len(parameters))]
for i, j in zip(*np.tril_indices(len(parameters))):
x = parameters[j]
if i != j:
y = parameters[i]
x_range, y_range = _data_range(df, x, y)
scatter_figure_kwargs = dict(
x_range=x_range,
y_range=y_range,
frame_width=frame_width,
frame_height=frame_height,
)
plots[i][j] = _corner_scatter(
cds,
cds_div,
x,
y,
datashade,
alpha,
cmap,
show_contours,
divergence_color,
contour_color,
contour_lines_kwargs,
scatter_figure_kwargs,
)
else:
if plot_ecdf:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range,
y_range=[-0.02, 1.02],
frame_width=frame_width,
frame_height=frame_height,
align="end",
tools=tools,
)
if ecdf_staircase:
plots[i][i] = _ecdf(
df[x],
p=plots[i][i],
staircase=True,
line_width=2,
line_color=single_var_color,
)
else:
plots[i][i].circle(
source=cds,
x=x,
y=f"__ECDF_{x}",
size=2,
color=cmap,
nonselection_fill_alpha=0,
nonselection_line_alpha=0,
)
plots[i][i].circle(
source=cds_div,
x=x,
y=f"__ECDF_{x}",
size=2,
color=divergence_color,
nonselection_fill_alpha=0,
nonselection_line_alpha=0,
)
else:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range,
y_range=bokeh.models.DataRange1d(start=0.0),
frame_width=frame_width,
frame_height=frame_height,
align="end",
tools=tools,
)
bins_plot = _bins_to_np(df[x].values, bins)
e0, f0 = _compute_histogram(df[x].values, bins=bins_plot, density=True)
plots[i][i].line(e0, f0, line_width=2, color=single_var_color)
plots[i][j].xaxis.major_label_orientation = xtick_label_orientation
# Link axis ranges
for i in range(1, len(parameters)):
for j in range(i):
plots[i][j].x_range = plots[j][j].x_range
plots[i][j].y_range = plots[i][i].x_range
# Label axes
for i, label in enumerate(labels):
plots[-1][i].xaxis.axis_label = label
for i, label in enumerate(labels[1:]):
plots[i + 1][0].yaxis.axis_label = label
if plot_ecdf:
plots[0][0].yaxis.axis_label = "ECDF"
# Take off tick labels
for i in range(len(parameters) - 1):
for j in range(i + 1):
plots[i][j].xaxis.major_label_text_font_size = "0pt"
if not plot_ecdf:
plots[0][0].yaxis.major_label_text_font_size = "0pt"
for i in range(1, len(parameters)):
for j in range(1, i + 1):
plots[i][j].yaxis.major_label_text_font_size = "0pt"
grid = bokeh.layouts.gridplot(plots, toolbar_location="left")
return grid
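# Illustrative usage sketch, not part of the library API: corner() also
# accepts a plain DataFrame of samples with one column per parameter, so a
# fabricated DataFrame is enough to exercise it.
def _sketch_corner():
    rng = np.random.default_rng(4)
    df_fake = pd.DataFrame(
        dict(alpha=rng.normal(size=1000), beta=rng.normal(size=1000))
    )
    return corner(df_fake, parameters=["alpha", "beta"])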
def contour(
X,
Y,
Z,
levels=None,
p=None,
overlaid=False,
cmap=None,
overlay_grid=False,
fill=False,
fill_palette=None,
fill_alpha=0.75,
line_kwargs={},
**kwargs,
):
"""
Make a contour plot, possibly overlaid on an image.
Parameters
----------
X : 2D Numpy array
Array of x-values, as would be produced using np.meshgrid()
Y : 2D Numpy array
Array of y-values, as would be produced using np.meshgrid()
Z : 2D Numpy array
Array of z-values.
levels : array_like
Levels to plot, ranging from 0 to 1. The contour around a given
level contains that fraction of the total probability if the
contour plot is for a 2D probability density function. By
default, the levels are given by the one, two, three, and four
sigma levels corresponding to a marginalized distribution from
a 2D Gaussian distribution.
p : bokeh plotting object, default None
        If not None, the contours are added to `p`. This option is not
allowed if `overlaid` is True.
overlaid : bool, default False
If True, `Z` is displayed as an image and the contours are
overlaid.
cmap : str or list of hex colors, default None
If `im` is an intensity image, `cmap` is a mapping of
intensity to color. If None, default is 256-level Viridis.
If `im` is a color image, then `cmap` can either be
'rgb' or 'cmy' (default), for RGB or CMY merge of channels.
overlay_grid : bool, default False
If True, faintly overlay the grid on top of image. Ignored if
overlaid is False.
line_kwargs : dict, default {}
        Keyword arguments passed to `p.multi_line()` for rendering the
contour.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()`.
Returns
-------
output : Bokeh plotting object
Plot populated with contours, possible with an image.
"""
if len(X.shape) != 2 or Y.shape != X.shape or Z.shape != X.shape:
raise RuntimeError("All arrays must be 2D and of same shape.")
if overlaid and p is not None:
raise RuntimeError("Cannot specify `p` if showing image.")
# Set defaults
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "y")
if "line_color" not in line_kwargs:
if overlaid:
line_kwargs["line_color"] = "white"
else:
line_kwargs["line_color"] = "black"
line_width = line_kwargs.pop("line_width", 2)
if p is None:
if overlaid:
frame_height = kwargs.pop("frame_height", 300)
frame_width = kwargs.pop("frame_width", 300)
title = kwargs.pop("title", None)
p = image.imshow(
Z,
cmap=cmap,
frame_height=frame_height,
frame_width=frame_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_range=[X.min(), X.max()],
y_range=[Y.min(), Y.max()],
no_ticks=False,
flip=False,
return_im=False,
)
else:
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 300
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 300
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
# Set default levels
if levels is None:
levels = 1.0 - np.exp(-np.arange(0.5, 2.1, 0.5) ** 2 / 2)
# Compute contour lines
if fill or line_width:
xs, ys = _contour_lines(X, Y, Z, levels)
# Make fills. This is currently not supported
if fill:
raise NotImplementedError("Filled contours are not yet implemented.")
if fill_palette is None:
if len(levels) <= 6:
fill_palette = bokeh.palettes.Greys[len(levels) + 3][1:-1]
elif len(levels) <= 10:
fill_palette = bokeh.palettes.Viridis[len(levels) + 1]
else:
raise RuntimeError(
"Can only have maximally 10 levels with filled contours"
+ " unless user specifies `fill_palette`."
)
elif len(fill_palette) != len(levels) + 1:
raise RuntimeError(
"`fill_palette` must have 1 more entry" + " than `levels`"
)
p.patch(
xs[-1], ys[-1], color=fill_palette[0], alpha=fill_alpha, line_color=None
)
for i in range(1, len(levels)):
x_p = np.concatenate((xs[-1 - i], xs[-i][::-1]))
y_p = np.concatenate((ys[-1 - i], ys[-i][::-1]))
p.patch(x_p, y_p, color=fill_palette[i], alpha=fill_alpha, line_color=None)
p.background_fill_color = fill_palette[-1]
# Populate the plot with contour lines
p.multi_line(xs, ys, line_width=line_width, **line_kwargs)
if overlay_grid and overlaid:
p.grid.level = "overlay"
p.grid.grid_line_alpha = 0.2
return p
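# Illustrative usage sketch, not part of the library API: contours of a 2D
# Gaussian bump evaluated on a meshgrid, matching the docstring's description
# of X, Y, and Z.
def _sketch_contour():
    x = np.linspace(-3.0, 3.0, 200)
    y = np.linspace(-3.0, 3.0, 200)
    X, Y = np.meshgrid(x, y)
    Z = np.exp(-(X ** 2 + Y ** 2) / 2)
    return contour(X, Y, Z)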
def mpl_cmap_to_color_mapper(cmap):
"""
Convert a Matplotlib colormap to a bokeh.models.LinearColorMapper
instance.
Parameters
----------
cmap : str
A string giving the name of the color map.
Returns
-------
output : bokeh.models.LinearColorMapper instance
        A linear color mapper with 256 gradations.
Notes
-----
See https://matplotlib.org/examples/color/colormaps_reference.html
for available Matplotlib colormaps.
"""
cm = mpl_get_cmap(cmap)
palette = [rgb_frac_to_hex(cm(i)[:3]) for i in range(256)]
return bokeh.models.LinearColorMapper(palette=palette)
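# Illustrative usage sketch, not part of the library API: 'viridis' is just an
# example Matplotlib colormap name; the resulting mapper can be attached to a
# Bokeh color bar or image glyph.
def _sketch_color_mapper():
    mapper = mpl_cmap_to_color_mapper("viridis")
    return bokeh.models.ColorBar(color_mapper=mapper)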
def _corner_scatter(
cds,
cds_div,
x,
y,
datashade,
alpha,
cmap,
show_contours,
divergence_color,
contour_color,
contour_lines_kwargs,
figure_kwargs,
):
"""Create scatter plot for non-datashaded corner plot."""
if datashade:
cmap_arg = list(cmap) if type(cmap) == tuple else cmap
xlim = tuple(figure_kwargs.pop("x_range"))
ylim = tuple(figure_kwargs.pop("y_range"))
p = hv.render(
hv.operation.datashader.dynspread(
hv.operation.datashader.datashade(
hv.Points(data=cds.data, kdims=[x, y]),
cmap=cmap_arg,
cnorm="linear",
)
).opts(show_grid=True, align="end", xlim=xlim, ylim=ylim, **figure_kwargs)
)
else:
p = bokeh.plotting.figure(align="end", **figure_kwargs)
p.circle(
source=cds,
x=x,
y=y,
size=2,
alpha=alpha,
color=cmap,
nonselection_fill_alpha=0,
nonselection_line_alpha=0,
)
if divergence_color is not None:
p.circle(
source=cds_div,
x=x,
y=y,
size=2,
color=divergence_color,
nonselection_fill_alpha=0,
nonselection_line_alpha=0,
)
if show_contours:
xs, ys = contour_lines_from_samples(
cds.data[x], cds.data[y], **contour_lines_kwargs
)
p.multi_line(xs, ys, line_color=contour_color, line_width=2)
return p
def _ecdf_vals(data, staircase=False, complementary=False):
"""Get x, y, values of an ECDF for plotting.
Parameters
----------
data : ndarray
One dimensional Numpy array with data.
staircase : bool, default False
        If True, generate x and y values for a staircase ECDF. If
        False, generate x and y values for an ECDF drawn as dots.
complementary : bool
If True, return values for ECCDF.
Returns
-------
x : ndarray
x-values for plot
y : ndarray
y-values for plot
"""
x = np.sort(data)
y = np.arange(1, len(data) + 1) / len(data)
if staircase:
x, y = cdf_to_staircase(x, y)
if complementary:
y = 1 - y
elif complementary:
y = 1 - y + 1 / len(y)
return x, y
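# Illustrative check, not part of the library API: for three hypothetical data
# points, the "dots" ECDF places them at heights 1/3, 2/3, and 1.
def _sketch_ecdf_vals():
    x, y = _ecdf_vals(np.array([1.0, 2.0, 4.0]))
    assert np.allclose(x, [1.0, 2.0, 4.0])
    assert np.allclose(y, [1 / 3, 2 / 3, 1.0])
    return x, y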
@numba.jit(nopython=True)
def _ecdf_arbitrary_points(data, x):
"""Give the value of an ECDF at arbitrary points x."""
y = np.arange(len(data) + 1) / len(data)
return y[np.searchsorted(np.sort(data), x, side="right")]
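# Illustrative check, not part of the library API: the ECDF of the
# hypothetical data [1, 2, 4] evaluated at x = 3 is 2/3, since two of the
# three points are <= 3.
def _sketch_ecdf_arbitrary_points():
    val = _ecdf_arbitrary_points(np.array([1.0, 2.0, 4.0]), np.array([3.0]))
    assert np.allclose(val, [2 / 3])
    return val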
def _ecdf_from_samples(df, name, ptiles, x):
"""Compute ECDFs and percentiles from samples."""
df_ecdf = pd.DataFrame()
    df_ecdf_vals = pd.DataFrame()
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
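    # The asserts below are consistent with 0.5 being a relative tolerance:
    # a drift to 0.25/0.75 (exactly 50% away from the 0.5 targets) does not
    # trigger the algo, while 0.24/0.76 does.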
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
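# With a flat commission of 1 per trade, only 9 shares at 100 fit inside the 1000 of
# capital (10 * 100 + 1 would overshoot): 9 * 100 + 1 = 901 is spent, leaving capital
# of 99 and a post-commission strategy value of 999, which the asserts below check.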
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# returns the 2 securities we do have when all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# returns no securities when all_or_none is true and fewer than n are available
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import array
import functools
import gzip
import operator
import os
import struct
import numpy as np
import pandas as pd
import requests
class DataManager:
_train_data_url: str
_train_labels_url: str
_test_data_url: str
_test_labels_url: str
_train_data_loc: str
_train_labels_loc: str
_test_data_loc: str
_test_labels_loc: str
_data_columns: list
_classification_features_labels: list
def __init__(self):
self._train_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
self._test_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
self._train_data_loc = "systemds/examples/tutorials/adult/train_data.csv"
self._test_data_loc = "systemds/examples/tutorials/adult/test_data.csv"
self._data_columns = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status", "occupation",
"relationship", "race", "sex", "capital-gain", "capital-loss", "hours-per-week", "native-country",
"income"]
self._classification_features_labels = [{'workclass': ['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay', 'Never-worked']},
{'education': ['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th', '7th-8th', '12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool']},
{'marital-status': ['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed', 'Married-spouse-absent', 'Married-AF-spouse']},
{'occupation': ['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty', 'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving', 'Priv-house-serv', 'Protective-serv', 'Armed-Forces']},
{'relationship': ['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried']},
{'race': ['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black']},
{'sex': ['Female', 'Male']},
{'native-country': ['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany', 'Outlying-US(Guam-USVI-etc)', 'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran', 'Honduras', 'Philippines', 'Italy', 'Poland', 'Jamaica', 'Vietnam', 'Mexico', 'Portugal', 'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador', 'Taiwan', 'Haiti', 'Columbia', 'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia', 'El-Salvador', 'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands']},
{'income': ['>50K', '<=50K']}]
def get_train_data(self) -> np.array:
self._get_data(self._train_data_url, self._train_data_loc)
return self._parse_data(self._train_data_loc)\
.drop(labels=self._data_columns[len(self._data_columns)-1], axis=1).to_numpy()
def get_train_labels(self) -> np.array:
self._get_data(self._train_data_url, self._train_data_loc)
data_list = self._data_columns.copy()
data_list.pop(len(self._data_columns)-1)
data = self._parse_data(self._train_data_loc).drop(labels=data_list, axis=1)
return data.to_numpy().flatten()
def get_test_data(self) -> np.array:
self._get_data(self._test_data_url, self._test_data_loc)
return self._parse_data(self._test_data_loc)\
.drop(labels=self._data_columns[len(self._data_columns)-1], axis=1).iloc[1:].to_numpy()
def get_test_labels(self) -> np.array:
self._get_data(self._test_data_url, self._test_data_loc)
data_list = self._data_columns.copy()
data_list.pop(len(self._data_columns)-1)
data = self._parse_data(self._test_data_loc).drop(labels=data_list, axis=1).iloc[1:]
data["income"] = data["income"].str.replace('>50K.','>50K', regex=False)
data["income"] = data["income"].str.replace('<=50K.','<=50K', regex=False)
return data.to_numpy().flatten()
def _parse_data(self, loc) -> pd.DataFrame:
return pd.read_csv(loc, header=None, names=self._data_columns)
def _get_data(self, url, loc):
if not os.path.isfile(loc):
myfile = requests.get(url)
folder = os.path.dirname(loc)
if not os.path.isdir(folder):
os.makedirs(folder)
with open(loc, 'wb') as f:
f.write(myfile.content)
def get_preprocessed_dataset(self, interpolate=False, standardize=False, dimred=0):
train_array = np.concatenate([self.get_train_data(), self.get_train_labels()[...,np.newaxis]], axis=1)
train_dataset = pd.DataFrame(train_array, columns=self._data_columns)
test_array = np.concatenate([self.get_test_data(), self.get_test_labels()[...,np.newaxis]], axis=1)
test_dataset = pd.DataFrame(test_array, columns=self._data_columns)
import os
import numpy as np
import pandas as pd
from utils.data import join_tables, load_data, save_data
def test_save_data(tmpdir):
"""test save data to csv"""
save_data(tmpdir.join("test.csv"), [["Poprad", "Poprad", "A", "Adam", "Adam"]])
with open(tmpdir.join("test.csv"), mode="r") as file:
lines = file.readlines()
assert len(lines) == 2, "file has wrong number of lines"
assert lines[0] == ";".join(["okres", "katastralne uzemie", "prve pismeno", "priezvisko", f"vlastnik{os.linesep}"])
assert lines[1] == ";".join(["Poprad", "Poprad", "A", "Adam", f"Adam{os.linesep}"])
def test_load_data(tmpdir):
"""test load data from csv"""
save_data(tmpdir.join("test.csv"), [["Poprad", "Poprad", "A", "Adam", "Adam"]])
df = load_data(tmpdir.join("test.csv"))
assert df.shape == (1, 5), "loaded data has wrong size"
assert df.iloc[0]["okres"] == "Poprad"
assert df.iloc[0]["katastralne uzemie"] == "Poprad"
assert df.iloc[0]["prve pismeno"] == "A"
assert df.iloc[0]["priezvisko"] == "Adam"
assert df.iloc[0]["vlastnik"] == "Adam"
def test_join_data():
"""tsst join data"""
df1 = pd.DataFrame({"A": [1, 2, 3], "B": [1, 1, 1]})
df2 = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 2]})
import argparse
import datetime
import json
import os
import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
# Note: Transformer, get_mimic_dataset and create_target_masks are expected to come
# from local project modules.
def top_k_logits(logits, k):
if k == 0:
# no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
def top_p_logits(logits, p):
"""Nucleus sampling"""
batch, _ = logits.shape.as_list()
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
# number of indices to include
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
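# A minimal sketch (not part of the original script) of how the two filters above are
# meant to be applied to a (batch, vocab) logits tensor before sampling; the logit
# values below are invented purely for illustration.
def _logit_filter_demo():
    logits = tf.constant([[4.0, 3.0, 1.0, 0.5]])      # batch=1, vocab=4
    keep_top2 = top_k_logits(logits, k=2)             # all but the 2 largest logits -> -1e10
    keep_nucleus = top_p_logits(logits, p=0.9)        # smallest set covering 90% of probability
    return keep_top2, keep_nucleus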
def evaluate(inp_img, transformer, tokenizer, max_length=128):
# The first token to the transformer should be the start token
output = tf.convert_to_tensor([[tokenizer.token_to_id('<s>')]])
for _ in tqdm.tqdm(range(max_length)):
combined_mask = create_target_masks(output)
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = transformer(inp_img,
output,
False,
combined_mask,
None)
# select the last word from the seq_len dimension
predictions = predictions[:, -1, :] # (batch_size, vocab_size)
# predictions = top_k_logits(predictions, k=6)
# predictions = top_p_logits(predictions, p=0.5)
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)[:, tf.newaxis]
# predicted_id = tf.random.categorical(predictions, num_samples=1, dtype=tf.int32)
# return the result if the predicted_id is equal to the end token
if predicted_id == 2: # stop token #tokenizer_en.vocab_size + 1:
return tf.squeeze(output, axis=0)[1:], attention_weights
# concatentate the predicted_id to the output which is given to the decoder
# as its input.
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0)[1:], attention_weights
def main(args, hparams):
# Get test dataset
test_dataset, tokenizer = get_mimic_dataset(args.csv_root, args.vocab_root, args.mimic_root,
batch_size=args.batch_size, mode='test')
# Define model
target_vocab_size = tokenizer.get_vocab_size()
transformer = Transformer(hparams['n_layer'], hparams['d_model'],
hparams['n_head'], hparams['dff'],
target_vocab_size=target_vocab_size,
rate=hparams['dropout_rate'],
input_shape=(hparams['img_x'], hparams['img_y'], hparams['img_ch']))
# Restore checkpoint
ckpt = tf.train.Checkpoint(transformer=transformer)
checkpoint_path = os.path.join('checkpoints', args.model_name)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
if latest_checkpoint:
print(f'{datetime.datetime.now()}: [*] Restoring Checkpoint: {latest_checkpoint}')
ckpt.restore(latest_checkpoint)
else:
print(f'{datetime.datetime.now()}: [*] No checkpoints found. Exiting.')
exit(0)
#################### Run inference ####################
pred_txt = dict()
true_txt = dict()
t = tqdm.tqdm(enumerate(test_dataset), total=len(test_dataset))
for (idx, (inp, tar)) in t:
true_txt[idx] = tokenizer.decode(np.trim_zeros(tar[0].numpy(), 'b')[1:-1])
result, attention_weights = evaluate(inp, transformer=transformer, tokenizer=tokenizer)
pred_txt[idx] = tokenizer.decode(result)
pred_txt_df = pd.DataFrame.from_dict(pred_txt, orient='index')
true_txt_df = pd.DataFrame.from_dict(true_txt, orient='index')
# -*- coding: utf-8 -*-
"""Data_Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/106zvCx_5_p0TlKI3zkCcEb0VbnWwdahx
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
import re
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
"""
1. Preprocessing Functions:
"""
def calc_change_sentiment(data):
change_in_sent = []
change_in_sent.append(data['compound'][0])
for i in range(1,len(data['compound'])):
if data['compound'][i] == 0:
change_in_sent.append(0)
elif data['compound'][i] < 0 or data['compound'][i] > 0:
dif = data['compound'][i] - data['compound'][(i-1)]
change_in_sent.append(dif)
return change_in_sent
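# A small worked example (values invented) of what calc_change_sentiment returns: the
# first entry is the raw compound score, each later entry is the change versus the
# previous period, and exact zeros are passed through unchanged.
_sentiment_demo = pd.DataFrame({'compound': [0.10, 0.25, 0.0, -0.05]})
_sentiment_deltas = calc_change_sentiment(_sentiment_demo)   # ~[0.10, 0.15, 0, -0.05]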
def remove_pattern(input_txt, pattern):
r = re.findall(pattern, input_txt)
for i in r:
input_txt = re.sub(i, '', input_txt)
return input_txt
def clean_tweets(tweets):
tweets = np.vectorize(remove_pattern)(tweets, "RT @[\w]*:")
tweets = np.vectorize(remove_pattern)(tweets, "@[\w]*")
tweets = np.vectorize(remove_pattern)(tweets, "https?://[A-Za-z0-9./]*")
tweets = np.vectorize(lambda t: re.sub("[^a-zA-Z]", " ", t))(tweets)  # keep letters only; everything else becomes a space
return tweets
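# Illustration only (the tweet text is invented): retweet prefixes, @mentions and URLs
# are stripped by the patterns above, and remaining non-letters become spaces.
_tweet_demo = clean_tweets(np.array(["RT @user: Buy $AAPL now! https://t.co/x"]))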
def classify_news(dataframe):
day23, day24, day25, day26, day27, day28, day29, day30, day31, day32, day33, day34, day35, day36, day37, day38 = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
for i in range(len(dataframe['timestamp'])):
if dataframe['timestamp'][i].day == 23 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day23.append(i)
elif dataframe['timestamp'][i].day == 24 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day24.append(i)
elif dataframe['timestamp'][i].day == 25 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day25.append(i)
elif dataframe['timestamp'][i].day == 26 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day26.append(i)
elif dataframe['timestamp'][i].day == 27 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day27.append(i)
elif dataframe['timestamp'][i].day == 28 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day28.append(i)
elif dataframe['timestamp'][i].day == 29 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day29.append(i)
elif dataframe['timestamp'][i].day == 30 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day30.append(i)
elif dataframe['timestamp'][i].day == 1 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day31.append(i)
elif dataframe['timestamp'][i].day == 2 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day32.append(i)
elif dataframe['timestamp'][i].day == 3 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day33.append(i)
elif dataframe['timestamp'][i].day == 4 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day34.append(i)
elif dataframe['timestamp'][i].day == 5 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day35.append(i)
elif dataframe['timestamp'][i].day == 6 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day36.append(i)
elif dataframe['timestamp'][i].day == 7 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day37.append(i)
elif dataframe['timestamp'][i].day == 8 and (dataframe['timestamp'][i].hour <= 15 and dataframe['timestamp'][i].hour >= 9):
day38.append(i)
else:
pass
news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38 = dataframe.iloc[day23],dataframe.iloc[day24],dataframe.iloc[day25], dataframe.iloc[day26], dataframe.iloc[day27],dataframe.iloc[day28],dataframe.iloc[day29],dataframe.iloc[day30],dataframe.iloc[day31], dataframe.iloc[day32],dataframe.iloc[day33],dataframe.iloc[day34],dataframe.iloc[day35],dataframe.iloc[day36],dataframe.iloc[day37],dataframe.iloc[day38]
return news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38
def preprocess_headlines(data):
data.drop_duplicates(subset='headline',keep=False, inplace=True)
data.drop(['ticker','neg','neu','pos'], axis=1, inplace=True)
data.rename(columns={'date_time':'timestamp'},inplace=True)
data.set_index('timestamp', inplace=True)
data_30m = data.resample('30min').median().ffill().reset_index()
headline_sma = data_30m['compound'].rolling(3).mean()
data_30m['Compound SMA(3) Headlines'] = headline_sma
change_in_sent=calc_change_sentiment(data_30m)
data_30m['change in sentiment headlines'] = change_in_sent
data_30m['change in sentiment headlines (t-1)'] = data_30m['change in sentiment headlines'].shift(1)
news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38 = classify_news(data_30m)
news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red = news_d23.iloc[4:],news_d24.iloc[1:],news_d25.iloc[1:],news_d28.iloc[1:],news_d29.iloc[1:],news_d30.iloc[1:],news_d31.iloc[1:],news_d32.iloc[1:],news_d35.iloc[1:],news_d36.iloc[1:],news_d37.iloc[1:],news_d38.iloc[1:]
frames_news = [news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red]
processed_headlines = pd.concat(frames_news)
return processed_headlines
def preprocess_posts(dataframe):
dataframe.drop(['neg','neu','pos','followers_count'],axis=1,inplace=True)
dataframe['timestamp'] = dataframe['timestamp'].dt.tz_localize('UTC').dt.tz_convert('America/Montreal').dt.tz_localize(None)
dataframe.set_index('timestamp', inplace=True)
twitter_df_30m = dataframe.resample('30min').median().ffill().reset_index()
change_in_sent = calc_change_sentiment(twitter_df_30m)
twitter_sma = twitter_df_30m['compound'].rolling(3).mean()
twitter_df_30m['Compound SMA(3) Twitter'] = twitter_sma
twitter_df_30m['change in sentiment twitter'] = change_in_sent
twitter_df_30m['change in sentiment twitter (t-1)'] = twitter_df_30m['change in sentiment twitter'].shift(1)
tw_news_d23,tw_news_d24,tw_news_d25,tw_news_d26,tw_news_d27,tw_news_d28,tw_news_d29,tw_news_d30,tw_news_d31,tw_news_d32,tw_news_d33,tw_news_d34,tw_news_d35,tw_news_d36,tw_news_d37,tw_news_d38 = classify_news(twitter_df_30m)
tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m, tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m = tw_news_d23.iloc[4:],tw_news_d24.iloc[1:],tw_news_d25.iloc[1:],tw_news_d28.iloc[1:],tw_news_d29.iloc[1:],tw_news_d30.iloc[1:],tw_news_d31.iloc[1:],tw_news_d32.iloc[1:],tw_news_d35.iloc[1:],tw_news_d36.iloc[1:],tw_news_d37.iloc[1:],tw_news_d38.iloc[1:]
frames = [tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m,tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m]
processed_tweets = pd.concat(frames)
return processed_tweets
"""2 Modeling Functions:"""
def baseline_model(data):
pred = data['SMA(3)'][3:]
actu = data['Adj Close'][3:]
rmse = np.sqrt(mean_squared_error(actu,pred))
r2_sco = r2_score(actu,pred)
return rmse, r2_sco
def linear_modeling_no_sentiment(dataframe):
x_var = ['Adj Close','Scaled Volume','SMA(3)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][3:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][3:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
return rmse,r2_sco,rmse2,r2_sco2
def linear_modeling_headlines(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Headlines','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
def linear_model_twitter(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Twitter','SMA(3)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
def multi_model_full(dataframe):
x_var = ['Adj Close','Scaled Volume','compound_y','compound_x','Compound SMA(3) Headlines','Compound SMA(3) Twitter','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
rf_regr = RandomForestRegressor(n_estimators=20, max_depth=600, random_state=42)
rf_regr.fit(X_train,y_train)
preds4 = rf_regr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test, preds4))
r2_sco4 = r2_score(y_test,preds4)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds5 = svr.predict(X_test)
rmse5 = np.sqrt(mean_squared_error(y_test,preds5))
r2_sco5 = r2_score(y_test,preds5)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4,rmse5,r2_sco5
"""## 2. Evaluate Model with Individual Stocks:"""
def import_data(ticker):
# 1. Historical Stock Data:
stock_df = pd.read_csv('Dataset/1.Stock_Data/'+ticker+'_data.csv', index_col=0, parse_dates=['Datetime'])
stock_df['Percent Price Change Within Period (t+1)'] = stock_df['Percent Price Change Within Period'].shift(-1)
# 2. Headline Data:
headlines1 = pd.read_csv('Dataset/2.FinViz_Headline_Data/'+ticker+'_2020-09-23_2020-10-07.csv', index_col=0, parse_dates=['date_time'])
frames = [headlines1]
headlines_df = pd.concat(frames)
headlines_df.drop_duplicates(subset='headline',keep='first',inplace=True)
# 3. Twitter Data:
twitter1 = pd.read_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-09-23_2020-10-07.csv', index_col=0, parse_dates=['timestamp'])
# twitter2 = pd.read_csv('3.Twitter_Data/'+ticker+'_2020-10-07.csv',index_col=0, parse_dates=['timestamp'])
# twitter3 = pd.read_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-10-07_2.csv',index_col=0, parse_dates=['timestamp'])
frames = [twitter1]
twitter_df = pd.concat(frames)
twitter_df.drop_duplicates(subset='tweet_text',keep='first', inplace=True)
twitter_df.sort_values('timestamp',ascending=False,inplace=True)
twitter_df.reset_index(drop=True)
# twitter_df.to_csv('Dataset/3.Twitter_Data/'+ticker+'_2020-09-23_2020-10-07.csv')
return stock_df,headlines_df,twitter_df
def evaluate_models(baseline_df, headline_df, twitter_df):
#1. Baseline:
baseline_rmse, baseline_r2 = baseline_model(baseline_df)
baseline_df2 = baseline_df
baseline_df2['Percent Price Change Within Period (t+1)'] = baseline_df2['Percent Price Change Within Period'].shift(-1)
lm_baseline_rmse, lm_baseline_r2, sgd_baseline_rmse, sgd_baseline_r2 = linear_modeling_no_sentiment(baseline_df2)
#2. Headline Final Merge:
headlines_final = preprocess_headlines(headline_df)
with_headlines_df = baseline_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_headlines_df['Percent Price Change Within Period (t+1)'] = with_headlines_df['Percent Price Change Within Period'].shift(-1)
#3. Twitter Final Merge:
final_twitter = preprocess_posts(twitter_df)
with_twitter_df = baseline_df.merge(final_twitter, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_twitter_df['Percent Price Change Within Period (t+1)'] = with_twitter_df['Percent Price Change Within Period'].shift(-1)
full_df = with_twitter_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
full_df['Percent Price Change Within Period (t+1)'] = full_df['Percent Price Change Within Period'].shift(-1)
#5. Evaluating Models:
lm_headlines_rmse, lm_headlines_r2, sgd_headlines_rmse, sgd_headlines_r2,xgb_headlines_rmse,xgb_headlines_r2,svr_headlines_rmse,svr_headlines_r2 = linear_modeling_headlines(with_headlines_df)
lm_twitter_rmse, lm_twitter_r2, sgd_twitter_rmse, sgd_twitter_r2,xgb_twitter_rmse,xgb_twitter_r2,svr_twitter_rmse,svr_twitter_r2 = linear_model_twitter(with_twitter_df)
lm_all_rmse, lm_all_r2, sgd_all_rmse, sgd_all_r2, xgb_all_rmse, xgb_all_r2, rf_all_rmse, rf_all_r2,svr_all_rmse,svr_all_r2 = multi_model_full(full_df)
result_dict = {
'RMSE - Baseline':baseline_rmse, 'R2 - Baseline':baseline_r2, 'Linear RMSE - Baseline':lm_baseline_rmse, 'SGD RMSE - Baseline':sgd_baseline_rmse,
'Linear RMSE - Only Headlines': lm_headlines_rmse, 'SGD RMSE - Only Headlines':sgd_headlines_rmse, 'XGB RMSE - Only Headlines':xgb_headlines_rmse, 'SVR RMSE - Only Headlines':svr_headlines_rmse,
'Linear RMSE - Only Twitter':lm_twitter_rmse, 'SGD RMSE - Only Twitter':sgd_twitter_rmse, 'XGB RMSE - Only Twitter':xgb_twitter_rmse, 'SVR RMSE - Only Twitter':svr_twitter_rmse,
'Linear RMSE - All':lm_all_rmse, 'SGD RMSE - All':sgd_all_rmse, 'XGB RMSE - All':xgb_all_rmse, 'RF RMSE - All':rf_all_rmse,'SVR RMSE - All':svr_all_rmse
}
#7. Convert to DataFrame:
result_df = pd.DataFrame.from_dict(result_dict, orient='index', columns=['Values'])
#result_df.to_csv('~/LighthouseLabs-Final/Report_Analysis/AAPL_complete_analysis.csv')
return result_df, full_df
stock_df,headlines_df,twitter_df = import_data('AAPL')
result_df, full_df = evaluate_models(stock_df,headlines_df,twitter_df)
import seaborn as sn
from matplotlib.pyplot import figure
corrMatrix = full_df.corr()
plt.figure(figsize=(20,15))
sn.heatmap(corrMatrix, annot=True)
plt.show()
i = round(len(full_df['Percent Price Change Within Period (t+1)'])*0.6)
x_var_base=['Adj Close','Scaled Volume','SMA(3)']
x_var_headlines=['Adj Close','Scaled Volume','compound_y','Compound SMA(3) Headlines','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)']
x_var_twitter=['Adj Close','Scaled Volume','compound_x','Compound SMA(3) Twitter','SMA(3)','change in sentiment twitter','change in sentiment twitter (t-1)']
x_var_full=['Adj Close','Scaled Volume','compound_y','compound_x','Compound SMA(3) Headlines','Compound SMA(3) Twitter','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)','change in sentiment twitter','change in sentiment twitter (t-1)']
X_train_base,X_test_base=full_df[x_var_base][:i],full_df[x_var_base][i:-1]
X_predic_base = full_df[x_var_base][:-1]
X_train_headlines,X_test_headlines=full_df[x_var_headlines][:i],full_df[x_var_headlines][i:-1]
X_predic_headlines = full_df[x_var_headlines][:-1]
X_train_twitter,X_test_twitter=full_df[x_var_twitter][:i],full_df[x_var_twitter][i:-1]
X_predic_twitter = full_df[x_var_twitter][:-1]
X_train_full, X_test_full = full_df[x_var_full][:i], full_df[x_var_full][i:-1]
X_predic_full = full_df[x_var_full][:-1]
y_train, y_test = full_df['Percent Price Change Within Period (t+1)'][:i], full_df['Percent Price Change Within Period (t+1)'][i:-1]
lm = LinearRegression()
lm.fit(X_train_base,y_train)
preds1 = lm.predict(X_predic_base)
preds1 = np.append(preds1,np.NaN)
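# The models predict the percent price change for the next period, so the lines below
# map each prediction back onto a price level: predicted_price = close * (1 + pct/100).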
full_df['base price predictions linear'] = ((preds1/100) * full_df['Adj Close']) + full_df['Adj Close']
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_headlines,y_train)
preds3 = svr.predict(X_predic_headlines)
preds3 = np.append(preds3,np.NaN)
full_df['headlines price predictions svr'] = ((preds3/100) * full_df['Adj Close']) + full_df['Adj Close']
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_twitter,y_train)
preds4 = svr.predict(X_predic_twitter)
preds4 = np.append(preds4,np.NaN)
full_df['twitter price predictions svr'] = ((preds4/100) * full_df['Adj Close']) + full_df['Adj Close']
# lm = LinearRegression()
# lm.fit(X_train_full,y_train)
# preds5 = lm.predict(X_predic_full)
svr = SVR(kernel='rbf', C=0.2, epsilon=0.0001)
svr.fit(X_train_full,y_train)
preds5 = svr.predict(X_predic_full)
preds5 = np.append(preds5,np.NaN)
full_df['full price predictions linear'] = ((preds5/100) * full_df['Adj Close']) + full_df['Adj Close']
fig = plt.figure(figsize=(20,30))
price_ax = plt.subplot(2,1,1)
price_ax.plot(full_df.index[:-1], full_df['Adj Close'][:-1], label='Adj Close')
price_ax.plot(full_df.index[:-1], full_df['SMA(3)'][:-1], label='SMA(3)')
price_ax.plot(full_df.index[:-1], full_df['base price predictions linear'][:-1], label='Predictions (base)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['full price predictions svr'][:-1], label='Full SVR training fit')
price_ax.plot(full_df.index[:-1], full_df['full price predictions linear'][:-1], label='Predictions (Full)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['headlines price predictions svr'][:-1], label='Headlines SVR training fit')
# price_ax.plot(full_df.index[i:-1], full_df['headlines price predictions svr'][i:-1], label='Predictions (Headlines)',linewidth=2)
# price_ax.plot(full_df.index[:-1], full_df['twitter price predictions svr'][:-1], label='Twitter SVR training fit')
# price_ax.plot(full_df.index[i:-1], full_df['twitter price predictions svr'][i:-1], label='Predictions (Twitter)',linewidth=2)
price_ax.set_ylabel('Price ($)')
price_ax.grid(which='major', color='k', linestyle='-.', linewidth=0.5)
price_ax.minorticks_on()
price_ax.grid(which='minor', color='k', linestyle=':', linewidth=0.3)
price_ax.legend()
roc_ax = plt.subplot(2,1,2, sharex=price_ax)
roc_ax.plot(full_df.index, full_df['Compound SMA(3) Headlines'],label='Headline')
roc_ax.plot(full_df.index, full_df['Compound SMA(3) Twitter'],label='Twitter')
roc_ax.set_xlabel('Time Period (30 minutes)')
roc_ax.set_ylabel('Headline Sentiment')
roc_ax.grid(which="major", color='k', linestyle='-.', linewidth=0.5)
roc_ax.minorticks_on()
roc_ax.grid(which='minor', color='k', linestyle=':', linewidth=0.3)
roc_ax.legend()
fig.subplots_adjust(hspace=0.1)
"""## 3. Evaluate Model with Multiple Stocks:"""
def import_data2(ticker,ticker2,ticker3,ticker4,ticker5,ticker6,ticker7,ticker8,ticker9,ticker10,ticker11,ticker12,ticker13):
stock_path = 'Dataset/1.Stock_Data/'
headline_path = 'Dataset/2.FinViz_Headline_Data/'
twitter_path = 'Dataset/3.Twitter_Data/'
latest_headlines='10-07'
# 1. Historical Stock Data:------------------------------------------------------------------------------------------
stock_df1 = pd.read_csv(stock_path+ticker+'_data.csv', index_col=0,parse_dates=['Datetime'])
stock_df2 = pd.read_csv(stock_path+ticker2+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df3 = pd.read_csv(stock_path+ticker3+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df4 = pd.read_csv(stock_path+ticker4+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df5 = pd.read_csv(stock_path+ticker5+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df6 = pd.read_csv(stock_path+ticker6+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df7 = pd.read_csv(stock_path+ticker7+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df8 = pd.read_csv(stock_path+ticker8+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df9 = pd.read_csv(stock_path+ticker9+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df10 = pd.read_csv(stock_path+ticker10+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df11 = pd.read_csv(stock_path+ticker11+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df12 = pd.read_csv(stock_path+ticker12+'_data.csv',index_col=0, parse_dates=['Datetime'])
stock_df13 = pd.read_csv(stock_path+ticker13+'_data.csv',index_col=0, parse_dates=['Datetime'])
# 2. Headline Data: ----------------------------------------------------------------------------------------------------
headlines1 = pd.read_csv(headline_path+ticker+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines2 = pd.read_csv(headline_path+ticker2+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines3 = pd.read_csv(headline_path+ticker3+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines4 = pd.read_csv(headline_path+ticker4+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines5 = pd.read_csv(headline_path+ticker5+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines6 = pd.read_csv(headline_path+ticker6+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines7 = pd.read_csv(headline_path+ticker7+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines8 = pd.read_csv(headline_path+ticker8+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines9 = pd.read_csv(headline_path+ticker9+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines10 = pd.read_csv(headline_path+ticker10+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines11 = pd.read_csv(headline_path+ticker11+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines12 = pd.read_csv(headline_path+ticker12+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
headlines13 = pd.read_csv(headline_path+ticker13+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0, parse_dates=['date_time'])
# 3. Twitter Data:----------------------------------------------------------------------------------------------------
twitter1 = pd.read_csv(twitter_path+ticker+'_2020-09-23_2020-'+latest_headlines+'.csv', index_col=0,parse_dates=['timestamp'])
twitter2 = pd.read_csv(twitter_path+ticker2+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter3 = pd.read_csv(twitter_path+ticker3+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter4 = pd.read_csv(twitter_path+ticker4+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter5 = pd.read_csv(twitter_path+ticker5+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter6 = pd.read_csv(twitter_path+ticker6+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter7 = pd.read_csv(twitter_path+ticker7+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter8 = pd.read_csv(twitter_path+ticker8+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter9 = pd.read_csv(twitter_path+ticker9+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter10 = pd.read_csv(twitter_path+ticker10+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter11 = pd.read_csv(twitter_path+ticker11+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter12 = pd.read_csv(twitter_path+ticker12+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
twitter13 = pd.read_csv(twitter_path+ticker13+'_2020-09-23_2020-'+latest_headlines+'.csv',index_col=0, parse_dates=['timestamp'])
return stock_df1,headlines1,twitter1, stock_df2,headlines2,twitter2, stock_df3,headlines3,twitter3, stock_df4,headlines4,twitter4, stock_df5,headlines5,twitter5, stock_df6,headlines6,twitter6 , stock_df7,headlines7,twitter7, stock_df8,headlines8,twitter8, stock_df9,headlines9,twitter9, stock_df10,headlines10,twitter10, stock_df11,headlines11,twitter11, stock_df12,headlines12,twitter12, stock_df13,headlines13,twitter13
def cleaning_df(stock_df, headline_df, twitter_df):
#2. Headline Final Merge:
headlines_final = preprocess_headlines(headline_df)
with_headlines_df = stock_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_headlines_df['t+1'] = with_headlines_df['Adj Close'].shift(-1)
#3. Twitter Final Merge:
final_twitter = preprocess_posts(twitter_df)
with_twitter_df = stock_df.merge(final_twitter, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
with_twitter_df['t+1'] = with_twitter_df['Adj Close'].shift(-1)
#4. Full Merge:
full_df = with_twitter_df.merge(headlines_final, left_on='Datetime', right_on='timestamp').drop('timestamp',axis=1)
full_df['t+1'] = full_df['Adj Close'].shift(-1)
return with_headlines_df,with_twitter_df,full_df
stock_df1,headlines1,twitter1, stock_df2,headlines2,twitter2, stock_df3,headlines3,twitter3, stock_df4,headlines4,twitter4, stock_df5,headlines5,twitter5, stock_df6,headlines6,twitter6 , stock_df7,headlines7,twitter7, stock_df8,headlines8,twitter8, stock_df9,headlines9,twitter9, stock_df10,headlines10,twitter10, stock_df11,headlines11,twitter11, stock_df12,headlines12,twitter12, stock_df13,headlines13,twitter13 = import_data2('TSLA','AMZN','AAPL','GOOG', 'FB', 'NFLX', 'CVX','GS','JNJ','NVDA','PFE','NKE','MSFT')
stock_df1['Percent Price Change Within Period (t+1)'] = stock_df1['Percent Price Change Within Period'].shift(-1)
stock_df2['Percent Price Change Within Period (t+1)'] = stock_df2['Percent Price Change Within Period'].shift(-1)
stock_df3['Percent Price Change Within Period (t+1)'] = stock_df3['Percent Price Change Within Period'].shift(-1)
stock_df4['Percent Price Change Within Period (t+1)'] = stock_df4['Percent Price Change Within Period'].shift(-1)
stock_df5['Percent Price Change Within Period (t+1)'] = stock_df5['Percent Price Change Within Period'].shift(-1)
stock_df6['Percent Price Change Within Period (t+1)'] = stock_df6['Percent Price Change Within Period'].shift(-1)
stock_df7['Percent Price Change Within Period (t+1)'] = stock_df7['Percent Price Change Within Period'].shift(-1)
stock_df8['Percent Price Change Within Period (t+1)'] = stock_df8['Percent Price Change Within Period'].shift(-1)
stock_df9['Percent Price Change Within Period (t+1)'] = stock_df9['Percent Price Change Within Period'].shift(-1)
stock_df10['Percent Price Change Within Period (t+1)'] = stock_df10['Percent Price Change Within Period'].shift(-1)
stock_df11['Percent Price Change Within Period (t+1)'] = stock_df11['Percent Price Change Within Period'].shift(-1)
stock_df12['Percent Price Change Within Period (t+1)'] = stock_df12['Percent Price Change Within Period'].shift(-1)
stock_df13['Percent Price Change Within Period (t+1)'] = stock_df13['Percent Price Change Within Period'].shift(-1)
tsla_headlines_df, tsla_twitter_df, tsla_full_df = cleaning_df(stock_df1, headlines1, twitter1)
amzn_headlines_df, amzn_twitter_df, amzn_full_df = cleaning_df(stock_df2, headlines2, twitter2)
aapl_headlines_df, aapl_twitter_df, aapl_full_df = cleaning_df(stock_df3, headlines3, twitter3)
goog_headlines_df, goog_twitter_df, goog_full_df = cleaning_df(stock_df4, headlines4, twitter4)
fb_headlines_df, fb_twitter_df, fb_full_df = cleaning_df(stock_df5, headlines5, twitter5)
nflx_headlines_df, nflx_twitter_df, nflx_full_df = cleaning_df(stock_df6, headlines6, twitter6)
cvx_headlines_df, cvx_twitter_df, cvx_full_df = cleaning_df(stock_df7, headlines7, twitter7)
gs_headlines_df, gs_twitter_df, gs_full_df = cleaning_df(stock_df8, headlines8, twitter8)
jnj_headlines_df, jnj_twitter_df, jnj_full_df = cleaning_df(stock_df9, headlines9, twitter9)
nvda_headlines_df, nvda_twitter_df, nvda_full_df = cleaning_df(stock_df10, headlines10, twitter10)
pfe_headlines_df, pfe_twitter_df, pfe_full_df = cleaning_df(stock_df11, headlines11, twitter11)
nke_headlines_df, nke_twitter_df, nke_full_df = cleaning_df(stock_df12, headlines12, twitter12)
msft_headlines_df, msft_twitter_df, msft_full_df = cleaning_df(stock_df13, headlines13, twitter13)
stock_frames = [stock_df1, stock_df2, stock_df3, stock_df4, stock_df5, stock_df6, stock_df7, stock_df8, stock_df9, stock_df10, stock_df11, stock_df12, stock_df13]
full_stocks = pd.concat(stock_frames)
headline_frames = [tsla_headlines_df, amzn_headlines_df, aapl_headlines_df, goog_headlines_df,fb_headlines_df,nflx_headlines_df,cvx_headlines_df,gs_headlines_df,jnj_headlines_df,nvda_headlines_df,pfe_headlines_df,nke_headlines_df,msft_headlines_df]
full_headlines =
|
pd.concat(headline_frames)
|
pandas.concat
|
#Données extraite d'une : Etude du profil de la méthylation de l'ADN des cellules mononucléées du sang périphérique chez des personnes âgées sédentaires, sportives et des jeunes sédentaires.
#Data extracted: Study of the DNA methylation profile of peripheral blood mononuclear cells (PBMC) in sedentary, sporty and sedentary elderly people.
#Ouverture du référentiel illumina 450k
#Open the 450k illumina repository
class Read():
def __init__(self, fichier):
self.fichier = fichier
def lecture(self):
import csv
dataa =[]
f = open(self.fichier, "r")
f_csv = csv.reader(f,delimiter=" ")
data = list(f_csv)
data = data[38:]
for d in data:
for d1 in d:
r = d1.split("\t")
dataa.append(r)
return dataa
def liste_new (self, regex):
dic ={}
import re
da= self.lecture()
for d in da:
if d[21] =='':
d[21]= d[0]
for r in regex:
if re.search(r, d[21]) :
d[21] = d[21].replace('"','')
d[21] = d[21].split(";")
d[21]= d[21].pop()
d[21]="".join(d[21])
if d[21] in dic:
dic[d[21]]+="_"+d[0]
else:
dic[d[21]]= d[0]
for k,v in dic.items():
dic[k]= v.split("_")
return dic
#Ouverture sélective des dossiers des patients par la 1ère lettre grâce à la fonction glob.glob().
#Selective opening of patients' files by the first letter thanks to the function glob.glob ().
class Representation_genetique():
def __init__(self, groupe):
self.groupe = groupe
import os
import glob
self.liste_fichier =[]
chemin = os.getcwd()
for g in self.groupe:
liste = glob.glob(chemin+"/"+"["+g+"]"+"*.txt")
self.liste_fichier.append(liste)
#Transformation des données des patients sélectionnés en dictionnaire en fonction du referentiel illumina.
#Transformation of the data of the selected patients into a dictionary according to the illumina referential.
def dictionnaire_genetique(self, dico_r):
import csv
data1 = []
dico ={}
for fic in self.liste_fichier:
for fichier in fic:
f = open(fichier, "r")
f_csv = csv.reader(f,delimiter=" ")
data = list(f_csv)
data = data[4:]
for d in data:
for d1 in d:
r = d1.split("\t")
data1.append(r)
for k,v in dico_r.items():
for da in data1:
if da[0] in dico_r[k]:
if k in dico:
dico[k]+= "_"+da[1]
else:
dico[k]= da[1]
return dico
#Présentation sous forme de tableau
#Presentation in tabular form
def tableau(self, dictionnaire):
import pandas
for k, v in dictionnaire.items():
dictionnaire[k]= pandas.Series(v.split("_"), dtype = float )
array =
|
pandas.DataFrame(dictionnaire)
|
pandas.DataFrame
|
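# Illustrative usage sketch (not part of the original study code): how the two classes above are
# meant to chain together. File names, gene regexes, and group letters below are hypothetical.
# ref = Read("illumina450k_manifest.txt")              # hypothetical 450k manifest export
# probes_by_gene = ref.liste_new(["BRCA1", "TP53"])    # regexes matched against the gene-name column (column 21)
# cohort = Representation_genetique(["S", "A"])        # patient .txt files whose names start with S or A
# meth_by_gene = cohort.dictionnaire_genetique(probes_by_gene)
# cohort.tableau(meth_by_gene)                         # builds a pandas.DataFrame of methylation values per gene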
import pandas as pd
from scipy.io import arff
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
dataset = pd.read_csv(r'..\..\data\breast-cancer-wisconsin\wdbc.data', header=None) # header=None, usecols=[3,6]
# print(dataset[1].value_counts())
dataset.pop(0)
y = LabelEncoder().fit_transform(dataset.pop(1).values)
si_step = ('si', SimpleImputer(strategy='constant', fill_value='MISSING'))
ohe_step = ('ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'))
oe_step = ('le', OrdinalEncoder())
num_si_step = ('si', SimpleImputer(strategy='mean'))
sc_step = ('sc', StandardScaler())
cat_pipe = Pipeline([si_step, ohe_step])
num_pipe = Pipeline([num_si_step, sc_step])
bin_pipe = Pipeline([oe_step])
transformers = [
# ('cat', cat_pipe, ['DGN', 'PRE6', 'PRE14']),
('num', num_pipe, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29]),
# ('bin', bin_pipe, ['PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE17', 'PRE19', 'PRE25', 'PRE30', 'PRE32']),
]
ct = ColumnTransformer(transformers=transformers)
# X_transformed = ct.fit_transform(dataset)
ml_pipe = Pipeline([
('X_transform', ct),
('mlp', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(4, 3))),
])
kf = KFold(n_splits=5, shuffle=True)
# cv_score = cross_val_score(ml_pipe, dataset, y, cv=kf).mean()
param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
'mlp__solver': ['sgd', 'adam', 'lbfgs'],
'mlp__alpha': [1e-1, 1e-3, 1e-5],
'mlp__hidden_layer_sizes': [(5, 2), (4, 3), (4, 4), (5, 5)],
'mlp__activation': ['identity', 'logistic', 'tanh', 'relu'],
}
knn_pipe = Pipeline([
('X_transform', ct),
('knn', KNeighborsClassifier(n_neighbors=8)),
])
ml_pipe.fit(dataset, y)
print(f'All data score: {ml_pipe.score(dataset, y)}')
knn_param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
'knn__n_neighbors': range(1, 10),
}
gs = GridSearchCV(ml_pipe, param_grid, cv=kf)
gs.fit(dataset, y)
print(gs.best_params_)
print(gs.best_score_)
print(
|
pd.DataFrame(gs.cv_results_)
|
pandas.DataFrame
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pytest
from modelstore.models.prophet import MODEL_FILE, ProphetManager, save_model
from prophet import Prophet
from prophet.serialize import model_from_json, model_to_json
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
@pytest.fixture
def time_series_data():
now = datetime.now()
rows = []
for i in range(50):
rows.append({"ds": now + timedelta(days=i), "y": random.uniform(1, 10)})
return
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
def getcount(x):
return x.count()
def getset(x):
return len(set(x))
def FeatSection(data,label):
'Features built over the feature interval'
'Number and variety of coupons received by each user'
uc_cnt_set = pd.pivot_table(data,index='user_id',values='coupon_id',aggfunc=[getcount,getset]).reset_index()
uc_cnt_set.columns = ['user_id','uc_cnt','uc_set']
label = pd.merge(label,uc_cnt_set,on='user_id',how='left')
usecp = data[data['date'].isnull() == False] # records where the coupon was redeemed
dropcp = data[data['date'].isnull() == True] # records where the coupon was not redeemed
'Number and variety of coupons redeemed by each user'
uusec_cnt_set = pd.pivot_table(usecp,index='user_id',values='coupon_id',aggfunc=[getcount,getset]).reset_index()
uusec_cnt_set.columns = ['user_id','uusec_cnt','uusec_set']
label = pd.merge(label,uusec_cnt_set,on='user_id',how='left')
'Variety of merchants from which each user received coupons'
um_set = pd.pivot_table(data,index='user_id',values='merchant_id',aggfunc=getset).reset_index()
um_set.columns = ['user_id','UM_set']
label = pd.merge(label,um_set,on='user_id',how='left')
'Variety of merchants where each user redeemed coupons'
uusem_set = pd.pivot_table(usecp,index='user_id',values='merchant_id',aggfunc=getset).reset_index()
uusem_set.columns = ['user_id','uusem_set']
label =
|
pd.merge(label,uusem_set,on='user_id',how='left')
|
pandas.merge
|
from datetime import datetime
import warnings
import pytest
import pandas as pd
import pyodbc
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, conversion, create
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="module")
def sample():
dataframe = pd.DataFrame(
{
"_varchar": [None, "b", "c", "4", "e"],
"_tinyint": [None, 2, 3, 4, 5],
"_smallint": [256, 2, 6, 4, 5], # tinyint max is 255
"_int": [32768, 2, 3, 4, 5], # smallint max is 32,767
"_bigint": [2147483648, 2, 3, None, 5], # int max size is 2,147,483,647
"_float": [1.111111, 2, 3, 4, 5], # any decicmal places
"_time": [str(datetime.now().time())]
* 5, # string in format HH:MM:SS.ffffff
"_datetime": [datetime.now()] * 4 + [pd.NaT],
"_empty": [None] * 5,
}
)
return dataframe
def test_table_errors(sql):
table_name = "##test_table_column"
with pytest.raises(KeyError):
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, primary_key_column="Z")
def test_table_column(sql):
table_name = "##test_table_column"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "A")
assert all(schema["sql_type"] == "varchar")
assert all(schema["is_nullable"] == True)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "string")
assert all(schema["odbc_type"] == pyodbc.SQL_VARCHAR)
assert all(schema["odbc_size"] == 0)
assert all(schema["odbc_precision"] == 0)
def test_table_pk(sql):
table_name = "##test_table_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "FLOAT"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_composite_pk(sql):
table_name = "##test_table_composite_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(5)", "C": "FLOAT"}
primary_key_column = ["A", "B"]
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, 2, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, False, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_pk_input_error(sql):
with pytest.raises(ValueError):
table_name = "##test_table_pk_input_error"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "DECIMAL(5,2)"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
sql_primary_key=True,
)
def test_table_sqlpk(sql):
table_name = "##test_table_sqlpk"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, sql_primary_key=True)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 2
assert all(schema.index == ["_pk", "A"])
assert all(schema["sql_type"] == ["int identity", "varchar"])
assert all(schema["is_nullable"] == [False, True])
assert all(schema["ss_is_identity"] == [True, False])
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA], index=["_pk", "A"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True])
assert all(schema["pandas_type"] == ["Int32", "string"])
assert all(schema["odbc_type"] == [pyodbc.SQL_INTEGER, pyodbc.SQL_VARCHAR])
assert all(schema["odbc_size"] == [4, 0])
assert all(schema["odbc_precision"] == [0, 0])
def test_table_from_dataframe_simple(sql):
table_name = "##test_table_from_dataframe_simple"
dataframe = pd.DataFrame({"ColumnA": [1]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "ColumnA")
assert all(schema["sql_type"] == "tinyint")
assert all(schema["is_nullable"] == False)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "UInt8")
assert all(schema["odbc_type"] == pyodbc.SQL_TINYINT)
assert all(schema["odbc_size"] == 1)
assert all(schema["odbc_precision"] == 0)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result.equals(dataframe)
def test_table_from_dataframe_datestr(sql):
table_name = "##test_table_from_dataframe_datestr"
dataframe = pd.DataFrame({"ColumnA": ["06/22/2021"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame({
'column_name': pd.Series(['ColumnA','_time_insert']),
'sql_type': pd.Series(['date','datetime2'], dtype='string'),
'is_nullable': pd.Series([False, True]),
'ss_is_identity': pd.Series([False, False]),
'pk_seq': pd.Series([None, None], dtype='Int64'),
'pk_name': pd.Series([None, None], dtype='string'),
'pandas_type': pd.Series(['datetime64[ns]', 'datetime64[ns]'], dtype='string'),
'odbc_type': pd.Series([pyodbc.SQL_TYPE_DATE, pyodbc.SQL_TYPE_TIMESTAMP], dtype='int64'),
'odbc_size': pd.Series([10, 27], dtype='int64'),
'odbc_precision': pd.Series([0, 7], dtype='int64'),
}).set_index(keys='column_name')
assert schema[expected.columns].equals(expected)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_errorpk(sql, sample):
with pytest.raises(ValueError):
table_name = "##test_table_from_dataframe_nopk"
sql.create.table_from_dataframe(table_name, sample, primary_key="ColumnName")
def test_table_from_dataframe_nopk(sql, sample):
table_name = "##test_table_from_dataframe_nopk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key=None
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[True, True, False, False, True, False, False, True, True], dtype="bool"
),
"ss_is_identity": pd.Series([False] * 9, dtype="bool"),
"pk_seq": pd.Series([pd.NA] * 9, dtype="Int64"),
"pk_name": pd.Series([pd.NA] * 9, dtype="string"),
"pandas_type": pd.Series(
[
"string",
"UInt8",
"Int16",
"Int32",
"Int64",
"float64",
"timedelta64[ns]",
"datetime64[ns]",
"string",
],
dtype="string",
),
"odbc_type": pd.Series(
[
pyodbc.SQL_VARCHAR,
pyodbc.SQL_TINYINT,
pyodbc.SQL_SMALLINT,
pyodbc.SQL_INTEGER,
pyodbc.SQL_BIGINT,
pyodbc.SQL_FLOAT,
pyodbc.SQL_SS_TIME2,
pyodbc.SQL_TYPE_TIMESTAMP,
pyodbc.SQL_WVARCHAR,
],
dtype="int64",
),
"odbc_size": pd.Series([0, 1, 2, 4, 8, 8, 16, 27, 0], dtype="int64"),
"odbc_precision":
|
pd.Series([0, 0, 0, 0, 0, 53, 7, 7, 0], dtype="int64")
|
pandas.Series
|
"""Bootstrapped_agregation
* :class:`.BaggingTress`
* :class:`.BaggingSVC`
"""
# data wrangling
import numpy as np
import pandas as pd
from collections import Counter
# models
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
# validation
from sklearn.metrics import balanced_accuracy_score, accuracy_score, f1_score, roc_auc_score
class BaggingTrees:
"""
Summary: Expand the subset of features considered at each node split for
-------- more flexible tuning.
Parameters: - X_t : Training set features. (np.array)
---------- - Y_t : Training set labels. (np.array)
- X_v : Validation set features. (np.array)
- p : List of parameter values:
-- epochs: generates statistical inferences such as mean and std
-- n_trees: number of trees bagged
-- criterion: decision-tree sklearn library
-- min_samples_leaf: decision-tree sklearn library
-- max_depth: decision-tree sklearn library
-- min_samples_splits: decision-tree sklearn library
-- max_leaf_nodes: decision-tree sklearn library
Output: - y_pred: predictions on validation set X_v (array)
------- - unan_rates: rate of majority votes (array)
- acc: accuracy on training set Y_t (integer)
- f1: f1 score on training set Y_t (integer)
Example: model = BaggingTrees(p)
-------- model.fit(x_t, y_t)
predictions = model.predict(x_v)
votes_percentages = model.votes
"""
def __init__(self, p):
# store parameters
self.epochs = p[0]; self.n_trees = p[1]
self.criterion = p[2]; self.min_samples_leaf = p[3]
self.max_depth = p[4]; self.min_samples_splits = p[5]
self.max_leaf_nodes = p[6]
def fit(self, X_t, Y_t):
if isinstance(X_t,np.ndarray):
X_t = pd.DataFrame(X_t)
elif not isinstance(X_t,pd.core.frame.DataFrame):
raise Exception('Wrong type for X_t. Expected np.ndarray or pd.DataFrame')
if isinstance(Y_t,np.ndarray):
Y_t = pd.DataFrame(Y_t)
elif not isinstance(Y_t,pd.core.frame.DataFrame):
raise Exception('Wrong type for Y_t. Expected np.ndarray or pd.DataFrame')
self.X_t_df = X_t.copy(); self.Y_t_df = Y_t.copy()
X_t['label'] = Y_t
train_df = X_t
for i in range(self.epochs):
self.bag = []
for run in np.arange(self.n_trees):
# bootstrap resample: draw len(train_df) rows with replacement
train_df_bs = train_df.iloc[np.random.randint(len(train_df), size=len(train_df))]
X_train = train_df_bs.iloc[:,:-1]
Y_train = train_df_bs.iloc[:,-1:]
# Storing each trained tree
wl = DecisionTreeClassifier(criterion=self.criterion
, min_samples_leaf=self.min_samples_leaf
, max_depth=self.max_depth
, min_samples_split=self.min_samples_splits
, max_leaf_nodes=self.max_leaf_nodes).fit(X_train,Y_train)
#, random_state=run
# add tree into bag
self.bag.append(wl)
## Score on Training set
t_predictions = []
for i in range(self.n_trees):
tree_t_prediction = self.bag[i].predict(self.X_t_df) # predict validation and training sets
t_predictions.append(tree_t_prediction) # Append predictions
# Convert predictions lists into np.array to transpose them and obtain "n_tree" predictions per line
t_predictions_T = np.array(t_predictions).T
t_final_predictions = []
# for each of the m rows of X_t_df (m x features)
for line in t_predictions_T:
# tally the "n_tree" votes in t_predictions_T (m x n_tree)
most_common = Counter(line).most_common(1)[0][0]
t_final_predictions.append(most_common)
# accuracies values
self.acc = accuracy_score(self.Y_t_df, t_final_predictions)
self.f1 = f1_score(self.Y_t_df, t_final_predictions, average='macro')
self.bcr = balanced_accuracy_score(self.Y_t_df, t_final_predictions)
self.auc = roc_auc_score(self.Y_t_df, t_final_predictions, average='macro')
return
def predict(self, X_v):
if isinstance(X_v,np.ndarray):
X_v =
|
pd.DataFrame(X_v)
|
pandas.DataFrame
|
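# Illustrative usage sketch (assumes the full BaggingTrees class above, including the remainder of its
# predict method, which is truncated here). The layout of p follows the positional reads in __init__:
# [epochs, n_trees, criterion, min_samples_leaf, max_depth, min_samples_split, max_leaf_nodes]
# p = [1, 25, 'gini', 5, 10, 2, None]
# model = BaggingTrees(p)
# model.fit(x_train, y_train)                        # np.ndarray or pd.DataFrame inputs
# y_pred = model.predict(x_valid)
# print(model.acc, model.f1, model.bcr, model.auc)   # training-set scores stored by fit()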
import unittest
import numpy as np
import pandas as pd
import dfauditor.extractor
import dfauditor.response
class TestExtractorNumeric(unittest.TestCase):
@classmethod
def setUpClass(cls):
numeric_data = {
'x': [50, 50, -10, 0, 0, 5, 15, -3, None, 0],
'y': [0.00001, 256.128, None, 16.32, 2048, -3.1415926535, 111, 2.4, 4.8, 0.0],
'trivial': [1]*10
}
cls.numeric_df = pd.DataFrame(numeric_data)
cls.mixed_df = None
def test_trivial(self):
resp = dfauditor.extractor.numeric(self.numeric_df.trivial)
expected = dfauditor.response.Numeric()
expected.__dict__ = {'attr': 'trivial',
'type': 'NUMERIC',
'median': 1.0,
'variance': 0.0,
'std': 0.0,
'max': 1,
'min': 1,
'mad': 0.0,
'p_zeros': 0.0,
'kurtosis': 0,
'skewness': 0,
'iqr': 0.0,
'range': 0,
'p_nan': 0.0,
'mean': 1.0}
self.assertEqual(resp.__dict__, expected.__dict__)
def test_nans(self):
"""
confirm that nans are adequately counted
:return:
"""
df_nan = self.numeric_df.trivial.copy()
# unlike `at`, `loc` changes the int type column to float (there is no NaN in panda-land float)
df_nan.loc[5] = np.nan
resp = dfauditor.extractor.numeric(df_nan)
self.assertAlmostEqual(10.0, resp.p_nan)
# this should agree with None too
df_none = self.numeric_df.trivial.copy()
df_none.loc[5] = None
df_none.loc[4] = None
resp = dfauditor.extractor.numeric(df_none)
self.assertAlmostEqual(20.0, resp.p_nan)
class TestExtractorString(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = {
'trivial': ['a'] * 10,
'mix': ['a'] * 3 + ['b'] * 2 + ['c'] + [None] * 4
}
cls.df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv1D, MaxPooling1D
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
# combine low and high expression examples
high_exp = pd.read_csv("high_exp.csv", index_col=0)
low_exp =
|
pd.read_csv("low_exp.csv", index_col=0)
|
pandas.read_csv
|
"""
Take all the methylation data created from the scWGBS pipeline and create a dataset which combine all the
patients and chromosome to one, next we will use this to create the data for a NN
We provide information about the pmd, the avg meth, the var in meth, the avg meth in normal cells for this
patient and across all patient and allow to filter based on another set of boundaries (not only pmd),
to provide list of cells to use and minimum number of samples per group of cells to be a valid cpg and a
ways to remove percentage of top and bottom coverage cpgs
"""
import argparse
import os
import sys
import numpy as np
import pandas as pd
import tqdm
sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.getcwd())
from commons import files_tools, consts, utils, sequence_tools
from format_files import handle_pmds
PATIENTS = ["CRC01", "CRC11", "CRC13", "CRC10"]
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--methylation_folder', help='Path to methylation files', required=True)
parser.add_argument('--windows_file', help='Path to files with windows we want to take',
required=False, default=None)
parser.add_argument('--nc_files', help='Path to nc files', required=True)
parser.add_argument('--cells_to_use', help='Path to cells to use file, default is all', required=False,
type=str, default=None)
parser.add_argument('--min_cells_threshold', help='minimum samples per cell group to be a valid '
'cpg', required=False, default=5, type=int)
parser.add_argument('--output_folder', help='Path of the output folder', required=False,
default=os.path.dirname(sys.argv[0]))
parser.add_argument('--coverage_perc_cpgs_to_remove', type=int, default=5, required=False,
help='Percentage of cells to remove based on extreme coverage(high and low)')
args = parser.parse_args()
return args
def get_chromosome_df(file_path, cells_to_use=None):
patient, chromosome = consts.DATA_FILE_SCWGBS_RE.findall(file_path)[0]
if patient not in PATIENTS:
return None, None, None
if cells_to_use is not None and patient not in cells_to_use:
return None, None, None
return patient, chromosome, pd.read_pickle(file_path)
def filter_chromosome_df(df, patient, chromosome, boundaries_data=None, min_cells_threshold=5,
cells_to_use=None, perc_of_cpg_to_remove_based_on_coverage=5):
"""
- filter out non-pmd cpg (and maybe add index)
- Filtered out again based on boundaries
- Only take non normal cells
:param df: The df
:param patient: name of the patient
:param chromosome: name of the chromosome
:param boundaries_data: The boundaries data, can be empty
:param cells_to_use: A dictionary mapping patient to a dictionary of groups of cells to use
{p1: {group1:[c1,c2,c3], group2:[c5,c8,c1]}, p2:...}
:param min_cells_threshold: A min number of cells needed to be a valid cpg in each cells to use
:param perc_of_cpg_to_remove_based_on_coverage: Percentage of cpg to remove from top and low coverage
:return: A new dictionary in the same format of the input but filtered by what is required
"""
# This need to be true
filtered_df = handle_pmds.filtered_out_non_pmd(df, chromosome)
filtered_df = utils.filter_df_based_on_region_name(filtered_df, region_name=utils.NOT_NC)
if boundaries_data:
filtered_df = utils.filter_df_based_on_tuple_list(filtered_df, boundaries_data)
if cells_to_use:
cells = set(list(cells_to_use[patient].values())[0]) & set(filtered_df.index)
if len(cells) == 0:
return None
group_coverage = np.sum(~
|
pd.isnull(filtered_df.loc[cells])
|
pandas.isnull
|
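# Illustrative sketch (not the pipeline's own code): cells_to_use follows the mapping format described
# in the filter_chromosome_df docstring, and the trim below shows one generic pandas way to drop the
# top/bottom X% of CpGs by coverage, in the spirit of --coverage_perc_cpgs_to_remove. Names are hypothetical.
# cells_to_use = {"CRC01": {"group1": ["cell_a", "cell_b"], "group2": ["cell_c"]}}
# perc = 5
# coverage = chromosome_df.notnull().sum(axis=0)              # non-null samples per CpG (cells are rows)
# lo, hi = np.percentile(coverage, [perc, 100 - perc])
# trimmed = chromosome_df.loc[:, (coverage >= lo) & (coverage <= hi)]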
from operator import methodcaller
import numpy as np
import pandas as pd
from pandas.util import testing as tm
import pytest
import ibis
execute = ibis.pandas.execute
pytestmark = pytest.mark.pandas
@pytest.fixture(scope='session')
def sort_kind():
return 'mergesort'
default = pytest.mark.parametrize('default', [ibis.NA, ibis.literal('a')])
row_offset = pytest.mark.parametrize(
'row_offset', list(map(ibis.literal, [-1, 1, 0])))
delta_offset = pytest.mark.parametrize(
'delta_offset',
[ibis.day(), 2 * ibis.day(), -2 * ibis.day()]
)
@default
@row_offset
def test_lead(t, df, row_offset, default):
expr = t.dup_strings.lead(row_offset, default=default)
result = expr.execute()
expected = df.dup_strings.shift(-execute(row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@row_offset
def test_lag(t, df, row_offset, default):
expr = t.dup_strings.lag(row_offset, default=default)
result = expr.execute()
expected = df.dup_strings.shift(execute(row_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@delta_offset
def test_lead_delta(time_t, time_df, delta_offset, default):
expr = time_t.dup_strings.lead(delta_offset, default=default)
result = expr.execute()
expected = time_df.dup_strings.tshift(freq=-execute(delta_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
@default
@delta_offset
def test_lag_delta(time_t, time_df, delta_offset, default):
expr = time_t.dup_strings.lag(delta_offset, default=default)
result = expr.execute()
expected = time_df.dup_strings.tshift(freq=execute(delta_offset))
if default is not ibis.NA:
expected = expected.fillna(execute(default))
tm.assert_series_equal(result, expected)
def test_first(t, df):
expr = t.dup_strings.first()
result = expr.execute()
assert result == df.dup_strings.iloc[0]
def test_last(t, df):
expr = t.dup_strings.last()
result = expr.execute()
assert result == df.dup_strings.iloc[-1]
def test_group_by_mutate_analytic(t, df):
gb = t.groupby(t.dup_strings)
expr = gb.mutate(
first_value=t.plain_int64.first(),
last_value=t.plain_strings.last(),
avg_broadcast=t.plain_float64 - t.plain_float64.mean(),
delta=(t.plain_int64 - t.plain_int64.lag()) / (
t.plain_float64 - t.plain_float64.lag()
)
)
result = expr.execute()
gb = df.groupby('dup_strings')
expected = df.assign(
last_value=gb.plain_strings.transform('last'),
first_value=gb.plain_int64.transform('first'),
avg_broadcast=df.plain_float64 - gb.plain_float64.transform('mean'),
delta=(
(df.plain_int64 - gb.plain_int64.shift(1)) /
(df.plain_float64 - gb.plain_float64.shift(1))
)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_players(players, players_df):
lagged = players.mutate(pct=lambda t: t.G - t.G.lag())
result = lagged.execute()
expected = players_df.assign(
pct=players_df.G - players_df.groupby('playerID').G.shift(1)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_filter_mean(batting, batting_df):
expr = batting[batting.G > batting.G.mean()]
result = expr.execute()
expected = batting_df[batting_df.G > batting_df.G.mean()].reset_index(
drop=True
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_zscore(players, players_df):
expr = players.mutate(g_z=lambda t: (t.G - t.G.mean()) / t.G.std())
result = expr.execute()
gb = players_df.groupby('playerID')
expected = players_df.assign(
g_z=(players_df.G - gb.G.transform('mean')) / gb.G.transform('std')
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_avg_change_in_games_per_year(players, players_df):
expr = players.mutate(
delta=lambda t: (t.G - t.G.lag()) / (t.yearID - t.yearID.lag())
)
result = expr.execute()
gb = players_df.groupby('playerID')
expected = players_df.assign(
delta=(players_df.G - gb.G.shift(1)) / (
players_df.yearID - gb.yearID.shift(1)
)
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.xfail(AssertionError, reason='NYI')
def test_batting_most_hits(players, players_df):
expr = players.mutate(
hits_rank=lambda t: t.H.rank().over(
ibis.cumulative_window(order_by=ibis.desc(t.H))
)
)
result = expr.execute()
hits_rank = players_df.groupby('playerID').H.rank(
method='min', ascending=False
)
expected = players_df.assign(hits_rank=hits_rank)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_quantile(players, players_df):
expr = players.mutate(hits_quantile=lambda t: t.H.quantile(0.25))
result = expr.execute()
hits_quantile = players_df.groupby('playerID').H.transform(
'quantile', 0.25
)
expected = players_df.assign(hits_quantile=hits_quantile)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize('op', ['sum', 'mean', 'min', 'max'])
def test_batting_specific_cumulative(batting, batting_df, op, sort_kind):
ibis_method = methodcaller('cum{}'.format(op))
expr = ibis_method(batting.sort_by([batting.yearID]).G)
result = expr.execute().astype('float64')
pandas_method = methodcaller(op)
expected = pandas_method(
batting_df[['G', 'yearID']].sort_values(
'yearID', kind=sort_kind).G.expanding()
).reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_batting_cumulative(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.cumulative_window(order_by=t.yearID)
)
)
result = expr.execute()
columns = ['G', 'yearID']
more_values = batting_df[columns].sort_values(
'yearID', kind=sort_kind).G.cumsum()
expected = batting_df.assign(more_values=more_values)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_cumulative_partitioned(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.cumulative_window(order_by=t.yearID, group_by=t.lgID)
)
)
result = expr.execute().more_values
columns = ['G', 'yearID', 'lgID']
key = 'lgID'
expected_result = batting_df[columns].groupby(
key, sort=False, as_index=False
).apply(lambda df: df.sort_values('yearID', kind=sort_kind)).groupby(
key, sort=False
).G.cumsum().sort_index(level=-1)
expected = expected_result.reset_index(
list(range(expected_result.index.nlevels - 1)),
drop=True
).reindex(batting_df.index)
expected.name = result.name
tm.assert_series_equal(result, expected)
def test_batting_rolling(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.trailing_window(5, order_by=t.yearID)
)
)
result = expr.execute()
columns = ['G', 'yearID']
more_values = batting_df[columns].sort_values(
'yearID', kind=sort_kind).G.rolling(5).sum()
expected = batting_df.assign(more_values=more_values)
tm.assert_frame_equal(result[expected.columns], expected)
def test_batting_rolling_partitioned(batting, batting_df, sort_kind):
expr = batting.mutate(
more_values=lambda t: t.G.sum().over(
ibis.trailing_window(3, order_by=t.yearID, group_by=t.lgID)
)
)
result = expr.execute().more_values
columns = ['G', 'yearID', 'lgID']
key = 'lgID'
expected_result = batting_df[columns].groupby(
key, sort=False, as_index=False
).apply(lambda df: df.sort_values('yearID', kind=sort_kind)).groupby(
key, sort=False
).G.rolling(3).sum().sort_index(level=-1)
expected = expected_result.reset_index(
list(range(expected_result.index.nlevels - 1)),
drop=True
).reindex(batting_df.index)
expected.name = result.name
|
tm.assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
import pandas as pd
import numpy as np
from skimage.io import MultiImage
from skimage.morphology import skeletonize
import maskslic as seg
import cv2
import matplotlib.pyplot as plt
from pathlib import Path
import warnings
VALID_SLIDE_EXTENSIONS = {'.tiff', '.mrmx', '.svs'}
# ~~~~~~~~~~~~ Helper functions ~~~~~~~~~~~~
def generateMetaDF(data_dir, meta_fn:str='train.csv'):
'''
Makes a pandas.DataFrame of slide paths out of a directory containing slides. Drop `train.csv` into `data_dir`
and the script will also merge any metadata from there on the `image_id` key.
'''
all_files = [path.resolve() for path in Path(data_dir).rglob("*.*")]
slide_paths = [path for path in all_files if path.suffix in VALID_SLIDE_EXTENSIONS]
if len(slide_paths)==0:
raise ValueError('No slides in `data_dir`=%s'%data_dir)
data_df =
|
pd.DataFrame({'slide_path':slide_paths})
|
pandas.DataFrame
|
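# Illustrative usage sketch (hypothetical path; assumes slides and an optional train.csv live under data_dir):
# meta_df = generateMetaDF('/path/to/slides_dir')
# print(meta_df.head())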
# -*- coding: utf-8 -*-
"""csvmerge"""
import click
import numpy as np
import pandas as pd
import re
import sys
@click.command()
@click.option('-f1', '--from-file', required=True, type=click.Path(exists=True), help='input file to merge FROM (.csv). This is typically the smaller file.')
@click.option('-f2', '--to-file', required=True, type=click.Path(exists=True), help='input file to merge TO (.csv). This can be a very large file, as it is only streamed from disk, never stored completely in memory.')
@click.option('-k1', '--from-key', required=True, help='field to match in the FROM file')
@click.option('-k2', '--to-key', required=True, help='field to match in the TO file')
@click.option('-o', '--output-file', required=True, type=click.Path(exists=False), help='output file to write results (.csv)')
def main(from_file, to_file, from_key, to_key, output_file):
"""Join two csv files on specified keys and write the results."""
of = open(output_file, 'wt')
f1_df = read_file(from_file, from_key)
duns2row_f1 = {r[from_key]:r for i,r in f1_df.iterrows()}
print('read %d from %s' % (len(f1_df), from_file))
header = False
header1 = ['MATCH_%s' % c for i,c in enumerate(f1_df.columns)]
for row in iter_big_file(to_file, to_key):
header2 = [c for i,c in enumerate(row.index)]
if row[to_key] in duns2row_f1:
match = duns2row_f1[row[to_key]]
match = pd.DataFrame([match])
match.reset_index(inplace=True, drop=True)
row =
|
pd.DataFrame([row])
|
pandas.DataFrame
|
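# Illustrative invocation (hypothetical file and key names; flags as declared by the click options above):
# python csvmerge.py -f1 small_from.csv -f2 big_to.csv -k1 id -k2 id -o merged.csv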
from bokeh.embed import components
from bokeh.plotting import figure, curdoc, ColumnDataSource
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
from bokeh.models import CustomJS, LabelSet, Slider
from bokeh.models.widgets import Slider
from bokeh.models.layouts import WidgetBox, Row
from bokeh.layouts import row, widgetbox
from werkzeug import secure_filename
from flask import Flask, render_template, flash, request, redirect, url_for, session
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
import os
from forms import *
import pandas as pd
import plotly
import plotly.plotly as py
import json
import numpy as np
from pandas import ExcelWriter
from pandas import ExcelFile
DEBUG = True
app = Flask(__name__) #initialising flask
app.config.from_object(__name__) #configuring flask
app.config['SECRET_KEY'] = '<KEY>'
CURRENT_YEAR = '2015'
data = pd.read_csv("test_data.csv")
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def adata_preprocess(adata, n_top_genes=1000, log=True):
# this is a lot like the steps for scvelo.pp.filter_and_normalize() which also allows selection of top genes (see Pancreas)
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count#1
# print(adata)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell #same as normalize_total()
adata, key_n_counts='n_counts_all'
)
# select highly-variable genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
total = adata.X
total = total.sum(axis=0).transpose()
total = pd.DataFrame(total.transpose())
print('total')
print(total.shape)
#total = total.sum(axis=0).transpose()
total.columns = [i for i in adata.var_names]
print(total)
total.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/library_counts_500hvg.csv')
sc.pp.scale(adata, max_value=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=499) # estimate only 2 PCs
X_new = pca.fit_transform(adata.X)
print('variance explained')
print(pca.explained_variance_ratio_)
print('pca.components_ shape ncomp x nfeat')
print()
df = pd.DataFrame(abs(pca.components_))
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print('done saving')
'''
# sc.pp.scale(adata, max_value=10)  # Zheng et al. scale after the log, but this doesn't work well and is also not used in scvelo.pp.filter_and_normalize
return adata
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
'''
df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print(df)
df = df.set_index('Unnamed: 0')
print(df)
df = df.sort_values(by='totals', axis=1, ascending = False)
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_sorted_500hvg.csv')
print('saved')
'''
import random
random.seed(100)
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)',
'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells cDCs
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
df_nover = pd.DataFrame(nover_labels)
# df_nover.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/noverLabelsforMonocle.csv')
print('save nover')
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
# palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw.X: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(ad.X)
print(ad.raw.X.shape)
# df_X = pd.DataFrame(ad.raw.X.todense(), columns = ad.var_names)
# df_X.columns = [i for i in ad.var_names]
# print('starting to save .X')
# df_X.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/expression_matrix_raw.csv")
print('finished save .X')
# (ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
adata_counts_raw = sc.AnnData(ad.raw.X)
adata_counts_raw.var_names = [i for i in ad.var_names]
# adata_counts_raw = adata_preprocess(adata_counts_raw, n_top_genes=500, log=True) # when using HVG and no PCA
# sc.tl.pca(adata_counts_raw,svd_solver='arpack', n_comps=ncomps)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = [
'ITGAX'] # ['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
# 'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
true_label = nover_labels # revised_clus
root_user = [4823]
print('v0 random seed', v0_random_seed)
# df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
# df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.X.todense()
print(time.ctime())
print(time.ctime())
v0 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.2,
root_user=root_user, dataset='humanCD34', preserve_disconnected=True, random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, pseudotime_threshold_TS=10,
neighboring_terminal_states_threshold=3) # *.4 root=1,
v0.run_VIA()
v0.make_JSON(filename='scRNA_Hema_temp.js')
super_labels = v0.labels
print('starting to save selected genes')
genes_save = ['ITGAX', 'GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA',
'ITGAX', 'IGHD',
'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
df_selected_genes = pd.DataFrame(adata_counts.X, columns=[cc for cc in adata_counts.var_names])
df_selected_genes = df_selected_genes[genes_save]
# df_selected_genes.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/selected_genes.csv")
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B', 'SPI1', 'CD34', 'CSF1R', 'ITGAX']
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=gene_list_magic)
df_magic_cluster = df_magic.copy()
df_magic_cluster['parc'] = v0.labels
df_magic_cluster = df_magic_cluster.groupby('parc', as_index=True).mean()
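# df_magic_cluster now holds the MAGIC-imputed expression of the genes in gene_list_magic,
# averaged per coarse VIA cluster; these per-cluster means colour the piechart cluster-graph below.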
print('end magic', df_magic.shape)
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v0.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_magic_cluster['GATA1'].values, title='GATA1')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
draw_trajectory_gams(tsnem, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, v0.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=str(Xin.shape[1]))
plt.show()
print('super labels', set(super_labels))
ad.obs['via0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['via0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA', 'ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
sc.pl.matrixplot(magic_ad, marker_genes, groupby='via0_label', dendrogram=True)
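# The MAGIC-imputed matrix is wrapped in an AnnData above purely so that scanpy's matrixplot
# can group the marker-gene panels by the coarse VIA cluster label ('via0_label').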
'''
sc.tl.rank_genes_groups(ad, groupby='via0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="via0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='via0_label', n_genes = 3) # plot the result
'''
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34', 'GATA1', 'IL3RA']: # ,'SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
plt.show()
super_edges = v0.edgelist_maxout # v0.edgelist
tsi_list = get_loc_terminal_states(v0, Xin)
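# tsi_list holds, for each coarse terminal cluster of v0, the index of a representative single cell
# (the cell nearest that cluster's average location in PCA space); it is passed to the fine-grained
# VIA run below as super_terminal_cells.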
v1 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.95, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=v0.terminal_clusters, is_coarse=False, full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
random_seed=v0_random_seed, pseudotime_threshold_TS=10) # *.4super_terminal_cells = tsi_list #3root=1,
v1.run_VIA()
labels = v1.labels
v1.make_JSON(filename='scRNA_Hema_via1_temp.js')
df_magic_cluster = df_magic.copy()
df_magic_cluster['via1'] = v1.labels
df_magic_cluster = df_magic_cluster.groupby('via1', as_index=True).mean()
# print('df_magic_cluster', df_magic_cluster)
'''
#Get the clustsergraph gene expression on topology
for gene_i in gene_list_magic:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v1.draw_piechart_graph(ax,ax1,type_pt='gene', gene_exp = df_magic_cluster[gene_i].values, title = gene_i)
plt.show()
'''
ad.obs['parc1_label'] = [str(i) for i in labels]
'''
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in v1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = v0.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=v1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
# print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))]  # v1.labels instead of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34']: # ['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
# v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name] + 'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
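    # note: size=5780 equals the full CD34 dataset here, so this draw without replacement
    # shuffles the cell order rather than truly downsampling to n_downsample.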
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# graph_hnsw = v0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(adata_counts.obsm['X_pca'][:, 0:20])
# embedding = embedding[idx, :]
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
# DRAW EVOLUTION PATHS
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
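# Illustrative sketch (not part of the original pipeline): the toy examples that follow repeatedly
# locate, for each coarse terminal cluster, the single cell closest to that cluster's mean position
# in PCA space using an hnswlib index, keeping only cells in the later half of the cluster's
# pseudotime. The helper below isolates that pattern; the function name and arguments are
# illustrative assumptions, not part of the VIA API.
def _locate_terminal_cells_sketch(X_pca, cluster_labels, pseudotime, terminal_clusters):
    import hnswlib
    import numpy as np
    p = hnswlib.Index(space='l2', dim=X_pca.shape[1])
    p.init_index(max_elements=X_pca.shape[0], ef_construction=200, M=16)
    p.add_items(X_pca)
    p.set_ef(50)
    tsi_list = []
    for tsi in terminal_clusters:
        loc_i = np.where(np.asarray(cluster_labels) == tsi)[0]
        val_pt = [pseudotime[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # keep cells at or beyond the cluster's median pseudotime
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        centroid = np.mean(X_pca[loc_i], axis=0)
        labelsq, distances = p.knn_query(centroid, k=1)  # nearest real cell to the centroid
        tsi_list.append(labelsq[0][0])
    return tsi_list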
def main_Toy_comparisons(ncomps=10, knn=30, random_seed=42, dataset='Toy3', root_user='M1',
foldername="/home/shobi/Trajectory/Datasets/Toy3/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
# root_user = ["T1_M1", "T2_M1"] # "M1" # #'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy3":
print('dataset Toy3')
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv",
delimiter=",")
#df_counts = pd.read_csv(foldername + "Toy3_noise_100genes_thinfactor8.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
print('df_ids', df_ids.columns)
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C107'
if dataset == "Toy4": # 2 disconnected components
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "Toy4_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
print(df_counts.shape, 'df_counts shape')
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T2_M1'
palantir_root = 'C107'
if dataset == "Connected":
df_counts = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000.csv", delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected/ToyConnected_M9_n2000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1'
if dataset == "Connected2":
df_counts = pd.read_csv(foldername + "Connected2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected2_noise_500genes.csv", 'rt',delimiter=",")
df_ids = pd.read_csv(foldername + "Connected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C11'
    # suggestion: use visual jaccard pruning of 1 (this doesn't alter the underlying graph, just the display); "M2" can also be used as the starting root
if dataset == "ToyMultiM11":
df_counts = pd.read_csv(foldername + "Toymulti_M11_n3000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyMulti_M11_noised.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "Toymulti_M11_n3000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv( "/home/shobi/Trajectory/Datasets/ToyMultifurcating_M11/Toymulti_M11_n3000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1005'
if dataset == "Cyclic": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_noise_100genes_thinfactor3.csv",
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1'
if dataset == "Cyclic2": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/ToyCyclic2_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C107'
if dataset == 'Bifurc2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/ToyBifurc2_noised.csv", delimiter=",")
df_ids = pd.read_csv( "/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000_ids_with_truetime.csv",delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1006'
if dataset == 'Disconnected2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/ToyDisconnected2_noise_500genes.csv",
delimiter=",")
df_ids = pd.read_csv(
"/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv")
root_user = ['T1_M1', 'T1_M2', 'T1_M3'] # 'T1_M1'
paga_root = 'T1_M1'
palantir_root = 'C125'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', axis=1)  # use keyword axis: positional axis args are rejected by newer pandas
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
# df_ids.to_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000_ids_sorted_with_truetime.csv")
# df_counts['group_id'] = df_ids['group_id']#to split Toy4
# df_counts['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_ids['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_counts = df_counts[df_counts['main_Traj']=='T2']#to split Toy4
# df_ids = df_ids[df_ids['main_Traj'] == 'T2']#to split Toy4
#true_time = df_ids['true_time']
true_label = df_ids['group_id'].tolist()
# df_counts = df_counts.drop('main_Traj', 1)#to split Toy4
# df_counts = df_counts.drop('group_id', 1)#to split Toy4
# df_ids = df_ids.reset_index(drop=True)#to split Toy4
# df_counts = df_counts.reset_index(drop=True)#to split Toy4
# true_label = df_ids['group_id'] #to split Toy4
print("shape", df_counts.index, df_ids.index)
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# comparisons
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
do_paga = False #
do_palantir = False #
# comparisons
if do_paga == True:
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X', ) # n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
# sc.tl.diffmap(adata_counts, n_comps=ncomps)
sc.tl.diffmap(adata_counts, n_comps=200) # default retains n_comps = 15
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0, random_state=10)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['leiden','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
df_paga = pd.DataFrame()
df_paga['paga_dpt'] = adata_counts.obs['dpt_pseudotime'].values
correlation = df_paga['paga_dpt'].corr(df_ids['true_time'])
print('corr paga knn', knn, correlation)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
# X = df_counts.values
'''
# palantir
if do_palantir == True:
print(palantir.__file__) # location of palantir source code
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts # palantir.preprocess.normalize_counts(counts)
# pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps) #normally use
pca_projections = counts
dm_res = palantir.utils.run_diffusion_maps(pca_projections, knn=knn,
n_components=300) ## n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
# c107 for T1_M1, C42 for T2_M1 disconnected
# C1 for M8_connected, C1005 for multi_M11 , 'C1006 for bifurc2'
pr_res = palantir.core.run_palantir(ms_data, early_cell=palantir_root, num_waypoints=500, knn=knn)
df_palantir = pd.read_csv(
'/home/shobi/Trajectory/Datasets/Toy3/palantir_pt.csv') # /home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
pt = df_palantir['pt']
correlation = pt.corr(true_time)
print('corr Palantir', correlation)
print('')
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=pca_projections.shape[1])
plt.show()
'''
# from sklearn.decomposition import PCA
# pca = PCA(n_components=ncomps)
# pc = pca.fit_transform(df_counts)
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts.X
if dataset == 'Toy4':
jac_std_global = .15 # .15
else:
        jac_std_global = 0.15  # use 1 for Cyclic2, otherwise 0.15
#
v0 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = []  # find the single cell nearest to the average location of each terminal cluster (in PCA space)
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via1 knn', knn, correlation)
labels = v1.labels
# v1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# v1.run_VIA()
# labels = v1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
    sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])  # convert to array before fancy-indexing with idx
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
    embedding = adata_counts.obsm['X_pca'][idx, 0:2]  # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = umap.UMAP().fit_transform(Xin) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
'''
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(Xin.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_i)
# knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Toy(ncomps=10, knn=30, random_seed=41, dataset='Toy3', root_user=['M1'],
cluster_graph_pruning_std=1., foldername="/home/shobi/Trajectory/Datasets/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
if dataset == "Toy3":
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['M1']
paga_root = "M1"
if dataset == "Toy4": # 2 disconnected components
print('inside toy4')
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T1_M1'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
# print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
true_label = df_ids['group_id'].tolist()
#true_time = df_ids['true_time']
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# true_label =['a' for i in true_label] #testing dummy true_label
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
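# 'iroot' stores the integer index of one cell from the paga_root group; scanpy's diffusion
# pseudotime uses it as the root cell when the PAGA comparison is run.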
# via_wrapper(adata_counts, true_label, embedding= adata_counts.obsm['X_pca'][:,0:2], root=[1], knn=30, ncomps=10,cluster_graph_pruning_std = 1)
# print('starting via wrapper disconn')
# via_wrapper_disconnected(adata_counts, true_label, embedding=adata_counts.obsm['X_pca'][:, 0:2], root=[23,902], preserve_disconnected=True, knn=10, ncomps=10, cluster_graph_pruning_std=1 ,random_seed=41)
# print('end via wrapper disconn')
if dataset == 'Toy4':
jac_std_global = 0.15 # 1
else:
jac_std_global = 0.15
import umap
embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:, 0:10]) # 50
# embedding = adata_counts.obsm['X_pca'][:, 0:2]
# plt.scatter(embedding[:,0],embedding[:,1])
# plt.show()
print('root user', root_user)
v0 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed, piegraph_arrow_head_width=0.4,
piegraph_edgeweight_scalingfactor=1.0) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
print('super labels', type(super_labels))
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = []  # find the single cell nearest to the average location of each terminal cluster (in PCA space)
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
labels = v1.labels
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
n_downsample = 50
if len(labels) > n_downsample: # just testing the downsampling and indices. Not actually downsampling
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
    sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])  # convert to array before fancy-indexing with idx
embedding = embedding[idx, :]
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
# embedding = adata_counts.obsm['X_pca'][idx, 0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
'''
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax2.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
'''
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
v1.get_gene_expression(subset_, title_gene=gene_i)
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Bcell(ncomps=50, knn=20, random_seed=0, cluster_graph_pruning_std=.15, path='/home/shobi/Trajectory/Datasets/Bcell/'):
print('Input params: ncomp, knn, random seed', ncomps, knn, random_seed)
# https://github.com/STATegraData/STATegraData
def run_zheng_Bcell(adata, min_counts=3, n_top_genes=500, do_HVG=True):
sc.pp.filter_genes(adata, min_counts=min_counts)
# sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
'''
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
'''
sc.pp.normalize_total(adata, target_sum=1e4)
if do_HVG == True:
sc.pp.log1p(adata)
'''
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False )
adata = adata[:, filter_result.gene_subset] # subset the genes
'''
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, min_mean=0.0125, max_mean=3,
min_disp=0.5) # this function expects logarithmized data
print('len hvg ', sum(adata.var.highly_variable))
adata = adata[:, adata.var.highly_variable]
sc.pp.normalize_per_cell(adata) # renormalize after filtering
# if do_log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
if do_HVG == False: sc.pp.log1p(adata)
sc.pp.scale(adata, max_value=10) # scale to unit variance and shift to zero mean
return adata
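    # Summary of run_zheng_Bcell: filter low-count genes, total-count normalise, log-transform,
    # optionally restrict to highly variable genes (HVG selection expects logarithmized data),
    # renormalise per cell after filtering, then scale to unit variance (clipped at 10).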
'''
def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
ad = ad1.copy()
tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
norm_df_pal = pd.DataFrame(ad.X)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.columns = [i for i in ad.var_names]
# print('norm df', norm_df_pal)
norm_df_pal.index = new
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data shape: determined using eigengap', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(true_label, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
start_cell = 'c42' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=ncomps)
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Ldha', 'Foxo1', 'Lig4'] # , 'Slc7a5']#,'Slc7a5']#,'Sp7','Zfp629']
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, Bcell_marker_gene_list])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
'''
def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
# print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
adata_counts = adata_counts1.copy()
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
adata_counts.uns['iroot'] = 33 # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
print('dpt format', adata_counts.obs['dpt_pseudotime'])
plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
plt.title('PAGA DPT')
plt.show()
def find_time_Bcell(s):
start = s.find("Ik") + len("Ik")
end = s.find("h")
return int(s[start:end])
def find_cellID_Bcell(s):
start = s.find("h") + len("h")
end = s.find("_")
return s[start:end]
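# find_time_Bcell / find_cellID_Bcell parse the timepoint and replicate ID out of the count-table
# column names: assuming names like '...Ik24h<replicate>_...', the time is the number between 'Ik'
# and the first 'h', and the cell ID is the text between that 'h' and the first '_'.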
Bcell = pd.read_csv(path + 'genes_count_table.txt', sep='\t')
gene_name = pd.read_csv(path + 'genes_attr_table.txt', sep='\t')
Bcell_columns = [i for i in Bcell.columns]
adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
Bcell_columns.remove('tracking_id')
print(gene_name.shape, gene_name.columns)
Bcell['gene_short_name'] = gene_name['gene_short_name']
adata_counts.var_names = gene_name['gene_short_name']
adata_counts.obs['TimeCellID'] = Bcell_columns
time_list = [find_time_Bcell(s) for s in Bcell_columns]
print('time list set', set(time_list))
adata_counts.obs['TimeStamp'] = [str(tt) for tt in time_list]
ID_list = [find_cellID_Bcell(s) for s in Bcell_columns]
adata_counts.obs['group_id'] = [str(i) for i in time_list]
ID_dict = {}
color_dict = {}
for j, i in enumerate(list(set(ID_list))):
ID_dict.update({i: j})
print('timelist', list(set(time_list)))
for j, i in enumerate(list(set(time_list))):
color_dict.update({i: j})
print('shape of raw data', adata_counts.shape)
adata_counts_unfiltered = adata_counts.copy()
Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
small_large_gene_list = ['Kit', 'Pcna', 'Ptprc', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b', 'Mme',
'Spn']
list_var_names = [s for s in adata_counts_unfiltered.var_names]
matching = [s for s in list_var_names if "IgG" in s]
for gene_name in Bcell_marker_gene_list:
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
for gene_name in small_large_gene_list:
print('looking at small-big list')
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
# diff_list = [i for i in diff_list if i in list_var_names] #based on paper STable1 https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2006506#pbio.2006506.s007
# adata_counts = adata_counts[:,diff_list] #if using these, then set do-HVG to False
print('adata counts difflisted', adata_counts.shape)
adata_counts = run_zheng_Bcell(adata_counts, n_top_genes=5000, min_counts=30,
do_HVG=True) # 5000 for better ordering
print('adata counts shape', adata_counts.shape)
# sc.pp.recipe_zheng17(adata_counts)
# (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
marker_genes = {"small": ['Rag2', 'Rag1', 'Pcna', 'Myc', 'Ccnd2', 'Cdkn1a', 'Smad4', 'Smad3', 'Cdkn2a'],
# B220 = Ptprc, PCNA negative for non cycling
"large": ['Ighm', 'Kit', 'Ptprc', 'Cd19', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b'],
"Pre-B2": ['Mme', 'Spn']} # 'Cd19','Cxcl13',,'Kit'
print('make the v0 matrix plot')
mplot_adata = adata_counts_unfiltered.copy() # mplot_adata is for heatmaps so that we keep all genes
mplot_adata = run_zheng_Bcell(mplot_adata, n_top_genes=25000, min_counts=1, do_HVG=False)
# mplot_adata.X[mplot_adata.X>10] =10
# mplot_adata.X[mplot_adata.X< -1] = -1
# sc.pl.matrixplot(mplot_adata, marker_genes, groupby='TimeStamp', dendrogram=True)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=200) # ncomps
# df_bcell_pc = pd.DataFrame(adata_counts.obsm['X_pca'])
# print('df_bcell_pc.shape',df_bcell_pc.shape)
# df_bcell_pc['time'] = [str(i) for i in time_list]
# df_bcell_pc.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PCs.csv')
# sc.pl.pca_variance_ratio(adata_counts, log=True)
jet = cm.get_cmap('viridis', len(set(time_list)))
cmap_ = jet(range(len(set(time_list))))
jet2 = cm.get_cmap('jet', len(set(ID_list)))
cmap2_ = jet2(range(len(set(ID_list))))
# color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
# sc.pl.heatmap(mplot_adata, var_names = small_large_gene_list,groupby = 'TimeStamp', dendrogram = True)
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(
adata_counts.obsm['X_pca'][:, 0:5])
df_umap = pd.DataFrame(embedding)
# df_umap.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_umap.csv')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
for i in list(set(time_list)):
loc = np.where(np.asarray(time_list) == i)[0]
ax4.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
if i == 0:
for xx in range(len(loc)):
poss = loc[xx]
ax4.text(embedding[poss, 0], embedding[poss, 1], 'c' + str(xx))
ax4.legend()
ax1.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Pcna'].X.flatten(), alpha=1)
ax1.set_title('Pcna, cycling')
ax2.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Vpreb1'].X.flatten(), alpha=1)
ax2.set_title('Vpreb1')
ax3.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Cd24a'].X.flatten(), alpha=1)
ax3.set_title('Cd24a')
# ax2.text(embedding[i, 0], embedding[i, 1], str(i))
'''
for i, j in enumerate(list(set(ID_list))):
loc = np.where(np.asarray(ID_list) == j)
if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
'''
# plt.show()
true_label = time_list
# run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
#run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
print('input has shape', adata_counts.obsm['X_pca'].shape)
input_via = adata_counts.obsm['X_pca'][:, 0:ncomps]
df_input = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
df_annot = pd.DataFrame(['t' + str(i) for i in true_label])
# df_input.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PC_5000HVG.csv')
# df_annot.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_annots.csv')
root_user = [42]
v0 = VIA(input_via, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3, dataset='bcell',
cluster_graph_pruning_std=cluster_graph_pruning_std,
root_user=root_user, preserve_disconnected=True, random_seed=random_seed,
do_impute_bool=True) # *.4#root_user = 34
v0.run_VIA()
super_labels = v0.labels
tsi_list = get_loc_terminal_states(via0=v0, X_input=adata_counts.obsm['X_pca'][:, 0:ncomps])
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, is_coarse=False,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed)
v1.run_VIA()
labels = v1.labels
super_edges = v0.edgelist
# plot gene expression vs. pseudotime
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4', 'Sp7', 'Zfp629'] # irf4 down-up
df_ = pd.DataFrame(adata_counts_unfiltered.X)
|
"""
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import sys
import pandas
from mupit.open_ddd_data import standardise_ddd_de_novos, open_known_genes, \
get_ddd_rates
from mupit.gene_enrichment import analyse_enrichment
from mupit.write_json_probands import write_probands_by_gene
def get_options():
""" get the command line options.
"""
    parser = argparse.ArgumentParser(description="script to analyse enrichment "
        "of de novo mutations in genes in probands")
parser.add_argument("--rates",
help="Path to table of mutation rates.",
default="/lustre/scratch113/projects/ddd/users/jm33/de_novos.ddd_4k.mutation_rates.2015-11-24.txt")
parser.add_argument("--de-novos",
help="Path to DDD de novo dataset.",
default="/lustre/scratch113/projects/ddd/users/jm33/de_novos.ddd_4k.ddd_only.2015-11-24.txt")
parser.add_argument("--validations",
help="Path to validation results.",
default="/lustre/scratch113/projects/ddd/users/jm33/de_novos.validation_results.2015-11-24.txt")
parser.add_argument("--families",
help="Path to families PED file.",
default="/nfs/ddd0/Data/datafreeze/ddd_data_releases/2015-04-13/family_relationships.txt")
parser.add_argument("--trios",
help="Path to file listing complete trios.",
default="/nfs/ddd0/Data/datafreeze/ddd_data_releases/2015-04-13/trios.txt")
parser.add_argument("--known-genes",
help="path to table of known developmental disorder genes.",
default="/lustre/scratch113/projects/ddd/resources/ddd_data_releases/2015-04-13/DDG2P/dd_genes_for_clinical_filter")
parser.add_argument("--diagnosed", help="Path to diagnosed probands file.")
parser.add_argument("--skip-ddd", default=False, action="store_true",
help="whether to remove all the DDD probands, and run with the " \
"external subsets alone.")
parser.add_argument("--external-cohorts",
help="Path to table of proband counts in other published de novo datasets.")
parser.add_argument("--external-variants",
help="Path to table of de novo mutations in other published de novo datasets.")
parser.add_argument("--meta-subset",
help="Comma-separated list of phenotypes eg " \
"intellectual_disability,autism. This list determines the subset " \
"of external studies to include. (defaults to using all subsets, " \
"if the meta-analysis flag is also used).")
parser.add_argument("--out-manhattan", help="Path to put PDF of manhattan plot.")
parser.add_argument("--out-probands-by-gene",
help="Path to put json file of probands per gene.")
parser.add_argument("--out-enrichment",
help="Path to put file of enrichment testing results.")
parser.add_argument("--out-clustering",
help="Path to put file of enrichment testing results.")
args = parser.parse_args()
if not ((args.external_cohorts is None and args.external_variants is None) | \
(args.external_cohorts is not None and args.external_variants is not None)):
sys.exit("You have to either use both of the external-cohorts and "
"external-variants arguments, or not use either of them.")
return args
def count_trios(families_path, trios_path, diagnosed_path,
known_genes_path, meta_cohort=None, meta_variants=None, meta_subset=None, skip_ddd=False):
""" defines the cohort sizes, used to get the overall population size
Args:
families_path: path to DDD family relationships file, in ped format,
containing proband IDs and sex information
trios_path: path to table of probands in complete trios.
diagnosed_path: path to table of probands with diagnoses
known_genes_path: path to table of known developmental disorder genes
meta_cohort: path to table of counts of probands in external exome and
genome sequencing studies.
meta_variants: path to table of de novo mutations from external exome
and genome sequencing studies.
meta_subset: string of comma-separated list of phenotypes to include in
the meta-analysis, or None.
skip_ddd: boolean of whether to not use any DDD probands.
Returns:
dictionary with total counts of trios with male and female offspring
"""
male = 0
female = 0
if not skip_ddd:
ddd_male, ddd_female = count_ddd_trios(families_path, trios_path, diagnosed_path)
male += ddd_male
female += ddd_female
if meta_cohort is not None:
(external_male, external_female) = count_external_trios(meta_cohort,
meta_variants, known_genes_path, diagnosed_path, meta_subset)
male += external_male
female += external_female
return {"male": int(male), "female": int(female)}
def count_ddd_trios(families_path, trios_path, diagnosed_path):
""" count the male and female probands in the complete DDD trios
Args:
families_path: path to DDD family relationships file, in ped format,
containing proband IDs and sex information
trios_path: path to table of probands in complete trios.
diagnosed_path: path to table of probands with diagnoses
Returns:
tuple of male and female proband counts.
"""
    # load proband information, then select the probands who have exome sequence
    # available for both parents.
families = pandas.read_table(families_path, sep="\t")
trios = pandas.read_table(trios_path, sep="\t")
proband_ids = trios["proband_stable_id"]
probands = families[families["individual_id"].isin(proband_ids)]
# get the number of trios studied in our data for each sex
sex = probands["sex"].value_counts()
male = sex[["M"]]
female = sex[["F"]]
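    # indexing with [["M"]] / [["F"]] keeps one-element Series rather than scalars; the
    # subtraction below and the int() conversion in count_trios still work on these.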
if diagnosed_path is not None:
        # exclude probands with diagnoses from the DDD trio counts
diagnosed = pandas.read_table(diagnosed_path, sep="\t")
diagnosed = diagnosed[~diagnosed[["person_id", "sex"]].duplicated()]
male -= sum(diagnosed["sex"].isin(["Male", "male", "M", "m"]))
female -= sum(diagnosed["sex"].isin(["Female", "female", "F", "f"]))
return (male, female)
def count_external_trios(meta_cohort, meta_variants, known_genes_path, diagnosed, meta_subset=None):
""" defines the cohort sizes, used to get the overall population size
Args:
meta_cohort: path to table of counts of probands in external exome and
genome sequencing studies.
meta_variants: path to table of de novo mutations from external exome
and genome sequencing studies.
known_genes_path: path to table of known developmental disorder genes
remove_diagnosed: boolean of whether to remove probands with diagnostic
variants.
meta_subset: string of comma-separated list of phenotypes to include in
the meta-analysis, or None.
Returns:
tuple of male and female proband counts.
"""
    cohorts = pandas.read_table(meta_cohort, sep="\t")
|
# Silence infinite debug/warnings coming from deeppavlov & tf
# nltk uses plain old `print()` so can't silence...
import os
import logging
import warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.disable(logging.CRITICAL)
warnings.filterwarnings('ignore')
import json
import pandas as pd
import whoosh.index
from whoosh.qparser import MultifieldParser, OrGroup
from qa_query import BertSquad
# Force printout of all df columns in console output
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
|
"""
Created by: <NAME>
Sep 10
IEEE Fraud Detection Model
- Features from public kernel
- Shuffle = False
- lgbm
- Remove Low feature importance
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
import lightgbm as lgb
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.1
VERBOSE = 100
EARLY_STOPPING_ROUNDS = 100
RANDOM_STATE = 529
N_THREADS = 48
DEPTH = 14
N_FOLDS = 5
SHUFFLE = False
MODEL_TYPE = "lightgbm"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
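# Only the file handler is attached (the StreamHandler line above is commented out), so log
# messages go to ../logs/<model_number>_<run_id>.log rather than stdout.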
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lightgbm':
EVAL_METRIC = 'auc'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = pd.read_csv(csv_file, index_col=[0])
except FileNotFoundError:
df = pd.DataFrame()
if integer:
value = round(value)
elif digits is not None:
value = round(value, digits)
if drop_incomplete_rows:
df = df.loc[~df['AUC'].isna()]
df.loc[run_id, field] = value # Model number is index
df.to_csv(csv_file)
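# Each update_tracking call upserts a single cell of ../tracking/tracking.csv, with run_id as the
# row index and `field` as the column, creating the file on the first call if it does not exist.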
update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True)
update_tracking(run_id, "n_estimators", N_ESTIMATORS)
update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
update_tracking(run_id, "random_state", RANDOM_STATE)
update_tracking(run_id, "n_threads", N_THREADS)
update_tracking(run_id, "learning_rate", LEARNING_RATE)
update_tracking(run_id, "n_fold", N_FOLDS)
update_tracking(run_id, "model_type", MODEL_TYPE)
update_tracking(run_id, "eval_metric", EVAL_METRIC)
update_tracking(run_id, "depth", DEPTH)
update_tracking(run_id, "shuffle", SHUFFLE)
#####################
# PREPARE MODEL DATA
#####################
folds = KFold(n_splits=N_FOLDS, shuffle=SHUFFLE, random_state=RANDOM_STATE if SHUFFLE else None)  # scikit-learn rejects a random_state when shuffle=False
logger.info('Loading Data...')
train_df = pd.read_parquet('../data/train_FE003.parquet')
test_df = pd.read_parquet('../data/test_FE003.parquet')
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2016, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now,
format_timestamp)
import pandas as pd
import statsmodels.formula.api as sm
utils.setup_logging()
_log = logging.getLogger(__name__)
class PGnEAgent(Agent):
def __init__(self, config_path, **kwargs):
super(PGnEAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
self.site = self.config.get('campus')
self.building = self.config.get('building')
self.temp_unit = self.config.get('temp_unit')
self.power_unit = self.config.get('power_unit')
self.out_temp_name = self.config.get('out_temp_name')
self.power_name = self.config.get('power_name')
self.aggregate_in_min = self.config.get('aggregate_in_min')
self.aggregate_freq = str(self.aggregate_in_min) + 'Min'
self.ts_name = self.config.get('ts_name')
self.window_size_in_day = int(self.config.get('window_size_in_day'))
self.min_required_window_size_in_percent = float(self.config.get('min_required_window_size_in_percent'))
self.interval_in_min = int(self.config.get('interval_in_min'))
self.no_of_recs_needed = 10 # self.window_size_in_day * 24 * (60 / self.interval_in_min)
self.min_no_of_records_needed_after_aggr = int(self.min_required_window_size_in_percent/100 *
self.no_of_recs_needed/self.aggregate_in_min)
self.schedule_run_in_sec = int(self.config.get('schedule_run_in_hr')) * 3600
# Testing
#self.no_of_recs_needed = 200
#self.min_no_of_records_needed_after_aggr = self.no_of_recs_needed/self.aggregate_in_min
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
self.core.periodic(self.schedule_run_in_sec, self.calculate_latest_coeffs)
def calculate_latest_coeffs(self):
unit_topic_tmpl = "{campus}/{building}/{unit}/{point}"
unit_points = [self.power_name]
df = None
#Get data
unit = self.temp_unit
for point in unit_points:
if point == self.power_name:
unit = self.power_unit
unit_topic = unit_topic_tmpl.format(campus=self.site,
building=self.building,
unit=unit,
point=point)
result = self.vip.rpc.call('platform.historian',
'query',
topic=unit_topic,
count=self.no_of_recs_needed,
order="LAST_TO_FIRST").get(timeout=10000)
df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])
df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])
            df2 = df2.groupby([pd.Grouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()  # pd.TimeGrouper was removed in later pandas; pd.Grouper is the replacement
# df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))
df = df2 if df is None else
|
pd.merge(df, df2, how='outer', left_index=True, right_index=True)
|
pandas.merge
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'limin'
import pandas as pd
import datetime
from contextlib import closing
from tqsdk import TqApi, TqSim, TqBacktest, BacktestFinished, TargetPosTask
from tqsdk.tafunc import sma, ema2, trma
from sklearn.ensemble import RandomForestClassifier
pd.set_option('display.max_rows', None)  # set the number of rows pandas will display
pd.set_option('display.width', None)  # set the display width used by pandas
'''
Predict whether a trading day closes up or down with a random forest (using the sklearn package).
Reference: https://www.joinquant.com/post/1571
'''
symbol = "SHFE.ru1811"  # contract code to trade
close_hour, close_minute = 14, 50  # scheduled close time (trading is impossible after the real close, so the close is set slightly earlier)
def get_prediction_data(klines, n):
    """Build the n input samples for the random forest (n = data length): each day's feature values and its up/down label."""
    close_prices = klines.close[- 30 - n:]  # closing prices up to and including today (at the scheduled close time, today is treated as closed)
    # compute the required indicators
    sma_data = sma(close_prices, 30, 0.02)[-n:]  # SMA indicator, default period parameter: 30
    wma_data = ema2(close_prices, 30)[-n:]  # WMA indicator
    mom_data = trma(close_prices, 30)[-n:]  # MOM indicator
    x_all = list(zip(sma_data, wma_data, mom_data))  # feature vectors
    y_all = list(klines.close.iloc[i] >= klines.close.iloc[i - 1] for i in list(reversed(range(-1, -n - 1, -1))))  # labels (did each day close up?)
    # x_all: indicators of 3 days ago, 2 days ago, yesterday, (today)
    # y_all: (3 days ago,) 2 days ago, yesterday, today, -tomorrow-
    # prepare the data needed by the algorithm
    x_train = x_all[: -1]  # training data: features
    x_predict = x_all[-1]  # prediction input (today's indicators predict the next trading day's direction)
    y_train = y_all[1:]  # training data: labels (dropping the first label shifts labels one step ahead of features, e.g. yesterday's features -> today's up/down label)
    return x_train, y_train, x_predict
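# A tiny worked example of the alignment above (hypothetical days d1..d3, n = 3):
#   x_all = [x_d1, x_d2, x_d3]   y_all = [up_d1, up_d2, up_d3]
#   x_train = [x_d1, x_d2], y_train = [up_d2, up_d3]  -> features of day t are paired with day t+1's label
#   x_predict = x_d3                                  -> today's features predict tomorrow's direction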
predictions = []  # records each prediction (at every close we predict the next trading day's direction and append it here)
api = TqApi(TqSim(), backtest=TqBacktest(start_dt=datetime.date(2018, 7, 2), end_dt=datetime.date(2018, 9, 26)))
quote = api.get_quote(symbol)
klines = api.get_kline_serial(symbol, duration_seconds=24 * 60 * 60)  # daily K-lines
target_pos = TargetPosTask(api, symbol)
with closing(api):
try:
while True:
            while not api.is_changing(klines.iloc[-1], "datetime"):  # wait until the next trading day arrives
api.wait_update()
while True:
api.wait_update()
                # after the close, predict the next trading day's direction
                if api.is_changing(quote, "datetime"):
                    now = datetime.datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f")  # time of the current quote
                    # if the scheduled close time has been reached, treat today as closed, predict the next trading day's direction and adjust the position accordingly
                    if now.hour == close_hour and now.minute >= close_minute:
                        # 1 - get the data
                        x_train, y_train, x_predict = get_prediction_data(klines, 75)  # arg 1: K-lines, arg 2: required data length
                        # 2 - predict the next trading day's direction with the machine-learning model
                        # n_estimators: number of (decision) trees in the forest; bootstrap: whether to sample with replacement when building each tree
                        clf = RandomForestClassifier(n_estimators=30, bootstrap=True)
                        clf.fit(x_train, y_train)  # train on the prepared data
                        predictions.append(bool(clf.predict([x_predict])))  # predict on today's features and record the result
                        # 3 - trade on the prediction
                        if predictions[-1] == True:  # predicted up: go long
                            print(quote["datetime"], "next trading day predicted to close UP")
                            target_pos.set_target_volume(10)
                        else:  # predicted down: go short
                            print(quote["datetime"], "next trading day predicted to close DOWN")
                            target_pos.set_target_volume(-10)
break
    except BacktestFinished:  # backtest finished: collect the predictions and compute the accuracy
        klines["pre_close"] = klines["close"].shift(1)  # add a pre_close column (previous trading day's close)
        klines = klines[-len(predictions) + 1:]  # keep only the K-lines inside the backtest period
        klines["prediction"] = predictions[:-1]  # add the predicted direction for each day (shifted back one step so each row holds the prediction made for that day)
results = (klines["close"] - klines["pre_close"] >= 0) == klines["prediction"]
print(klines)
print("----回测结束----")
print("预测结果正误:\n", results)
print("预测结果数目统计: 总计", len(results),"个预测结果")
print(pd.value_counts(results))
print("预测的准确率:")
print((
|
pd.value_counts(results)
|
pandas.value_counts
|
#Creates temperature mean from Tmin and Tmax average
import sys
import numpy as np
import pandas as pd
import rasterio
from osgeo import gdal
from affine import Affine
from pyproj import Transformer
#NAMING SETTINGS & OUTPUT FLAGS----------------------------------------------#
MASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'
CODE_MASTER_DIR = MASTER_DIR + r'air_temp/daily/code/'
RUN_MASTER_DIR = MASTER_DIR + r'air_temp/data_outputs/'
MAP_OUTPUT_DIR = RUN_MASTER_DIR + r'tiffs/daily/county/' #Set subdirectories based on varname and iCode
SE_OUTPUT_DIR = RUN_MASTER_DIR + r'tiffs/daily/county/'
CV_OUTPUT_DIR = RUN_MASTER_DIR + r'tables/loocv/daily/county/'
TIFF_SUFFIX = '.tif'
SE_SUFFIX = '_se.tif'
CV_SUFFIX = '_loocv.csv'
NO_DATA_VAL = -9999
#END SETTINGS----------------------------------------------------------------#
#FUNCTION DEFINITION---------------------------------------------------------#
def get_Coordinates(GeoTiff_name):
# Read raster
with rasterio.open(GeoTiff_name) as r:
T0 = r.transform # upper-left pixel corner affine transform
A = r.read() # pixel values
# All rows and columns
cols, rows = np.meshgrid(np.arange(A.shape[2]), np.arange(A.shape[1]))
# Get affine transform for pixel centres
T1 = T0 * Affine.translation(0.5, 0.5)
# Function to convert pixel row/column index (from 0) to easting/northing
# at centre
def rc2en(r, c): return T1 * (c, r)
# All eastings and northings (there is probably a faster way to do this)
    eastings, northings = np.vectorize(
        rc2en, otypes=[float, float])(rows, cols)
transformer = Transformer.from_proj(
'EPSG:4326',
'+proj=longlat +datum=WGS84 +no_defs +type=crs',
always_xy=True,
skip_equivalent=True)
LON, LAT = transformer.transform(eastings, northings)
return LON, LAT
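# A minimal illustration (with a made-up transform) of why multiplying by Affine.translation(0.5, 0.5)
# above yields pixel-centre coordinates rather than upper-left corners:
#
#   from affine import Affine
#   T0 = Affine(0.00025, 0.0, -156.0,
#               0.0, -0.00025, 20.0)          # hypothetical upper-left corner transform
#   T1 = T0 * Affine.translation(0.5, 0.5)
#   T0 * (0, 0)  # (-156.0, 20.0)             -> corner of the first pixel
#   T1 * (0, 0)  # (-155.999875, 19.999875)   -> centre of the first pixel (shifted by half a pixel)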
def get_island_df(tiff_name,varname):
lon,lat = get_Coordinates(tiff_name)
lon = lon.reshape(-1)
lat = lat.reshape(-1)
df_dict = {'LON':lon,'LAT':lat}
raster_img = rasterio.open(tiff_name)
raster_data = raster_img.read(1)
raster_mask = raster_img.read_masks(1)
raster_mask[raster_mask > 0] = 1
masked_array = raster_data * raster_mask
masked_array[raster_mask == 0] = np.nan
masked_array = masked_array.reshape(-1)
df_dict[varname] = masked_array
island_df =
|
pd.DataFrame.from_dict(df_dict)
|
pandas.DataFrame.from_dict
|
import pandas as pd
df_pub = pd.read_csv('tableS4.Refseq.assembly.summary.tsv',sep='\t')
df_pub['ATCC Catalog'] = df_pub['ATCC Catalog'].apply(lambda x: x.upper()) #set all catalogIDs to uppercase #count = 577
df_port = pd.read_csv('tableS4.atcc.assembly.summary.tsv',sep='\t')
portal_catalog = list(set(list(df_port['Base Catalog Number']))) #get list of catalog IDs on the portal. MAY BE REDUNDANT
#Plan: compute portal/public ratio values for genome length, Illumina N50, contig count, read depth and GC content (GC content is not currently in the public dataset).
#e.g. int(df_pub[df_pub['ATCC Catalog'] == catalog]['Total Length']) / int(df_port[df_port['Base Catalog Number'] == catalog]['Total Length']) #but redundant catalog IDs must be resolved first
#Ideas:
#1. separate into per-assembly-level data frames - still leaves redundancies by catalog ID.
#2. return averaged ratio values when a catalog ID appears more than once (see the sketch below).
#3. both of the above, so values are collected per assembly level; lower levels must then be prevented from repeating higher-level assemblies for the same catalog ID.
#The third option is addressed below.
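#Sketch of the averaging idea above (toy usage of the real column names; assumes one portal assembly per catalog ID):
#   pub_len = df_pub.groupby('ATCC Catalog')['Total Length'].mean()       # collapse repeated public assemblies
#   port_len = df_port.set_index('Base Catalog Number')['Total Length']
#   ratio = port_len / pub_len.reindex(port_len.index)                    # one length ratio per catalog ID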
#set public assembly dataframes
df_pub_complete = df_pub[(df_pub['Assembly Level'] == 'Chromosome') | (df_pub['Assembly Level'] == 'Complete Genome')] #count=234
pub_complete = [x for x in portal_catalog if x in list(df_pub_complete['ATCC Catalog'])] #get "complete" catalog IDs from public assemblies
df_pub_scaffold = df_pub[df_pub['Assembly Level'] == 'Scaffold'] #get down to just scaffolds
pub_scaffold = [x for x in portal_catalog if x in list(df_pub_scaffold['ATCC Catalog'])] #get list of scaffold catalogIDs #count = 107
df_pub_contig = df_pub[df_pub['Assembly Level'] == 'Contig'] #just contigs #should be unnecessary at this stage #count = 81
#totals of dataframes don't add up to top level dataframe count because of assemblies that exist at multiple stages. This approach should only compare the highest quality public assembly to ATCC's.
df_pub_comp_mean = df_pub_complete.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_comp_min = df_pub_complete.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_comp_max = df_pub_complete.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)
df_pub_scaffold_mean = df_pub_scaffold.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_scaffold_min = df_pub_scaffold.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_scaffold_max = df_pub_scaffold.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)
df_pub_contig_mean = df_pub_contig.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_contig_min = df_pub_contig.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_contig_max = df_pub_contig.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)
#set portal assembly dataframes
df_port_complete = df_port
if len(df_port_complete) == len(portal_catalog):
print('Evaluating only top ranked assemblies')
if len(list(set(list(df_port_complete['Base Catalog Number'])))) == len(portal_catalog):
print('No Repeated Catalog IDs will be evaluated')
else:
        print('Some Catalog IDs are present more than once in the top ranked assemblies; their values will be averaged')
else:
port_cat_2 = list(df_port[df_port['Assembly Rank'] == 2]['Base Catalog Number']) #get all second rank assemblies to check that each assembly has a top ranking
if len([x for x in port_cat_2 if x not in list(df_port[df_port['Assembly Rank'] == 1]['Base Catalog Number'])]) == 0:
print('No second rank assemblies without a first rank assembly in the portal assembly list')
#comparisons
#'complete' public assemblies
#genome length #compared by average length #comparison metric could be command line argument and consistent across all fields.
port_length = df_port_complete[['Base Catalog Number', 'Total Length', 'Product Collection']]
pub_length = df_pub_complete[['ATCC Catalog', 'Total Length', 'Assembly Accession', 'Organism']]
port_length.columns = ['Catalog ID', 'Total Length', 'Product Collection']
pub_length.columns = ['Catalog ID', 'Total Length', 'Assembly Accession', 'Organism'] #need same column names to merge dataframes
print('public df length is: {}'.format(len(df_pub)))
print('pub_len length is: {}'.format(len(pub_length)))
print(len(port_length))
df_len = pd.merge(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal','_public'))
df_len['Length Ratio'] = df_len['Total Length_portal'] / df_len['Total Length_public']
df_len['Length Difference'] = df_len['Total Length_portal'] - df_len['Total Length_public']
df_len['Public Assembly Level'] = 'Complete'
df_len.to_csv('Assembly_Complete_length_no_aggregate.comparisons.txt',sep='\t',index=False)
print(len(df_len))
#N50 #compared by max N50
port_N50= df_port_complete[['Base Catalog Number', 'Filtered N50', 'Product Collection']]
pub_N50 = df_pub_complete[['ATCC Catalog', 'Scaffold N50', 'Assembly Accession', 'Organism']]
port_N50.columns = ['Catalog ID', 'N50', 'Product Collection']
pub_N50.columns = ['Catalog ID', 'N50', 'Assembly Accession', 'Organism']
df_N50 = pd.merge(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal','_public'))
df_N50['N50 Ratio'] = df_N50['N50_portal'] / df_N50['N50_public']
df_N50['N50 Difference'] = df_N50['N50_portal'] - df_N50['N50_public']
df_N50['Public Assembly Level'] = 'Complete'
df_N50.to_csv('Assembly_Complete_N50_no_aggregate.comparisons.txt',sep='\t',index=False)
#contig/replicon count #compared by min
port_contig= df_port_complete[['Base Catalog Number', 'Total Contigs', 'Product Collection']]
pub_contig = df_pub_complete[['ATCC Catalog', 'Scaffold Count', 'Assembly Accession', 'Organism']]
port_contig.columns = ['Catalog ID', 'Contig Count', 'Product Collection']
pub_contig.columns = ['Catalog ID', 'Contig Count', 'Assembly Accession', 'Organism']
df_contig =
|
pd.merge(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal','_public'))
|
pandas.merge
|
# coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for feature_engineering.py
"""
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import f_regression
from atom.feature_engineering import (
FeatureExtractor, FeatureGenerator, FeatureSelector,
)
from atom.utils import to_df
from .utils import (
X10_dt, X10_str, X_bin, X_class, X_reg, X_sparse, y_bin, y_class, y_reg,
)
# Test FeatureExtractor ============================================ >>
def test_invalid_encoding_type():
"""Assert that an error is raised when encoding_type is invalid."""
with pytest.raises(ValueError, match=r".*the encoding_type parameter.*"):
FeatureExtractor(encoding_type="invalid").transform(X10_dt)
def test_invalid_features():
"""Assert that an error is raised when features are invalid."""
with pytest.raises(ValueError, match=r".*an attribute of pd.Series.dt.*"):
FeatureExtractor(features="invalid").transform(X10_dt)
def test_wrongly_converted_columns_are_ignored():
"""Assert that columns converted unsuccessfully are skipped."""
extractor = FeatureExtractor()
X = extractor.transform(X10_str)
assert "feature_3" in X.columns
def test_datetime_features_are_used():
"""Assert that datetime64 features are used as is."""
X = to_df(X10_dt.copy())
X["feature_3"] =
|
pd.to_datetime(X["feature_3"])
|
pandas.to_datetime
|
from __future__ import print_function
import sklearn
#%%
import lime
import os
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)  # None (rather than the deprecated -1) disables column-width truncation
pd.set_option('display.max_columns', None)
import sklearn
import sklearn.ensemble
import sklearn.metrics
import seaborn as sns
from scipy.special import softmax
import matplotlib.pyplot as plt
from sklearn.utils import resample
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
import torch.nn as nn
import torch
#%%
import os
os.chdir('/home/ubuntu/transformers/Dataset/SST-2')
# %%
# Set Directory as appropiate
df_ST = pd.read_csv("original_data/train.tsv", sep="\t",nrows=1500)
sns.countplot(df_ST['label'])
plt.show()
#%%
zero = df_ST[df_ST.label==0]
one = df_ST[df_ST.label==1]
# resample class 1 so that both classes have the same number of samples
one_u = resample(one,
                 replace=True, # sample with replacement
                 n_samples=len(zero), # match the number of class-0 samples
                 random_state=42) # reproducible results
# combine class 0 with the resampled class 1
df_ST =
|
pd.concat([zero,one_u])
|
pandas.concat
|
import logging
import networkx as nx
import numpy as np
import pandas as pd
from skimage.feature import greycomatrix
from skimage.measure import shannon_entropy
from pyfibre.model.tools.analysis import (
tensor_analysis, angle_analysis)
from pyfibre.model.tools.feature import greycoprops_edit
from pyfibre.model.tools.filters import form_structure_tensor
from pyfibre.model.tools.utilities import bbox_sample
from pyfibre.utilities import IMAGE_MAX
logger = logging.getLogger(__name__)
STRUCTURE_METRICS = ['Angle SDI', 'Coherence', 'Local Coherence']
SHAPE_METRICS = ['Area', 'Eccentricity', 'Circularity', 'Coverage']
TEXTURE_METRICS = ['Mean', 'STD', 'Entropy']
FIBRE_METRICS = ['Waviness', 'Length']
NETWORK_METRICS = ['Degree', 'Eigenvalue', 'Connectivity',
'Cross-Link Density']
def _region_sample(region, metric):
"""Extract metric values for pixels within segment
Parameters
----------
region: skimage.RegionProperties
Region defining pixels within image to analyse
metric: array-like
Metric for all pixels in image to be analysed
"""
# Identify metrics for pixels within bounding box
metric = bbox_sample(region, metric)
# Return metrics only for pixels within segment
indices = np.where(region.image)
return metric[indices]
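# Self-contained sketch of the same idea without pyfibre's bbox_sample (toy data, for illustration only):
#
#   import numpy as np
#   from skimage.measure import label, regionprops
#   mask = np.zeros((6, 6), dtype=int); mask[2:5, 1:4] = 1
#   metric = np.arange(36, dtype=float).reshape(6, 6)
#   region = regionprops(label(mask))[0]
#   minr, minc, maxr, maxc = region.bbox
#   cropped = metric[minr:maxr, minc:maxc]    # what bbox_sample(region, metric) is assumed to return
#   values = cropped[np.where(region.image)]  # metric values for pixels inside the segment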
def structure_tensor_metrics(structure_tensor, tag=''):
"""Nematic tensor analysis for a scikit-image region"""
database = pd.Series(dtype=object)
    (segment_coher_map,
     segment_angle_map,
     _) = tensor_analysis(structure_tensor)
# Calculate mean structure tensor elements
axis = tuple(range(structure_tensor.ndim - 2))
mean_tensor = np.mean(structure_tensor, axis=axis)
segment_coher, _, _ = tensor_analysis(mean_tensor)
database[f"{tag} Angle SDI"], _ = angle_analysis(
segment_angle_map, segment_coher_map)
database[f"{tag} Coherence"] = segment_coher[0]
database[f"{tag} Local Coherence"] = np.mean(segment_coher_map)
return database
def region_shape_metrics(region, tag=''):
"""Shape analysis for a scikit-image region"""
database = pd.Series(dtype=object)
# Perform all non-intensity image relevant metrics
database[f"{tag} Area"] = region.area
ratio = (np.pi * region.equivalent_diameter) / region.perimeter
database[f"{tag} Circularity"] = ratio
database[f"{tag} Eccentricity"] = region.eccentricity
database[f"{tag} Coverage"] = region.extent
# segment_hu = region.moments_hu
# database[f"{tag} Hu Moment 1"] = segment_hu[0]
# database[f"{tag} Hu Moment 2"] = segment_hu[1]
# database[f"{tag} Hu Moment 3"] = segment_hu[2]
# database[f"{tag} Hu Moment 4"] = segment_hu[3]
return database
def region_texture_metrics(region, image=None, tag='', glcm=False):
"""Texture analysis for a of scikit-image region"""
database = pd.Series(dtype=object)
# Check to see whether intensity_image is present or image argument
# has been supplied
if image is not None:
region_image = bbox_sample(region, image)
else:
region_image = region.intensity_image
# Obtain indices of pixels in region mask
indices = np.where(region.image)
intensity_sample = region_image[indices]
# _, _, database[f"{tag} Fourier SDI"] = (0, 0, 0)
# fourier_transform_analysis(segment_image)
database[f"{tag} Mean"] = np.mean(intensity_sample)
database[f"{tag} STD"] = np.std(intensity_sample)
database[f"{tag} Entropy"] = shannon_entropy(intensity_sample)
if glcm:
glcm = greycomatrix(
(region_image * region.image * IMAGE_MAX).astype('uint8'),
[1, 2], [0, np.pi/4, np.pi/2, np.pi*3/4], 256,
symmetric=True, normed=True)
glcm[0, :, :, :] = 0
glcm[:, 0, :, :] = 0
greycoprops = greycoprops_edit(glcm)
metrics = ["Contrast", "Homogeneity", "Energy",
"Entropy", "Autocorrelation", "Clustering",
"Mean", "Covariance", "Correlation"]
for metric in metrics:
value = greycoprops[metric.lower()].mean()
database[f"{tag} GLCM {metric}"] = value
return database
def network_metrics(network, network_red, n_fibres, tag=''):
"""Analyse networkx Graph object"""
database = pd.Series(dtype=object)
database['No. Fibres'] = n_fibres
cross_links = np.array(
[degree[1] for degree in network.degree],
dtype=int)
database[f"{tag} Network Cross-Link Density"] = (
(cross_links > 2).sum() / n_fibres)
try:
value = nx.degree_pearson_correlation_coefficient(
network, weight='r') ** 2
except Exception as err:
logger.debug(f'Network Degree calculation failed: {str(err)}')
value = None
database[f"{tag} Network Degree"] = value
try:
value = np.real(nx.adjacency_spectrum(network_red).max())
except Exception as err:
logger.debug(f'Network Eigenvalue calculation failed: {str(err)}')
value = None
database[f"{tag} Network Eigenvalue"] = value
try:
value = nx.algebraic_connectivity(network_red, weight='r')
except Exception as err:
logger.debug(f'Network Connectivity calculation failed: {str(err)}')
value = None
database[f"{tag} Network Connectivity"] = value
return database
def fibre_metrics(tot_fibres):
"""Analysis of list of `Fibre` objects
Parameters
----------
tot_fibres : list of `<class: Fibre>`
List of fibre to analyse
Returns
-------
database : DataFrame
Metrics calculated from networkx Graph and scikit-image
regionprops objects
"""
database = pd.DataFrame()
for fibre in tot_fibres:
fibre_series = fibre.generate_database()
database = database.append(
fibre_series, ignore_index=True)
return database
def fibre_network_metrics(fibre_networks):
"""Analysis of list of `FibreNetwork` objects
Parameters
----------
fibre_networks : list of `<class: FibreNetwork>`
List of fibre networks to analyse
Returns
-------
database : DataFrame
Metrics calculated from networkx Graph and scikit-image
regionprops objects
"""
database = pd.DataFrame()
for i, fibre_network in enumerate(fibre_networks):
# if segment.filled_area >= 1E-2 * image_shg.size:
fibre_network_series = pd.Series(name=i, dtype=object)
metrics = fibre_network.generate_database()
fibre_network_series = pd.concat(
(fibre_network_series, metrics))
database = database.append(
fibre_network_series, ignore_index=True)
return database
def segment_metrics(segments, image, image_tag=None, sigma=0.0001):
"""Analysis of a list of `BaseSegment` objects
Parameters
----------
segments : list of `<class: BaseSegment>`
List of cells to analyse
image: array-like
Full image to analyse
Returns
-------
database : DataFrame
Metrics calculated from scikit-image
regionprops objects
"""
database = pd.DataFrame()
structure_tensor = form_structure_tensor(image, sigma)
for index, segment in enumerate(segments):
segment_series = segment.generate_database(
image_tag=image_tag)
if image_tag is not None:
tensor_tag = ' '.join([segment.tag, 'Segment', image_tag])
else:
tensor_tag = ' '.join([segment.tag, 'Segment'])
# Only use pixel tensors in segment
segment_tensor = _region_sample(
segment.region, structure_tensor)
nematic_metrics = structure_tensor_metrics(
segment_tensor, tensor_tag)
segment_series =
|
pd.concat((segment_series, nematic_metrics))
|
pandas.concat
|
import string
from itertools import product
import numpy as np
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
import pandas as pd
from .pandas_vb_common import setup # noqa
class Melt(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
self.df['id1'] = np.random.randint(0, 10, 10000)
self.df['id2'] = np.random.randint(100, 1000, 10000)
def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
class Pivot(object):
goal_time = 0.2
def setup(self):
N = 10000
index = date_range('1/1/2000', periods=N, freq='h')
data = {'value': np.random.randn(N * 50),
'variable': np.arange(50).repeat(N),
'date': np.tile(index.values, 50)}
self.df = DataFrame(data)
def time_reshape_pivot_time_series(self):
self.df.pivot('date', 'variable', 'value')
class SimpleReshape(object):
goal_time = 0.2
def setup(self):
arrays = [np.arange(100).repeat(100),
np.roll(np.tile(np.arange(100), 100), 25)]
index =
|
MultiIndex.from_arrays(arrays)
|
pandas.MultiIndex.from_arrays
|
import pandas as pd
import numpy as np
import pdb
import os
#load metadata
metadata = pd.read_csv("../../metadata/lake_metadata.csv")
#trim to observed lakes
metadata = metadata[metadata['num_obs'] > 0]
# obs = pd.read_feather("../../data/raw/obs/surface_lake_temp_daily_020421.feather")
obs = pd.read_csv("../../data/raw/obs/lake_surface_temp_obs.csv")
site_ids = np.unique(obs['site_id'].values)
print(len(site_ids), ' lakes')
# site_ids = metadata['site_id'].values #CHANGE DIS----------
n_folds = 5
combined_df = pd.DataFrame()
combined_lm = pd.DataFrame()
combined_gb = pd.DataFrame()
combined_ea = pd.DataFrame()
folds_arr = np.arange(n_folds)+1
for k in folds_arr:
print("fold ",k)
lm_df = pd.read_feather("../../results/bachmann_fold"+str(k)+"_all_season.feather")
ea_df = pd.read_feather("../../results/err_est_outputs_072621_EALSTM_fold"+str(k)+"_oversamp_norm2.feather")
ea_df = pd.merge(ea_df,lm_df,left_on=['Date','site_id'],right_on=['date','site_id'],how='left')
combined_ea = combined_ea.append(ea_df)
combined_ea.reset_index(inplace=True,drop=True)
combined_df['Date'] = combined_ea['Date']
combined_df['site_id'] = combined_ea['site_id']
combined_df['wtemp_predicted-ealstm'] = combined_ea['wtemp_predicted']
combined_df['wtemp_predicted-linear_model'] = combined_ea['temp_pred_lm']
combined_df['wtemp_actual'] = combined_ea['wtemp_actual']
combined_df.reset_index(inplace=True)
combined_df.to_feather("../../results/all_outputs_and_obs_wBachmann_allSeason.feather")
combined_df.to_csv("../../results/all_outputs_and_obs_wBachmann_allSeason.csv")
combined_df = pd.read_feather("../../results/all_outputs_and_obs_wBachmann.feather")
per_site_df =
|
pd.DataFrame(columns=['site_id','n_obs','rmse_ealstm','rmse_lm'])
|
pandas.DataFrame
|
import pandas as pd
import math
import astropy as ast
import numpy as np
from astropy.time import Time
import matplotlib.pylab as plt
from astropy import units as u
from astropy.io import fits
import warnings
from lstchain.reco.utils import get_effective_time,add_delta_t_key
from lstchain.io.io import dl2_params_lstcam_key,dl2_params_src_dep_lstcam_key, get_srcdep_params
import os
class ReadFermiFile():
def __init__(self, file):
if 'fits' not in file:
raise ValueError('No FITS file provided for Fermi-LAT data')
else:
self.fname=file
def read_file(self):
f=fits.open(self.fname)
fits_table=f[1].data
return(fits_table)
def create_df_from_info(self,fits_table):
time=fits_table['BARYCENTRIC_TIME'].byteswap().newbyteorder()
phases=fits_table['PULSE_PHASE'].byteswap().newbyteorder()
energies=fits_table['ENERGY'].byteswap().newbyteorder()
dataframe = pd.DataFrame({"mjd_time":time,"pulsar_phase":phases,"dragon_time":time*3600*24,"energy":energies/1000})
dataframe=dataframe.sort_values(by=['mjd_time'])
self.info=dataframe
return(self.info)
def calculate_tobs(self):
diff=np.array(self.info['mjd_time'].to_list()[1:])-np.array(self.info['mjd_time'].to_list()[0:-1])
        diff[diff>5/24]=0  # ignore gaps longer than 5 hours (mjd_time is in days)
return(sum(diff)*24)
def run(self):
print(' Reading Fermi-LAT data file')
ftable=self.read_file()
self.create_df_from_info(ftable)
self.tobs=self.calculate_tobs()
print(' Finishing reading. Total time is '+str(self.tobs)+' h'+'\n')
class ReadLSTFile():
def __init__(self, file=None, directory=None,src_dependent=False):
if file==None and directory==None:
raise ValueError('No file provided')
elif file is not None and directory is not None:
raise ValueError('Can only provide file or directory, but not both')
elif file is not None:
if 'h5' not in file:
raise ValueError('No hdf5 file provided for LST data')
else:
self.fname=file
elif directory is not None:
self.direc=directory
self.fname=[]
for x in os.listdir(self.direc):
rel_dir = os.path.relpath(self.direc)
rel_file = os.path.join(rel_dir, x)
if 'h5' in rel_file:
self.fname.append(rel_file)
self.fname.sort()
self.info=None
self.src_dependent=src_dependent
def read_LSTfile(self,fname,df_type='short'):
if self.src_dependent==False:
df=pd.read_hdf(fname,key=dl2_params_lstcam_key)
elif self.src_dependent==True:
srcindep_df=pd.read_hdf(fname,key=dl2_params_lstcam_key,float_precision=20)
on_df_srcdep=get_srcdep_params(fname,'on')
if 'reco_energy' in srcindep_df.keys():
srcindep_df.drop(['reco_energy'])
if 'gammaness' in srcindep_df.keys():
srcindep_df.drop(['gammaness'])
df = pd.concat([srcindep_df, on_df_srcdep], axis=1)
if df_type=='short':
if 'alpha' in df and 'theta2' in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alpha","theta2","alt_tel"]]
elif 'alpha' in df and 'theta2' not in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alpha","alt_tel"]]
elif 'theta2' in df and 'alpha' not in df:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","theta2","alt_tel"]]
else:
df_filtered=df[["mjd_time","pulsar_phase", "dragon_time","gammaness","alt_tel"]]
try:
df_filtered['energy']=df['reco_energy']
except:
df_filtered['energy']=df['energy']
else:
df_filtered = df
df_filtered['energy']=df['reco_energy']
df_filtered=add_delta_t_key(df_filtered)
return(df_filtered)
def calculate_tobs(self):
dataframe=add_delta_t_key(self.info)
return(get_effective_time(dataframe)[1].value/3600)
def run(self,pulsarana,df_type='long'):
print(' Reading LST-1 data file')
if isinstance(self.fname,list):
info_list=[]
for name in self.fname:
try:
info_file=self.read_LSTfile(name,df_type)
self.info=info_file
self.tobs=self.calculate_tobs()
pulsarana.cuts.apply_fixed_cut(self)
if pulsarana.cuts.energy_binning_cut is not None:
pulsarana.cuts.apply_energydep_cuts(self)
info_list.append(self.info)
except:
raise ValueError('Failing when reading:'+ str(name))
self.info=pd.concat(info_list)
self.tobs=self.calculate_tobs()
else:
self.info=self.read_LSTfile(self.fname,df_type)
self.tobs=self.calculate_tobs()
print(' Finishing reading. Total time is '+str(self.tobs)+' h')
pulsarana.cuts.apply_fixed_cut(self)
if pulsarana.cuts.energy_binning_cut is not None:
pulsarana.cuts.apply_energydep_cuts(self)
print(' Finishing filtering events:')
print(' gammaness cut:'+str(pulsarana.cuts.gammaness_cut))
print(' alpha cut:'+str(pulsarana.cuts.alpha_cut))
print(' theta2 cut:'+str(pulsarana.cuts.theta2_cut))
print(' zd cut:'+str(pulsarana.cuts.zd_cut))
print(' energy binning for the cuts:'+str(pulsarana.cuts.energy_binning_cut))
print('\n')
class ReadtxtFile():
def __init__(self, file,format_txt):
self.fname=file
self.format=format_txt
def read_file(self):
        data = pd.read_csv(self.fname, sep=" ", header=None)
return(data)
def check_format(self):
for name in ['t','p']:
if name not in self.format:
raise ValueError(' No valid format')
def create_df_from_info(self,df):
for i in range(0,len(self.format)):
if self.format[i]=='t':
times=df.iloc[:, i]
elif self.format[i]=='e':
energies=df.iloc[:, i]
elif self.format[i]=='p':
phases=df.iloc[:, i]
elif self.format[i]=='g':
gammaness=df.iloc[:, i]
elif self.format[i]=='a':
alphas=df.iloc[:, i]
elif self.format[i]=='t2':
theta2=df.iloc[:, i]
elif self.format[i]=='at':
alt_tel=df.iloc[:, i]
dataframe = pd.DataFrame({"mjd_time":times,"pulsar_phase":phases,"dragon_time":times*3600*24,"energy":energies})
try:
dataframe['gammaness']=gammaness
except:
pass
try:
            dataframe['alpha']=alphas
except:
pass
try:
dataframe['theta2']=theta2
except:
pass
try:
dataframe['alt_tel']=alt_tel
except:
pass
dataframe=dataframe.sort_values(by=['mjd_time'])
self.info=dataframe
def calculate_tobs(self):
diff=np.array(self.info['mjd_time'].to_list()[1:])-np.array(self.info['mjd_time'].to_list()[0:-1])
return(sum(diff)*24)
def run(self):
data=self.read_file()
self.check_format()
self.create_df_from_info(data)
self.tobs=self.calculate_tobs()
        print(' Finishing reading. Total time is '+str(self.tobs)+' h'+'\n')
class ReadList():
def __init__(self, phases_list, time_list=None,energy_list=None,tel='LST'):
self.plist=phases_list
self.tlist=time_list
self.elist=energy_list
self.tel=tel
def create_df_from_info(self):
dataframe =
|
pd.DataFrame({"mjd_time":self.tlist,"pulsar_phase":self.plist,"dragon_time":self.tlist*3600*24,"energy":self.elist})
|
pandas.DataFrame
|
import argparse
import collections
import os
import json
import logging
import re
import pathlib
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format='%(asctime)s (%(levelname)s) %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument('--db_path', type=str, default=None)
parser.add_argument('--input_folder', type=str, default=None)
parser.add_argument('--output_folder', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=int(1e4))
parser.add_argument(
'--method',
type=str,
choices=['codon_bias', 'tri_nucleotide_bias'],
default='codon_bias',
)
args = parser.parse_args()
db_path = args.db_path
input_folder = args.input_folder
output_folder = args.output_folder
batch_size = args.batch_size
method = args.method
if db_path is None:
db_path = os.path.join(os.getcwd(), 'data/db/seq.db')
if input_folder is None:
input_folder = os.path.join(os.getcwd(), 'data/Large_EBMC_Bact_DB')
if output_folder is None:
output_folder = os.path.join(os.getcwd(), f'data/domains/{method}')
engine = create_engine(f'sqlite+pysqlite:///{db_path}')
known_assemblies = set(pd.read_sql(
'select assembly_accession from assembly_source',
engine,
)['assembly_accession'].values)
process_batch = process_batch_fn(engine, known_assemblies, output_folder, method)
for batch_df in process_files(input_folder, known_assemblies, batch_size):
process_batch(batch_df)
logger.info('DONE')
def process_batch_fn(engine, known_assemblies, output_folder, method):
pfam_folder = os.path.join(output_folder, 'pfam')
tigr_folder = os.path.join(output_folder, 'tigr')
seen_tigr_assemblies = set()
seen_pfam_assemblies = set()
assembly_to_protein_ids = {}
assembly_to_top_protein_ids = {}
def fn(batch_df):
assemblies = set(batch_df.index.tolist())
matching_assemblies = sorted(assemblies & known_assemblies)
if len(matching_assemblies) == 0:
return
record_type = batch_df.iloc[0]['record_type']
for assembly_accession in matching_assemblies:
if assembly_accession not in assembly_to_protein_ids:
all_protein_ids, top_protein_ids = load_protein_ids(assembly_accession, method)
assembly_to_protein_ids[assembly_accession] = all_protein_ids
assembly_to_top_protein_ids[assembly_accession] = set(top_protein_ids)
all_protein_ids = assembly_to_protein_ids[assembly_accession]
top_protein_ids = assembly_to_top_protein_ids[assembly_accession]
df = batch_df.loc[[assembly_accession]].copy()
if len(df) == 0:
continue
df['below_threshold'] = df['protein_id'].apply(lambda p: p in top_protein_ids)
if record_type == 'pfam':
pfam_path = os.path.join(pfam_folder, f'{assembly_accession}_protein_domains.csv')
header = assembly_accession not in seen_pfam_assemblies
seen_pfam_assemblies.add(assembly_accession)
df.to_csv(pfam_path, index=True, mode='a', header=header)
elif record_type == 'tigr':
tigr_path = os.path.join(tigr_folder, f'{assembly_accession}_protein_domains.csv')
header = assembly_accession not in seen_tigr_assemblies
seen_tigr_assemblies.add(assembly_accession)
df.to_csv(tigr_path, index=True, mode='a', header=header)
return fn
def process_files(folder, known_assemblies, batch_size, skiplines=4, n_cols=19):
files = []
for p in pathlib.Path(folder).iterdir():
if p.is_file() and ('pfam' in p.name.lower() or 'tigr' in p.name.lower()):
files.append(p)
logger.info(f'Processing {len(files)} files')
p = r'\s+'.join([r'([^\s]+)' for _ in range(n_cols)])
pattern = f'^{p}$'
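    # For example, with n_cols=3 this builds:
    #   ^([^\s]+)\s+([^\s]+)\s+([^\s]+)$
    # i.e. a line of exactly n_cols whitespace-separated fields, each captured as a group.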
batch = []
n_records = 0
for i, path in enumerate(sorted(files, key=lambda p: p.name)):
logger.info(f'Processing file {i+1} / {len(files)}: {path.name}')
if 'pfam' in path.name.lower():
record_type = 'pfam'
else:
record_type = 'tigr'
line_nb = 0
with path.open() as f:
for line in f:
line_nb += 1
if line_nb < skiplines:
continue
m = re.match(pattern, line)
if m is None:
continue
n_records += 1
if n_records % int(1e5) == 0:
logger.info(f'{n_records:,} records processed')
row = [m[i+1] for i in range(n_cols)]
first_el = row[0]
a, genome_accession = tuple(first_el.split('$'))
if genome_accession not in known_assemblies:
continue
_, protein_id = tuple(a.split('@'))
protein_label = row[-1] if row[-1] != '-' else None
pfam_query = row[2]
pfam_accession = row[3]
data_row = [
genome_accession,
protein_id,
record_type,
pfam_query,
pfam_accession,
protein_label,
]
batch.append(data_row)
if len(batch) >= batch_size:
batch_df = prepare_batch(batch)
yield batch_df
batch = []
if len(batch) > 0:
yield prepare_batch(batch)
batch = []
if len(batch) > 0:
yield prepare_batch(batch)
logger.info(f'Total number of records: {n_records:,}')
return
def load_protein_ids(assembly_accession, method):
if method == 'codon_bias':
return load_codon_bias_protein_ids(assembly_accession)
else:
return load_tri_nucleotide_bias_protein_ids(assembly_accession)
def load_codon_bias_protein_ids(assembly_accession):
path_all = os.path.join(os.getcwd(), f'data/cds_codon_bias/all/{assembly_accession}_codon_bias.csv')
path_below = os.path.join(os.getcwd(), f'data/cds_codon_bias/below_threshold/{assembly_accession}_codon_bias.csv')
all_protein_ids = [
p_id.strip() for p_id in pd.read_csv(path_all)['protein_id'].unique()
if not pd.isnull(p_id)
]
top_protein_ids = [
p_id.strip() for p_id in pd.read_csv(path_below)['protein_id'].unique()
if not
|
pd.isnull(p_id)
|
pandas.isnull
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pyspark
import matplotlib.pyplot as plt
# In[131]:
from wordcloud import WordCloud
# In[2]:
import csv
# In[121]:
import numpy as np
# In[3]:
import pandas as pd
# In[4]:
from pyspark import SparkConf,SparkContext,SQLContext
# In[5]:
from pyspark import SparkConf,SparkContext,SQLContext
# In[6]:
conf=SparkConf()
# In[7]:
context=SparkContext(conf=conf)
# In[8]:
from pyspark.ml.feature import VectorAssembler
# In[9]:
from pyspark.ml.feature import FeatureHasher
# In[10]:
from pyspark.ml.feature import Tokenizer,HashingTF,IDF
# In[11]:
from pyspark.ml.classification import NaiveBayes, NaiveBayesModel
# In[12]:
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
# In[13]:
from pyspark.mllib.util import MLUtils
# In[14]:
from pyspark.ml import Pipeline
# In[15]:
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# In[16]:
from pyspark.sql.functions import col, lit
# In[17]:
sql=SQLContext(context)
# In[18]:
rddnew=context.textFile("/home/salman/Downloads/spamraw.csv")
# In[19]:
def vectorize_data(inputStr) :
attribute_split = inputStr.split(",")
spam_or_ham = 0.0 if attribute_split[0] == "ham" else 1.0
return [spam_or_ham, attribute_split[1]]
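# Illustrative examples (made-up inputs):
#   vectorize_data("ham,how are you")      -> [0.0, "how are you"]
#   vectorize_data("spam,win a prize now") -> [1.0, "win a prize now"]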
# In[20]:
vectorized = rddnew.map(vectorize_data)
# In[21]:
dfnew = sql.createDataFrame(vectorized, ["label", "message"])
# In[22]:
dfnew.show()
# In[43]:
(training_data, test_data) = dfnew.randomSplit([0.7, 0.3])
# In[100]:
df=dfnew.toPandas()
# In[101]:
spam_words = ' '.join(list(df[df['label'] == 1]['message']))
spam_wc = WordCloud(width = 512,height = 512).generate(spam_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(spam_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
# In[102]:
ham_words = ' '.join(list(df[df['label'] == 0]['message']))
ham_wc = WordCloud(width = 512,height = 512).generate(ham_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(ham_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
# In[44]:
tokenizer = Tokenizer(inputCol="message",outputCol="tokenized")
# In[45]:
hasher = HashingTF(inputCol = tokenizer.getOutputCol(), outputCol = "frequency")
# In[46]:
idf = IDF(inputCol = hasher.getOutputCol(), outputCol = "features")
# In[47]:
from pyspark.ml.classification import RandomForestClassifier
# In[48]:
from pyspark.ml.classification import LinearSVC
# In[49]:
from pyspark.ml.classification import NaiveBayes
# In[50]:
lsvc = LinearSVC(maxIter=10, regParam=0.1)
# In[51]:
rf = RandomForestClassifier(labelCol="label", featuresCol="features", numTrees = 100, maxDepth = 4, maxBins = 32)
# In[52]:
nb = NaiveBayes(smoothing=1.0, modelType="multinomial")
# In[53]:
pipelinerf = Pipeline(stages=[tokenizer,hasher,idf,rf])
# In[54]:
pipelinelsvc1 = Pipeline(stages=[tokenizer,hasher,idf,lsvc])
# In[55]:
pipelinenb = Pipeline(stages=[tokenizer,hasher,idf,nb])
# In[56]:
modelrf = pipelinerf.fit(training_data)
# In[57]:
modellsvcc = pipelinelsvc1.fit(training_data)
# In[58]:
modelnb = pipelinenb.fit(training_data)
# In[59]:
predictionrf = modelrf.transform(test_data)
# In[60]:
predictionlsvc= modellsvcc.transform(test_data)
# In[61]:
predictionnb = modelnb.transform(test_data)
# In[62]:
selectedrf = predictionrf.select("label", "message", "probability", "prediction")
# In[63]:
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction",
metricName="accuracy")
# In[64]:
accuracyrf = evaluator.evaluate(predictionrf)
# In[65]:
accuracylsvc= evaluator.evaluate(predictionlsvc)
# In[66]:
accuracynb= evaluator.evaluate(predictionnb)
# In[67]:
print("Test set accuracy = " + str(accuracyrf))
# In[68]:
print("Test set accuracy = " + str(accuracylsvc))
#
# In[69]:
print("Test set accuracy = " + str(accuracynb))
# In[ ]:
# In[70]:
pdd=
|
pd.DataFrame([["", ""]], columns=("label","message"))
|
pandas.DataFrame
|