prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold, StratifiedKFold, ShuffleSplit, RepeatedStratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
st.write("""
# HealthCare App: Predict chance of Heart Attack
""")
st.sidebar.header("User Input Features")
st.sidebar.markdown("""
[Example CSV input file](https://github.com/update-ankur/Health-Care/blob/main/Dataset/heart.csv)
""")
#collect data from User
def user_input_feature():
age=st.sidebar.slider("Age",10,100,step=1)
sex=st.sidebar.selectbox("Sex",('Male','Female'))
cp=st.sidebar.slider("Chest pain type",0,3,1)
trestbps=st.sidebar.slider("resting blood pressure",0,200,1)
chol=st.sidebar.slider("serum cholestoral in mg/dl",0,564,1)
fbs=st.sidebar.slider("fasting blood sugar",0,1,1)
restecg=st.sidebar.slider("resting electrocardiographic results",0,2,1)
thalach=st.sidebar.slider("maximum heart rate achieved",0,564,1)
exang=st.sidebar.slider("exercise induced angina",0,2,1)
oldpeak=st.sidebar.slider("oldpeak = ST depression induced by exercise relative to rest",0.0,10.0,.1)
slope=st.sidebar.slider("the slope of the peak exercise ST segment",0,2,1)
ca=st.sidebar.slider("number of major vessels (0-3) colored by fluoroscopy",0,3,1)
thal=st.sidebar.slider("thal (thalassemia)",0,2,1)
data = {'age':age,
'sex':sex,
'cp':cp,
'trestbps':trestbps,
'chol':chol,
'fbs':fbs,
'restecg':restecg,
'thalach':thalach,
'exang':exang,
'oldpeak':oldpeak,
'slope':slope,
'ca':ca,
'thal':thal
}
features=pd.DataFrame(data,index=[0])
return features
upload_file=st.sidebar.file_uploader("upload your csv file.")
if upload_file is not None:
input_df=
| pd.read_csv(upload_file) | pandas.read_csv |
"""
Arguments accepted in command
-----------------------------------------------------------
batch_file: .csv file specifying tasks formatted as follows:
taskID,model_file,number_of_repeats
--workspace_dir
--result_file_prefix
--result_dir
--max_workers
Report related arguments accepted in command
----------------------------------------------------------------------------------
--task_log: specify the path to the task log formatted as the following csv format:
taskID,order_of_repeat,cpu_time_start,cpu_time_end,cpu_time_spent
--speedup_summary: specify the path to the speedup summary:
wall_clock_time_start,wall_clock_time_end,wall_clock_time,total_cpu_time,speedup_ratio
"""
import os
import sys
import argparse
import time
import pandas as pd
from mpi4py.futures import MPIPoolExecutor
from ..utility.path_manipulate import expandabspath as eap
DEFAULT_COPASI_PATH = 'CopasiSE'
DEFAULT_OUTPUT_DIR = 'results'
DEFAULT_RESULT_PREFIX = 'results'
DEFAULT_REPEATS_OF_TASK = 1
DEFAULT_MAX_WORKERS = 1
TASK_ENV = {
'BATCH_FILE':
None,
'WORKSPACE_DIR':
os.getcwd(),
'MODEL_FILE_PATH':
None,
'MODEL_FILE_DIR':
None,
'MODEL_FILENAME':
None,
'RESULT_DIR':
os.getcwd(),
'REPEATS_OF_TASK':
DEFAULT_REPEATS_OF_TASK,
'MAX_WORKERS':
DEFAULT_MAX_WORKERS,
'COPASI_PATH':
DEFAULT_COPASI_PATH,
'CONFIGDIR':
'{}/{}'.format(os.getenv('HOME'),
'COPASI-4.29.228-Linux-64bit/share/copasi/config')
}
task_log = None
speedup_summary = None
def run_model(taskID, order_of_repeat, TASK_ENV):
os.chdir(TASK_ENV['MODEL_FILE_DIR'])
output_path = '{}/{}.dat{}'.format(TASK_ENV['RESULT_DIR'],
TASK_ENV['MODEL_FILENAME'],
order_of_repeat)
cmd = '{} {} --report-file {} --nologo --copasidir {}'.format(
TASK_ENV['COPASI_PATH'], TASK_ENV['MODEL_FILE_PATH'], output_path,
TASK_ENV['CONFIGDIR'])
start_time = time.perf_counter()
os.system(cmd)
end_time = time.perf_counter()
time_spent = end_time - start_time
return taskID, order_of_repeat, start_time, end_time, time_spent
def get_parser():
"""
batch_file
--workspace_dir
--result_dir
--task_log
--speedup_summary
--max_workers
"""
parser = argparse.ArgumentParser()
parser.add_argument('--copasi_path',
'-c',
type=str,
default=DEFAULT_COPASI_PATH,
help='The path to the CopasiSE executable.')
parser.add_argument('batch_file', type=str, help='Path to the batch file.')
parser.add_argument('--workspace_dir',
'-F',
type=str,
default=os.getcwd(),
help='Directory of the workspace. A new one \
will be created if it does not exist.')
parser.add_argument(
'--result_dir',
'-d',
type=str,
default='results',
help='The directory to which the results will be saved.')
parser.add_argument('--task_log',
'-l',
type=str,
default=None,
help='The path to which the task log will be saved.')
parser.add_argument(
'--speedup_summary',
'-s',
type=str,
default=None,
help='The path to which the speedup summary will be saved.')
parser.add_argument('--max_workers',
'-w',
type=int,
default=DEFAULT_MAX_WORKERS,
help='The number of max_workers.')
return parser
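# Hypothetical usage sketch. The script name "run_batch.py" is an assumption,
# and the exact MPI launch command depends on your MPI installation;
# mpi4py.futures is commonly started like this:
#
#     mpiexec -n 1 python -m mpi4py.futures run_batch.py batch.csv \
#         -F ./workspace -d results -l tasks.log -s speedup.csv -w 4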
def set_env(args):
global task_log, speedup_summary
TASK_ENV['BATCH_FILE'] = eap(args.batch_file)
TASK_ENV['WORKSPACE_DIR'] = eap(args.workspace_dir)
if not os.path.exists(TASK_ENV['WORKSPACE_DIR']):
os.makedirs(TASK_ENV['WORKSPACE_DIR'])
os.chdir(TASK_ENV['WORKSPACE_DIR'])
TASK_ENV['RESULT_DIR'] = args.result_dir
TASK_ENV['MAX_WORKERS'] = args.max_workers
TASK_ENV['COPASI_PATH'] = args.copasi_path
# derive default report paths from the batch file name when they are not given
if args.task_log is None or args.speedup_summary is None:
    _, batch_filename = os.path.split(TASK_ENV['BATCH_FILE'])
if args.task_log is None:
    task_log = os.path.abspath('{}_{}_workers.log'.format(
        batch_filename, args.max_workers))
else:
    task_log = args.task_log
if args.speedup_summary is None:
    speedup_summary = os.path.abspath('{}_{}_speedup_summary'.format(
        batch_filename, args.max_workers))
else:
    speedup_summary = args.speedup_summary
def get_task_list(df):
"""
input
-------------------------------------------------------------
df: pandas DataFrame. Generated by reading the batch file.
output
----------------------------------------------------------------
taskID_list: list of int. ID of the task specified in the batch file.
order_of_repeat_list: list of int. Order of repeat of the task.
task_env_list: list of dict. Task environment settings.
task_stats: dict. Task stats, including the number of different tasks and
the number of repeats of each task.
"""
taskID_list = []
order_of_repeat_list = []
task_env_dict = {'different_tasks':0}
task_env_list = []
task_stats = {'different_tasks':0}
for i in range(len(df)):
number_of_repeats = df.at[i, 'number_of_repeats']
taskID = df.at[i, 'taskID']
taskID_list.extend([taskID] * number_of_repeats)
if taskID not in task_stats:
task_stats['different_tasks'] += 1
task_stats[taskID] = 0
task_env = TASK_ENV.copy()
task_env_dict[taskID] = task_env
else:
task_env = task_env_dict[taskID]
order_of_repeat_list.extend(range(task_stats[taskID], task_stats[taskID] + number_of_repeats))
task_stats[taskID] += number_of_repeats
task_env['MODEL_FILE_PATH'] = eap(df.at[i, 'model_file'])
task_env['MODEL_FILE_DIR'], task_env['MODEL_FILENAME'] = os.path.split(
task_env['MODEL_FILE_PATH'])
os.chdir(task_env['MODEL_FILE_DIR'])
if not os.path.exists(task_env['RESULT_DIR']):
os.makedirs(task_env['RESULT_DIR'])
task_env_list.extend([task_env] * number_of_repeats)
return taskID_list, order_of_repeat_list, task_env_list, task_stats
def main():
# get batch file via command
parser = get_parser()
args = parser.parse_args()
set_env(args)
# check if the batch file exists
if not os.path.exists(TASK_ENV['BATCH_FILE']):
print('Batch file does not exist.')
sys.exit(1)
"""
read the .csv batch file:
taskID model_file number_of_repeats
------------------------------------------------------------------------------------------------
ID of the task path to the .cps file number of repeats of corresponding task specified by the .cps file
Comment lines start with '#'.
"""
df = pd.read_csv(TASK_ENV['BATCH_FILE'], comment='#')
# generate MPI task list
taskID_list, order_of_repeat_list, task_env_list, task_stats = get_task_list(df)
wall_clock_time_start = time.perf_counter()
with MPIPoolExecutor(max_workers=TASK_ENV['MAX_WORKERS']) as executor:
result_set = executor.map(run_model,
taskID_list,
order_of_repeat_list,
task_env_list,
unordered=True,
chunksize=1)
""" for res in executor.map(run_model,
taskID_list,
order_of_repeat_list,
task_env_list,
unordered=True,
chunksize=1):
res_df.append(res) """
res_df = pd.DataFrame(result_set, columns=[
'taskID', 'order_of_repeat', 'cpu_time_start',
'cpu_time_end', 'cpu_time_spent'])
wall_clock_time_end = time.perf_counter()
res_df.to_csv(task_log, index=False)
# wall_clock_time_start,wall_clock_time_end,wall_clock_time,total_cpu_time,speedup_ratio
wall_clock_time = wall_clock_time_end - wall_clock_time_start
total_cpu_time = sum(res_df['cpu_time_spent'])
speedup_ratio = total_cpu_time / wall_clock_time
d = {
'wall_clock_time_start': wall_clock_time_start,
'wall_clock_time_end': wall_clock_time_end,
'wall_clock_time': wall_clock_time,
'total_cpu_time': total_cpu_time,
'speedup_ratio': speedup_ratio
}
summary_df =
| pd.DataFrame(data=d, index=[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels don't matter which way they are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# difference called with non-MultiIndex values works (no exception raised)
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate NaN insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skipna"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
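# Illustrative sketch (not part of the original suite): a hand-rolled call to
# assert_stat_op_calc. The tiny frame below is only a stand-in for the
# float_frame_with_na fixture the real tests pass in.
def _example_assert_stat_op_calc():
    frame = DataFrame({'a': [1.0, np.nan, 3.0],
                       'b': [4.0, 5.0, np.nan]})
    # frame.mean() is compared column-wise against np.mean (NaNs dropped by
    # the helper's skipna wrapper); the skipna=False, bad-axis and all-NaN
    # branches inside the helper are exercised as well.
    assert_stat_op_calc('mean', np.mean, frame)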
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure the op works on a mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
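# Illustrative sketch (not part of the original suite): driving
# assert_bool_op_calc directly. The object-dtype frame below mimics the
# bool_frame_with_na fixture (booleans plus a missing value).
def _example_assert_bool_op_calc():
    frame = DataFrame({'a': [True, False, np.nan],
                       'b': [False, True, True]}, dtype=object)
    # frame.any() is compared against np.any per column/row, both with
    # skipna=False (where NaN is truthy for np.any) and with NaNs dropped.
    assert_bool_op_calc('any', np.any, frame)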
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame of booleans with some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
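# Illustrative sketch (not part of the original suite): assert_bool_op_api is
# normally fed the bool_frame_with_na and float_string_frame fixtures; the
# frames below are simplified stand-ins with the same kinds of columns.
def _example_assert_bool_op_api():
    bool_frame = DataFrame({'a': [True, np.nan], 'b': [False, True]},
                           dtype=object)
    mixed_frame = DataFrame({'x': [1.0, 2.0], 'y': ['u', 'v']})
    # 'any' must be callable on the mixed frame (with and without
    # bool_only=True) and on the boolean frame with bool_only=False.
    assert_bool_op_api('any', bool_frame, mixed_frame, has_bool_only=True)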
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2':
completion: date_range('20130105', periods=5)
api: pandas.date_range
#!/usr/bin/env python3
import logging
import sys
import time
import re
from argparse import ArgumentParser
from datetime import datetime
import pandas as pd
import os
import homematicip
from homematicip.device import *
from homematicip.group import *
from homematicip.rule import *
from homematicip.home import Home
from homematicip.base.helpers import handle_config
log_file = "./temperature_log.csv"
def main():
pd.set_option('display.width', 200)
parser = ArgumentParser(
description="a cli wrapper for the homematicip API")
parser.add_argument(
"--config_file",
type=str,
help=
"the configuration file. If nothing is specified the script will search for it.",
)
parser.add_argument(
"--interval",
type=int,
help=
"the interval between two subsequent server requests for temperature data.",
)
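# Illustrative usage (added; the script filename below is a placeholder and not
# part of the original source):
#
#   python3 temperature_logger.py --config_file ./config.ini --interval 60
#
# Omitting --interval falls back to the default of 10 set below; omitting
# --config_file makes the script search for a configuration file on its own.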
try:
args = parser.parse_args()
except SystemExit:
return
except:
print("could not parse arguments")
parser.print_help()
return
_config = None
if args.config_file:
try:
_config = homematicip.load_config_file(args.config_file)
except FileNotFoundError:
print("##### CONFIG FILE NOT FOUND: {} #####".format(
args.config_file))
return
else:
_config = homematicip.find_and_load_config_file()
if _config is None:
print("Could not find configuration file. Script will exit")
return
_interval = 10
if args.interval:
_interval = args.interval
print("Using the interval: " + str(_interval))
else:
print("Using the default interval: " + str(_interval))
home = Home()
home.set_auth_token(_config.auth_token)
home.init(_config.access_point)
if not home.get_current_state():
print("homematicip cannot get its current state.")
return
print("\n=== Homematicip Initialized ===\n")
rooms_history = {}
data = []
i = 0
# Check if the log file already exists
if os.path.isfile(log_file):
df =
completion: pd.read_csv(log_file)
api: pandas.read_csv
from datetime import datetime
from io import StringIO
import numpy
import pandas
import pytest
from hts.hierarchy import HierarchyTree
from hts.utilities.load_data import load_hierarchical_sine_data, load_mobility_data
@pytest.fixture
def events():
s = """ts,start_latitude,start_longitude,city
2019-12-06 12:29:16.789,53.565173,9.959418,hamburg
2019-12-06 12:28:37.326,50.120962,8.674268,frankfurt
2019-12-06 12:27:07.055,52.521168,13.410618,berlin
2019-12-06 12:26:25.989,51.492683,7.417612,dortmund
2019-12-06 12:25:40.222,52.537730,13.417372,berlin
2019-12-06 12:25:25.309,50.948847,6.951802,cologne
2019-12-06 12:23:53.633,48.166799,11.577420,munich
2019-12-06 12:23:05.292,50.113883,8.675192,frankfurt
2019-12-06 12:22:56.059,50.114847,8.672653,frankfurt
2019-12-06 12:22:39.471,50.943082,6.959962,cologne"""
df = pandas.read_csv(StringIO(s), index_col="ts", sep=",")
df.index = pandas.to_datetime(df.index)
return df
@pytest.fixture
def n_tree():
"""
This is the format of this tree
t 1
a b c 3
aa ab ba bb ca cb 6
aaa aab aba abb baa bab bba bbb caa cab cba cbb 12
Resulting in the summing matrix: y_t = S * b_t
t 1 1 1 1 1 1 1 1 1 1 1 1
a 1 1 1 1 0 0 0 0 0 0 0 0
b 0 0 0 0 1 1 1 1 0 0 0 0
c 0 0 0 0 0 0 0 0 1 1 1 1
aa 1 1 0 0 0 0 0 0 0 0 0 0
ab 0 0 1 1 0 0 0 0 0 0 0 0 aaa
ba 0 0 0 0 1 1 0 0 0 0 0 0 aab
bb 0 0 0 0 0 0 1 1 0 0 0 0 aba
ca 0 0 0 0 0 0 0 0 1 1 0 0 abb
cb 0 0 0 0 0 0 0 0 0 0 1 1 baa
aaa 1 0 0 0 0 0 0 0 0 0 0 0 bab
aab 0 1 0 0 0 0 0 0 0 0 0 0 bba
aba 0 0 1 0 0 0 0 0 0 0 0 0 bbb
abb 0 0 0 1 0 0 0 0 0 0 0 0 caa
baa 0 0 0 0 1 0 0 0 0 0 0 0 cab
bab 0 0 0 0 0 1 0 0 0 0 0 0 cba
bba 0 0 0 0 0 0 1 0 0 0 0 0 cbb
bbb 0 0 0 0 0 0 0 1 0 0 0 0
caa 0 0 0 0 0 0 0 0 1 0 0 0
cab 0 0 0 0 0 0 0 0 0 1 0 0
cba 0 0 0 0 0 0 0 0 0 0 1 0
cbb 0 0 0 0 0 0 0 0 0 0 0 1
"""
t = ("t", 1)
t1 = [("a", 2), ("b", 2), ("c", 3)]
t2 = [("aa", 4), ("ab", 5), ("ba", 6), ("bb", 4), ("ca", 5), ("cb", 6)]
t3 = [
("aaa", 4),
("aab", 5),
("aba", 6),
("abb", 4),
("baa", 5),
("bab", 6),
("bba", 5),
("bbb", 6),
("caa", 5),
("cab", 6),
("cba", 5),
("cbb", 6),
]
test_t = HierarchyTree(key=t[0], item=t[1])
for i, j in t1:
test_t.add_child(key=i, item=j)
for c in test_t.children:
for i, j in t2:
if i.startswith(c.key):
c.add_child(key=i, item=j)
for c in test_t.children:
for c2 in c.children:
for i, j in t3:
if i.startswith(c2.key):
c2.add_child(key=i, item=j)
return test_t
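# Illustrative sketch (added, not part of the original fixtures): it builds the
# 22 x 12 summing matrix S described in the n_tree docstring above and checks the
# y_t = S * b_t relation on dummy data. The helper name _summing_matrix_sketch and
# the example bottom-level values are assumptions for illustration only.
def _summing_matrix_sketch():
    import numpy as np

    bottom = ["aaa", "aab", "aba", "abb", "baa", "bab",
              "bba", "bbb", "caa", "cab", "cba", "cbb"]
    aggregates = ["t", "a", "b", "c", "aa", "ab", "ba", "bb", "ca", "cb"]
    # Each aggregate row sums the bottom-level series whose key starts with the
    # aggregate's key ("t" sums everything); the last 12 rows of S are the
    # identity over the bottom level.
    agg_rows = [[1 if (agg == "t" or name.startswith(agg)) else 0
                 for name in bottom]
                for agg in aggregates]
    S = np.vstack([np.array(agg_rows), np.eye(len(bottom), dtype=int)])
    assert S.shape == (22, 12)

    b_t = np.arange(1, 13)           # dummy bottom-level observations
    y_t = S @ b_t                    # aggregated series, top levels first
    assert y_t[0] == b_t.sum()       # "t" is the grand total
    assert y_t[1] == b_t[:4].sum()   # "a" sums aaa, aab, aba, abb
    return S, y_t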
@pytest.fixture
def hierarchical_sine_data():
s, e = datetime(2019, 1, 15), datetime(2019, 10, 15)
return load_hierarchical_sine_data(s, e)
@pytest.fixture
def hierarchical_mv_data():
return load_mobility_data()
@pytest.fixture
def mv_tree(hierarchical_mv_data):
hier = {
"total": ["CH", "SLU", "BT", "OTHER"],
"CH": ["CH-07", "CH-02", "CH-08", "CH-05", "CH-01"],
"SLU": ["SLU-15", "SLU-01", "SLU-19", "SLU-07", "SLU-02"],
"BT": ["BT-01", "BT-03"],
"OTHER": ["WF-01", "CBD-13"],
}
exogenous = {
k: ["precipitation", "temp"]
for k in hierarchical_mv_data.columns
if k not in ["precipitation", "temp"]
}
return HierarchyTree.from_nodes(hier, hierarchical_mv_data, exogenous=exogenous)
@pytest.fixture
def sine_hier():
return {
"total": ["a", "b", "c"],
"a": ["a_x", "a_y"],
"b": ["b_x", "b_y"],
"c": ["c_x", "c_y"],
"a_x": ["a_x_1", "a_x_2"],
"a_y": ["a_y_1", "a_y_2"],
"b_x": ["b_x_1", "b_x_2"],
"b_y": ["b_y_1", "b_y_2"],
"c_x": ["c_x_1", "c_x_2"],
"c_y": ["c_y_1", "c_y_2"],
}
@pytest.fixture
def uv_tree(sine_hier, hierarchical_sine_data):
hsd = hierarchical_sine_data.resample("1H").apply(sum).head(400)
return HierarchyTree.from_nodes(sine_hier, hsd)
@pytest.fixture
def load_df_and_hier_uv(sine_hier, hierarchical_sine_data):
return hierarchical_sine_data.resample("1H").apply(sum), sine_hier
@pytest.fixture
def sample_ds():
cid = numpy.repeat([10, 500], 40)
ckind = numpy.repeat(["a", "b", "a", "b"], 20)
csort = [
30,
53,
26,
35,
42,
25,
17,
67,
20,
68,
46,
12,
0,
74,
66,
31,
32,
2,
55,
59,
56,
60,
34,
69,
47,
15,
49,
8,
50,
73,
23,
62,
24,
33,
22,
70,
3,
38,
28,
75,
39,
36,
64,
13,
72,
52,
40,
16,
58,
29,
63,
79,
61,
78,
1,
10,
4,
6,
65,
44,
54,
48,
11,
14,
19,
43,
76,
7,
51,
9,
27,
21,
5,
71,
57,
77,
41,
18,
45,
37,
]
cval = [
11,
9,
67,
45,
30,
58,
62,
19,
56,
29,
0,
27,
36,
43,
33,
2,
24,
71,
41,
28,
50,
40,
39,
7,
53,
23,
16,
37,
66,
38,
6,
47,
3,
61,
44,
42,
78,
31,
21,
55,
15,
35,
25,
32,
69,
65,
70,
64,
51,
46,
5,
77,
26,
73,
76,
75,
72,
74,
10,
57,
4,
14,
68,
22,
18,
52,
54,
60,
79,
12,
49,
63,
8,
59,
1,
13,
20,
17,
48,
34,
]
df = pandas.DataFrame({"id": cid, "kind": ckind, "sort": csort, "val": cval})
df = df.set_index("id", drop=False)
df.index.name = None
return df
@pytest.fixture
def visnights_hier():
return {
"total": ["NSW", "OTH", "WAU", "SAU", "QLD", "VIC"],
"NSW": ["NSW_Metro", "NSW_NthCo", "NSW_NthIn", "NSW_SthCo", "NSW_SthIn"],
"OTH": ["OTH_Metro", "OTH_NoMet"],
"QLD": ["QLD_Cntrl", "QLD_Metro", "QLD_NthCo"],
"SAU": ["SAU_Coast", "SAU_Inner", "SAU_Metro"],
"VIC": ["VIC_EstCo", "VIC_Inner", "VIC_Metro", "VIC_WstCo"],
"WAU": ["WAU_Coast", "WAU_Inner", "WAU_Metro"],
}
@pytest.fixture
def hierarchical_visnights_data():
vis_idx =
completion: pandas.date_range(start="1998-01-01", periods=8, freq="QS")
api: pandas.date_range
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
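# With these levels and labels, self.index holds the tuples
# ('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
# ('baz', 'two'), ('qux', 'one'), ('qux', 'two')
# under the names ('first', 'second'); test_iter below asserts this layout.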
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# scalar input shouldn't error; instead it should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# scalar input shouldn't error; instead it should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# scalar input shouldn't error; instead it should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels don't matter which way they're copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names don't matter which way they're copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
        # smoke test: after sorting, slice_locs should not raise
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
|
tm.reset_display_options()
|
pandas.util.testing.reset_display_options
|
""" SEG-Y geometry. """
import os
from itertools import product
import numpy as np
import pandas as pd
import h5pickle as h5py
import segyio
import cv2
from .base import SeismicGeometry
from ..utils import find_min_max, lru_cache, SafeIO
from ...batchflow import Notifier
class SeismicGeometrySEGY(SeismicGeometry):
""" Class to infer information about SEG-Y cubes and provide convenient methods of working with them.
A wrapper around `segyio` to provide higher-level API.
In order to initialize instance, one must supply `path`, `headers` and `index`:
- `path` is a location of SEG-Y file
- `headers` is a sequence of trace headers to infer from the file
- `index_headers` is a subset of `headers` that is used as trace (unique) identifier:
          for example, `INLINE_3D` and `CROSSLINE_3D` have a one-to-one correspondence with trace numbers.
Another example is `FieldRecord` and `TraceNumber`.
Default values of `headers` and `index_headers` are ones for post-stack seismic
(with correctly filled `INLINE_3D` and `CROSSLINE_3D` headers),
so that post-stack cube can be loaded by providing path only.
Each instance is basically built around `dataframe` attribute, which describes mapping from
indexing headers to trace numbers. It is used to, for example, get all trace indices from a desired `FieldRecord`.
`set_index` method can be called to change indexing headers of the dataframe.
One can add stats to the instance by calling `collect_stats` method, that makes a full pass through
the cube in order to analyze distribution of amplitudes. It also collects a number of trace examples
into `trace_container` attribute, that can be used for later evaluation of various statistics.
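    Example (a minimal usage sketch; the path below is illustrative, not a real file):
        geometry = SeismicGeometrySEGY('/data/cubes/cube.sgy')    # post-stack defaults for headers/index
        geometry.set_index(['CROSSLINE_3D', 'INLINE_3D'])         # re-index by a subset of the loaded headers
        trace_map = geometry.dataframe                            # mapping from index headers to trace numbers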
"""
#pylint: disable=attribute-defined-outside-init, too-many-instance-attributes, redefined-builtin
def __init__(self, path, headers=None, index_headers=None, **kwargs):
self.structured = False
self.quantized = False
self.dataframe = None
self.segyfile = None
self.headers = headers or self.HEADERS_POST_FULL
self.index_headers = index_headers or self.INDEX_POST
super().__init__(path, **kwargs)
def set_index(self, index_headers, sortby=None):
""" Change current index to a subset of loaded headers. """
self.dataframe.reset_index(inplace=True)
if sortby:
self.dataframe.sort_values(index_headers, inplace=True, kind='mergesort')# the only stable sorting algorithm
self.dataframe.set_index(index_headers, inplace=True)
self.index_headers = index_headers
self.add_attributes()
# Methods of inferring dataframe and amplitude stats
def process(self, collect_stats=True, recollect=False, **kwargs):
""" Create dataframe based on `segy` file headers. """
# Note that all the `segyio` structure inference is disabled
self.segyfile = SafeIO(self.path, opener=segyio.open, mode='r', strict=False, ignore_geometry=True)
self.segyfile.mmap()
self.depth = len(self.segyfile.trace[0])
self.delay = self.segyfile.header[0].get(segyio.TraceField.DelayRecordingTime)
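        # `segyio.dt` reports the sampling interval in microseconds; dividing by 1000 gives milliseconds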
self.sample_rate = segyio.dt(self.segyfile) / 1000
# Load all the headers
dataframe = {}
for column in self.headers:
dataframe[column] = self.segyfile.attributes(getattr(segyio.TraceField, column))[slice(None)]
dataframe =
|
pd.DataFrame(dataframe)
|
pandas.DataFrame
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
from rasa_sdk.events import Restarted
from rasa_sdk.events import AllSlotsReset
import zomatopy
import json
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import re
import pandas as pd
import numpy as np
tier_1_2_cities = ['Agra', 'Ahmedabad', 'Ajmer', 'Aligarh', 'Amravati', 'Amritsar', 'Asansol', 'Aurangabad', 'Bareilly', 'Belgaum',\
'Bengaluru', 'Bhavnagar', 'Durg Bhilai', 'Mumbai', 'Bhopal', 'Bhubaneswar', 'Bijapur', 'Bikaner', 'Bilaspur',\
'Bokaro', 'Chandigarh', 'Chennai', 'Coimbatore', 'Cuttack', 'Dehradun', 'Delhi NCR', 'Dhanbad', 'Dindigul', \
'Durgapur', 'Erode', 'Delhi NCR', 'Firozabad', 'Delhi NCR', 'Gorakhpur', 'Gulbarga', 'Guntur', 'Delhi NCR', \
'Guwahati', 'Gwalior', 'Hamirpur', 'Dharwad', 'Hyderabad', 'Indore', 'Jabalpur', 'Jaipur', 'Jalandhar', 'Jammu', \
'Jamnagar', 'Jamshedpur', 'Jhansi', 'Jodhpur', 'Kakinada', 'Kannur', 'Kanpur', 'Karnal', 'Kochi', 'Kolhapur', 'Kolkata',\
'Kollam', 'Kozhikode', 'Kurnool', 'Lucknow', 'Ludhiana', 'Madurai', 'Malappuram', 'Mangalore', 'Mathura', 'Meerut', \
'Moradabad', 'Mumbai', 'Mysore', 'Nagpur', 'Nanded', 'Nashik', 'Nellore', 'Delhi NCR', 'Patna', 'Chennai',\
'Allahabad', 'Pune', 'Purulia', 'Raipur', 'Rajahmundry', 'Rajkot', 'Ranchi', 'Rourkela', 'Salem', 'Sangli',\
'Shimla', 'Siliguri', 'Solapur', 'Srinagar', 'Surat', 'Chennai', 'Trivandrum', 'Thrissur','Vadodara','Varanasi',\
'Ujjain','Virar','Tirunelveli','Vellore','Vijayawada','Visakhapatnam','Warangal']
##List of Tier 1 and Tier 2 cities
tier_1_2_city_names= [city.lower() for city in tier_1_2_cities]
##Validating Location
def Check_Location(loc, city_names=tier_1_2_city_names):
config={"user_key":"337f3a03601af0bbcc30b2e3506be18d"}
zomato = zomatopy.initialize_app(config)
location_detail=zomato.get_location(loc, 1)
location_json = json.loads(location_detail)
number_of_loc = len(location_json['location_suggestions'])
try:
if number_of_loc==0:
return {'location_result': 'Not Found!', 'location_name': None}
        elif (location_json['location_suggestions'][0]['city_name']).lower() not in city_names:
return {'location_result': "Sorry! We do not operate in this area yet.", 'location_name': None}
else:
return {'location_result': "Location Found!", 'location_name': location_json['location_suggestions'][0]['city_name']}
    except:
        # `dispatcher` is not defined inside this helper, so report the failure via the return value
        return {'location_result': 'Sorry, please enter a valid request!', 'location_name': None}
class ActionCheckLocation(Action):
def name(self):
return 'action_check_location'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('location')
check= Check_Location(loc)
return[SlotSet('location',check['location_name'])]
class Actionvalidatecuisine(Action):
def name(self):
return 'action_validate_cuisine'
def run(self,dispatcher,tracker,domain):
cuisine_list = ['chinese','mexican','italian','american','south indian','north indian']
requested_cuisine = tracker.get_slot('cuisine')
if requested_cuisine is not None:
if requested_cuisine.lower() in cuisine_list:
return[SlotSet('cuisine', requested_cuisine)]
else:
dispatcher.utter_message("Sorry, the requested cuisine is invalid. Please provide a valid cuisine.")
return[SlotSet('cuisine', None)]
else:
dispatcher.utter_message("Sorry, I could not understand the requested cuisine. Please re-enter the cuisine.")
return [SlotSet('cuisine', None)]
class ActionAskBudget(Action):
def name(self):
return 'action_ask_budget'
def run(self,dispatcher,tracker,domain):
high_list= ['more than 700', 'more than rs. 700', 'more than rs 700', 'more 700', '>700', '> 700', 'high', 'elite', 'expensive', 'luxurious', '700+', '700 plus', 'greater than 700', 'higher than 700', 'more than 700', 'greater 700', 'costly']
low_list=['lesser than rs. 300', 'lesser than rs.300', 'lesser than rs300', 'lesser than rs. 300','less 300', 'lesser than rs 300', 'affordable', 'less than rs 300', 'lesser than 300', 'less than 300', '<300', '< 300', 'max 300', 'below 300', 'until 300', 'low range', 'low', 'limit 300', 'max lim 300', 'max limit 300', 'max budget 300', 'less than rs. 300']
mid_list= ['between 300 and 700','between rs.300 to 700', 'between rs300 to 700', 'rs. 300 to 700', '300-700', 'between 300-700', 'between rs. 300 to 700', 'between rs 300 to 700', 'between 300 to 700', '300 to 700', 'mid range', 'mid', 'moderate price range', 'moderate range', 'moderate']
requested_budget = tracker.get_slot('budget')
        try:
            requested_budget_lower = (requested_budget or '').lower().strip()
if requested_budget_lower in low_list:
return ([SlotSet('budget', 'low')])
elif requested_budget_lower in high_list:
return ([SlotSet('budget', 'high')])
elif requested_budget_lower in mid_list:
return ([SlotSet('budget', 'mid')])
else:
dispatcher.utter_message("Sorry, the budget entry is invalid. Please re-enter a valid request!")
return ([SlotSet('budget', None)])
except:
dispatcher.utter_message("Sorry, the entry is invalid. Please re-enter a valid request!")
return ([SlotSet('budget', None)])
class ActionSearchRestaurants(Action):
def name(self):
return 'action_search_restaurants'
def run(self, dispatcher, tracker, domain):
config = {"user_key": "337f3a03601af0bbcc30b2e3506be18d"}
zomato = zomatopy.initialize_app(config)
loc = tracker.get_slot('location')
cuisine = tracker.get_slot('cuisine')
location_detail = zomato.get_location(loc, 1)
budget_detail = tracker.get_slot('budget')
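        # map the budget slot to an average-cost-for-two band (in rupees): low <= 300, mid 301-700, high > 700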
if budget_detail == 'low':
min_val = 0
max_val = 300
elif budget_detail == 'mid':
min_val = 301
max_val = 700
else:
min_val = 701
max_val = 10000000
d1 = json.loads(location_detail)
lat = d1["location_suggestions"][0]["latitude"]
lon = d1["location_suggestions"][0]["longitude"]
cuisines_dict = {'american': 1, 'mexican': 73, 'chinese': 25, 'italian': 55, 'north indian': 50, 'south indian': 85}
results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)), 20)
d = json.loads(results)
results_shown = int(d['results_shown'])
name = []
location = []
avg_cost = []
agg_rating = []
response = ""
for i in range(0, results_shown):
name.append(d['restaurants'][i]['restaurant']['name'])
location.append(d['restaurants'][i]['restaurant']['location']['address'])
avg_cost.append(int(d['restaurants'][i]['restaurant']['average_cost_for_two']))
agg_rating.append(float(d['restaurants'][i]['restaurant']['user_rating']['aggregate_rating']))
df_display =
|
pd.DataFrame({'Name': name, 'Location': location, 'average_cost_for_two': avg_cost, 'Ratings': agg_rating})
|
pandas.DataFrame
|
import simpy
import warnings
from pandas import DataFrame
from ..core.manager import Manager
from .resource_manager import ResourceManager
class ProcessManager(Manager):
def __init__(self, **kwargs):
"""
Manager of processes.
Keyword Args:
rm (:class:`.ResourceManager`): if not set, a new :class:`.ResourceManager`
is created
Attributes:
env (:class:`simpy.Environment`): Environment linked to this manager
flow (DataFrame): Processes flow
processes (dict_keys): All processes attached to this manager
rm (:class:`.ResourceManager`): :class:`.ResourceManager` linked to this manager
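        Example (a minimal sketch; ``MyProcess`` stands in for any user-defined Process subclass):
            pm = ProcessManager()
            pm.create_resource(['machine'])
            pm.attach_process(MyProcess)
            process = pm.get_process('MyProcess')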
"""
super().__init__()
self.env = kwargs.get('env', simpy.Environment())
self.rm = kwargs.get('rm', ResourceManager(self.env))
self.processes = self._store.keys()
self.reset_flow()
def create_resource(self, names, **kwargs):
"""
Shortcut for `create_resource` method in :class:`.ResourceManager`.
Args:
names (str): List of names of resources to be created
**kwargs: Arbitrary keyword arguments
"""
self.rm.create_resource(names, **kwargs)
def get_resource(self, name):
"""
Shortcut for `get_resource` method in :class:`.ResourceManager`.
Args:
name (str): Name of the resource
"""
return self.rm.get_resource(name)
def get_resources(self, **kwargs):
"""
Shortcut for `get_resources` method in :class:`.ResourceManager`.
"""
return self.rm.get_resources(**kwargs)
def attach_process(self, process_class, **kwargs):
"""
Attach process to the simulation.
Args:
process_class (:class:`.Process`)
Keyword Args:
name (str): if not set, the name of the class is used
"""
name = kwargs.get('name', process_class.__name__)
process = process_class(self.env, self.rm, name)
self._store[name] = process
def get_process(self, name):
"""
Get process by name.
Args:
name (str)
Returns:
:class:`.Process`
"""
return self._store[name]
def reset_flow(self):
"""
Clear flow lookup table.
"""
self.flow =
|
DataFrame(columns=['from', 'to'])
|
pandas.DataFrame
|
from six.moves import builtins
__all__ = (
'display',
)
def display(obj, **kwargs):
# Perform lazy-import for fast initialization.
# Note: pd/np are likely to be already imported by user codes!
import pandas as pd
import numpy as np
if isinstance(obj, pd.DataFrame):
r = (b'html', obj.to_html().encode('utf8'))
builtins._sorna_emit(r)
return
if isinstance(obj, np.ndarray):
df =
|
pd.DataFrame(obj, **kwargs)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
        # expected: known groups are imputed with the means learned during fit; the unseen '987' group is left as NaN
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
@pytest.mark.usefixtures("missing_data_numeric")
class TestMissingValueFiller(object):
def test_missing_factors(self, missing_data_factors):
# Test filling in missing factors with a string.
prep = MissingValueFiller('Missing')
result = prep.fit_transform(missing_data_factors)
exp_dict = {'c': ['a', 'Missing', 'a', 'b', 'b', 'Missing', 'c', 'a',
'a', 'c'],
'd': ['a', 'a', 'Missing', 'Missing', 'e', 'f', 'Missing',
'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_numeric(self, missing_data_numeric):
# Test filling in missing numeric data with a number.
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data_numeric):
# Test unordered index is handled properly
new_index = list(missing_data_numeric.index)
shuffle(new_index)
missing_data_numeric.index = new_index
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestOverMissingThresholdDropper(object):
def test_drop_20(self, missing_data):
# Test dropping columns with missing over a threshold.
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(1.5)
svatd
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(-1)
svatd
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestValueReplacer(object):
def test_mapper(self, full_data_factors):
# Test replacing values with mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_inverse_mapper(self, full_data_factors):
# Test replacing values with inverse_mapper.
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
prep = ValueReplacer(inverse_mapper=inv_mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, full_data_factors):
        # Test throwing error when replacing values with a non-existent column.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
with pytest.raises(ValueError):
prep.fit(full_data_factors)
def test_2_mappers_value_error(self):
# Test throwing error when specifying mapper and inverse_mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
with pytest.raises(ValueError):
prep = ValueReplacer(mapper=mapper, inverse_mapper=inv_mapper)
prep
def test_no_mappers_value_error(self):
# Test throwing error when not specifying mapper or inverse_mapper.
with pytest.raises(ValueError):
prep = ValueReplacer()
prep
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
class TestFactorLimiter(object):
def test_limiter(self, missing_data_factors):
# Test limiting factor levels to specified levels with default.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, missing_data_factors):
        # Test throwing error when limiting values with a non-existent column.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'e': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
fl = FactorLimiter(factors)
with pytest.raises(ValueError):
fl.fit(missing_data_factors)
def test_unordered_index(self, missing_data_factors):
# Test unordered index is handled properly
new_index = list(missing_data_factors.index)
shuffle(new_index)
missing_data_factors.index = new_index
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestSingleValueAboveThresholdDropper(object):
def test_drop_70_with_na(self, missing_data):
# test dropping columns with over 70% single value, including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_70_without_na(self, missing_data):
# test dropping columns with over 70% single value, not including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=True)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(1.5)
prep
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(-1)
prep
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("single_values_data")
class TestSingleValueDropper(object):
def test_without_na(self, single_values_data):
# Test dropping columns with single values, excluding NaNs as a value.
prep = SingleValueDropper(dropna=True)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'e': [1, 2, None, None, None, None, None, None, None,
None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_with_na(self, single_values_data):
# Test dropping columns with single values, including NaNs as a value.
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, single_values_data):
# Test unordered index is handled properly
new_index = list(single_values_data.index)
shuffle(new_index)
single_values_data.index = new_index
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnExtractor(object):
def test_extraction(self, missing_data):
# Test extraction of columns from a DataFrame.
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_column_missing_error(self, missing_data):
        # Test throwing error when an extraction is requested of a missing
        # column.
prep = ColumnExtractor(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnDropper(object):
def test_drop_multiple(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_single(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper('d')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_error(self, missing_data):
# Test throwing error when dropping is requested of a missing column
prep = ColumnDropper(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
@pytest.mark.usefixtures("full_data_factors_subset")
@pytest.mark.usefixtures("missing_data_factors")
class TestDummyCreator(object):
def test_default_dummies(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
|
tm.assert_frame_equal(result, expected, check_dtype=False)
|
pandas.util.testing.assert_frame_equal
|
#!/usr/bin/env python
"""Script that parses CATME peer evaluation data and plots summary plots and
statistics.
The CATME peer evaluation results are provided in a CSV file which contains
more than one table along with interspersed metadata. The sections are separated
by double line returns and are as follows:
1. Extraneous metadata
2. Table of answers to the per team member rating questions (it has two header
lines)
3. An aggregation table of the data in part 2
Each of the next optional sections is a list of question definitions followed by
a table of the responses to those questions. The question sets below are the
possible options; each answer is tied to a score of 1, 2, 3, 4, or 5.
Team Conflict
=============
1. None or Not at all
2. Little or Rarely
3. Some
4. Much or Often
5. Very Much or Very Often
Team Satisfaction and Team Perspectives
=======================================
1. Strongly Disagree
2. Disagree
3. Neither Agree Nor Disagree
4. Agree
5. Strongly Agree
The final section contains the private comments that the students provide.
"""
import os
import textwrap
from io import StringIO
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import seaborn as sns
def load_main_table(table_text):
"""Returns a data frame with the peer to peer ratings for a single CATME
peer evaluation given the text from the CSV export."""
lines = table_text.split('\n')
i = 1
cols = []
for thing in lines[1].split('","'):
if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:
cols.append(thing.strip() + str(i) + ' ')
if thing == 'H ':
i += 1
else:
cols.append(thing)
lines[1] = '","'.join(cols)
text = "\n".join(lines[1:])
df = pd.read_csv(StringIO(text))
df.index = df['Student ID']
return df
def find_delinquent_students(df):
"""Returns a list of student names who did not fill out the survey."""
# TODO : Setup to print results with students name and email so an email
# can quickly be sent to all students.
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
delinquent_students = []
for name, group in df.groupby('Team ID'):
na_cols = group.columns[group.isna().any()].tolist()
num_members = len(group)
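        # Rating columns are suffixed with the rater number (see `load_main_table`); columns with
        # missing values therefore point back to raters who did not fill out the survey.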
delinquent_rater_nums = set([int(name.strip()[-1]) for name in na_cols
if is_int(name.strip()[-1])])
delinquent_students += [
group['Student Name'][group['Rater #'] == num].values[0]
for num in delinquent_rater_nums if num <= num_members]
return delinquent_students
def merge_adjustment_factor(*dataframes, with_self=True):
"""Returns a data frame with student id as the index and the peer
evaluation instances as the columns. The entry is the adjustment factor
value. A numerical value is also computed in the column 'Improvement' that
shows whether they were ranked higher or lower as time progressed.
Parameters
==========
with_self : boolean, optional
If True the adjustment factor that includes the students self rating is
returned.
"""
    # TODO : Maybe it would be better to select the better of the two
    # adjustment factors. For students rated low by their team, taking the better
    # factor would improve their score, and students who rated themselves lower
    # than their team did would get a boost too.
if with_self:
col = 'Adj Factor (w/ Self)'
else:
col = 'Adj Factor (w/o Self)'
data = {}
for i, df in enumerate(dataframes):
data['P{}'.format(i + 1)] = df[col]
data['Student Name'] = df['Student Name']
data = pd.DataFrame(data)
data = data.dropna() # if student drops and is deleted after peer eval
    # Calculate a slope value (the improvement metric) that characterizes whether
    # a student's rating improved over time: positive values indicate improvement
    # and negative values mean they got rated worse over time.
x_vals = list(range(len(dataframes)))
slopes = []
means = []
stds = []
adjusted_scores = []
    # Weight the later evals more than the earlier ones; the later ones tend to be
    # taken more seriously because the stakes become more real as the class
    # progresses.
eval_names = ['P1', 'P2', 'P3', 'P4']
weights = [0.85, 0.90, 0.95, 1.0]
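    # Worked example with illustrative numbers: factors [1.00, 0.90, 0.95, 1.05] over four evals
    # give a weighted mean of (0.85*1.00 + 0.90*0.90 + 0.95*0.95 + 1.00*1.05) / 3.70 ≈ 0.976,
    # slightly above the plain mean of 0.975 because the later, higher scores count more.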
for idx, row in data.iterrows():
y_vals = row[eval_names[:len(dataframes)]].values.astype(float)
# Weight the latter reviews more than the earlier reviews.
mean = np.average(y_vals, weights=weights[:len(dataframes)])
# Calculate a "slope" val that indicates how little or how much
# improvement there was.
opt, _ = curve_fit(lambda x, slope, intercept: slope * x + intercept,
x_vals, y_vals)
improvement = opt[0]
# If the student was rated low but improved over time, bump their
        # factor up based on the improvement. Also, don't allow any factor
# lower than 0.75. A factor of 0.75 can drop the grade two letter
# grades (based on 85).
if mean < 0.95 and improvement > 0.0:
adjusted_score = mean + 1.5 * improvement
else:
adjusted_score = max([0.75, mean])
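        # NOTE: the unconditional assignment below overrides the improvement bump above,
        # so the weighted mean is what is currently used as the final factor.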
adjusted_score = mean
means.append(mean)
stds.append(y_vals.std())
slopes.append(improvement)
adjusted_scores.append(adjusted_score)
data['Improvement'] = slopes
data['Mean Adj Factor'] = means
data['STD Adj Factor'] = stds
data['Final Adj Factor'] = adjusted_scores
return data
def plot_student_adj(df, with_self=True):
"""Returns three axes. The first is a bar plot of the adjustment factor for
each student. The second is a bar plot showing the improvement value. And
the third is a bar plot of the adjustment factor modified by the
improvement score."""
fig, axes = plt.subplots(3, sharex=True)
df = df.sort_values('Final Adj Factor')
df.plot(x='Student Name', y='Mean Adj Factor', kind='bar',
yerr='STD Adj Factor', ylim=(0.6, 1.1), ax=axes[0])
df.plot(x='Student Name', y='Improvement', kind='bar', ax=axes[1])
df.plot(x='Student Name', y='Final Adj Factor', kind='bar',
ylim=(0.70, 1.1), ax=axes[2])
return axes
def load_catme_data_sections(path_to_file):
"""Returns a list of text sections from the CATME csv export."""
with open(path_to_file, 'r') as f:
text = f.read()
sections = text.split('\n\n')
return sections
def create_team_factor(df):
# NOTE : This is not complete and not used anywhere. Needs more work.
# TODO : What to do about note="over"
df['Team Factor'] = df['Adj Factor (w/ Self)']
unders = df['Note'] == 'Under'
    df.loc[unders, 'Team Factor'] = df.loc[unders, 'Adj Factor (w/o Self)']
    df.loc[df['Team Factor'] > 1.05, 'Team Factor'] = 1.05
    df.loc[(df['Team Factor'] >= 0.95) & (df['Team Factor'] < 1.0), 'Team Factor'] = 1.0
    if (df['Note'] == 'Manip').any():
        manip_team = df.loc[df['Note'] == 'Manip', 'Team ID'].values[0]
        df.loc[df['Team ID'] == manip_team, 'Team Factor'] = 1.0
return df
def parse_team_questions(question_map_text, score_text):
"""Returns a data frame with each asked question in a row.
Team Conflict
=============
Example text that maps an ID to the actual question::
"T1","How much conflict of ideas is there in your work group? (Task Conflict)"
"T2","How frequently do you have disagreements within your work group about the task of the project you are working on? (Task Conflict)"
"T3","How often do people in your work group have conflicting opinions about the project you are working on? (Task Conflict)"
"R1","How much relationship tension is there in your work group? (Relationship Conflict)"
"R2","How often do people get angry while working in your group? (Relationship Conflict)"
"R3","How much emotional conflict is there in your work group? (Relationship Conflict)"
"P1","How often are there disagreements about who should do what in your work group? (Process Conflict)"
"P2","How much conflict is there in your group about task responsibilities? (Process Conflict)"
"P3","How often do you disagree about resource allocation in your work group? (Process Conflict)"
This text is then followed by the scores for those questions::
,,,"Relationship Conflict",,,,,"Task Conflict",,,,,"Process Conflict",,,,,"Overall",,
"Student Name","Student ID","Team ID","R1","R2","R3","Mn","SD","T1","T2","T3","Mn","SD","P1","P2","P3","Mn","SD","Mn","SD"
"Surname01, Firstname01","12345","team01","1","1","1","1.00","0.00","1","1","1","1.00","0.00","1","1","1","1.00","0.00","1.00","0.00"
"Surname02, Firstname02","12346","team01","2","1","1","1.33","0.58","3","2","3","2.67","0.58","2","3","2","2.33","0.58","2.11","0.78"
"Surname03, Firstname03","12347","team01","1","1","1","1.00","0.00","2","1","1","1.33","0.58","1","1","1","1.00","0.00","1.11","0.33"
"Surname04, Firstname04","12348","team01","1","1","1","1.00","0.00","2","2","2","2.00","0.00","2","2","1","1.67","0.58","1.56","0.53"
Team Satisfaction
=================
"Q1","I am satisfied with my present teammates"
"Q2","I am pleased with the way my teammates and I work together"
"Q3","I am very satisfied with working in this team"
,,,"Team Satisfaction",,,,,
"Student Name","Student ID","Team ID","Q1","Q2","Q3","Mn","SD"
"Surname01, Firstname01","12345","team01","4","4","4","4.00","0.00"
"Surname02, Firstname02","12346","team01","4","4","3","3.67","0.58"
Team Perspectives
=================
"TA1","Being part of the team allows team members to do enjoyable work (Task Attraction)"
"TA2","Team members get to participate in enjoyable activities (Task Attraction)"
"TA3","Team members like the work that the group does (Task Attraction)"
"IC1","Team members like each other (Interpersonal Cohesiveness)"
"IC2","Team members get along well (Interpersonal Cohesiveness)"
"IC3","Team members enjoy spending time together (Interpersonal Cohesiveness)"
"TC1","Our team is united in trying to reach its goals for performance (Task Commitment)"
"TC2","I'm unhappy with my team's level of commitment to the task (Task Commitment) [scale reversed]"
"TC3","Our team members have conflicting aspirations for the team's performance (Task Commitment) [scale reversed]"
,,,"Interpersonal Cohesiveness",,,,,"Task Commitment",,,,,"Task Attraction",,,,,"Overall",,
"Student Name","Student ID","Team ID","IC1","IC2","IC3","Mn","SD","TC1","TC2","TC3","Mn","SD","TA1","TA2","TA3","Mn","SD","Mn","SD"
"Surname01, Firstname01","12345","team01","5","5","4","4.67","0.58","5","1","2","4.67","0.58","5","4","4","4.33","0.58","4.56","0.53"
"Surname02, Firstname02","12346","team01","4","4","3","3.67","0.58","4","3","4","3.00","1.00","4","3","4","3.67","0.58","3.44","0.73"
"Surname03, Firstname03","12347","team01","5","5","5","5.00","0.00","5","1","2","4.67","0.58","5","5","5","5.00","0.00","4.89","0.33"
"""
# need to remove the first line because it is an extraneous header and any
# lines with summary stats
lines = [l for l in score_text.split('\n')[1:] if 'Team Stats' not in l]
df = pd.read_csv(StringIO('\n'.join(lines)))
# remove stats columns
    df = df.loc[:, [c for c in df.columns
                    if not (c.startswith('Mn') or c.startswith('SD'))]]
# transform to long format
question_cols = [s for s in df.columns if s[-1].isdigit()]
long_df = pd.melt(df, id_vars=['Student ID', 'Student Name', 'Team ID'],
value_vars=question_cols)
long_df = long_df.rename(columns={'variable': 'Question ID',
"value": 'Score'})
question_map = {}
for line in question_map_text.split('\n')[1:]:
        code, question = line.split(',', 1)
question_map[code[1:-1]] = question.split(' (')[0][1:]
long_df['Question'] = long_df['Question ID']
long_df.replace({'Question': question_map}, inplace=True)
return long_df
if __name__ == "__main__":
# 2017
DIR = '/home/moorepants/Drive/Teaching/EME185/2017/peer-evaluations'
FNAME_TEMP = 'Moore-Peer_Evaluation_{}-EME_185-Winter_2017.csv'
# 2018
DIR = '/home/moorepants/Drive/Teaching/EME185/2018/peer-evaluations'
FNAME_TEMP = 'Moore-2018_EME185_Peer_Evaluation_#{}-EME_185-Winter_2018.csv'
# 2019
DIR = '/home/moorepants/Drive/Teaching/EME185/2019/peer-evaluations'
FNAME_TEMP = 'Moore-Peer_Evaluation_{}-EME_185-Winter_2019.csv'
if not os.path.exists(os.path.join(DIR, 'charts')):
os.makedirs(os.path.join(DIR, 'charts'))
files = os.listdir(DIR)
dfs = []
team_ques_dfs = []
# TODO : The range should adjust based on the number of files in the
# directory.
for i in range(4):
path = os.path.join(DIR, FNAME_TEMP.format(i + 1))
sections = load_catme_data_sections(path)
dfs.append(load_main_table(sections[1]))
conflict_df = parse_team_questions(sections[3], sections[4])
conflict_df['Evaluation'] = i + 1
conflict_df['Question Page'] = 'Team Conflict'
satisfaction_df = parse_team_questions(sections[5], sections[6])
satisfaction_df['Evaluation'] = i + 1
satisfaction_df['Question Page'] = 'Team Satisfaction'
perspectives_df = parse_team_questions(sections[7], sections[8])
perspectives_df['Evaluation'] = i + 1
perspectives_df['Question Page'] = 'Team Perspectives'
team_ques_dfs.append(pd.concat([conflict_df, satisfaction_df,
perspectives_df], ignore_index=True))
team_questions_df =
|
pd.concat(team_ques_dfs, ignore_index=True)
|
pandas.concat
|
from __future__ import annotations
import pytest
from pandas.errors import ParserWarning
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
from pandas.io.xml import read_xml
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
@pytest.fixture(
params=[None, {"book": ["category", "title", "author", "year", "price"]}]
)
def iterparse(request):
return request.param
def read_xml_iterparse(data, **kwargs):
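    # the iterparse option of read_xml only works on files on disk, so write
    # the XML string to a temporary file before parsing it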
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write(data)
return read_xml(path, **kwargs)
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
</row>
</data>"""
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
<date>2020-01-01</date>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
<date>2021-01-01</date>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
<date>2022-01-01</date>
</row>
</data>"""
# DTYPE
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "str"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_all_str(parser):
df_result = read_xml(xml_dates, dtype="string", parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
dtype="string",
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": ["4.0", None, "3.0"],
"date": ["2020-01-01", "2021-01-01", "2022-01-01"],
},
dtype="string",
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_with_names(parser):
df_result = read_xml(
xml_dates,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
parser=parser,
)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"Col1": ["square", "circle", "triangle"],
"Col2": Series(["00360", "00360", "00180"]).astype("string"),
"Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"),
"Col4": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"sides": "Int64"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": Series([4.0, float("nan"), 3.0]).astype("Int64"),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_float(parser):
df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "float"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": Series([360, 360, 180]).astype("float"),
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_wrong_dtype(datapath, parser, iterparse):
filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(
ValueError, match=('Unable to parse string "Everyday Italian" at position 0')
):
|
read_xml(filename, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse)
|
pandas.io.xml.read_xml
|
import pandas as pd
import numpy as np
####################################################################################################################
# preprocess ACS data between 2012 to 2017
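# each entry maps a variable name to its ACS table id, the column holding the
# ZIP code identifier and the column holding the value of interest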
table_dict = {"total_population": {"table": "B01003", "zip": "GEO.id2", "variable":"HD01_VD01"},
"median_household_income": {"table": "B19013", "zip": "GEO.id2", "variable":"HD01_VD01"},
"gini_index": {"table": "B19083", "zip": "GEO.id2", "variable":"HD01_VD01"},
"health_coverage_population": {"table": "B992701", "zip": "GEO.id2", "variable":"HD01_VD02"},
"same_house": {"table": "B07012", "zip": "GEO.id2", "variable":"HD01_VD06"},
"poverty_rate": {"table": "S1701", "zip": "GEO.id2", "variable":"HC02_EST_VC01"}}
unemp_dict = {"unemployment_rate": {"table": "S2301", "zip": "GEO.id2", "variable":"HC04_EST_VC01"},
"unemployment_rate2": {"table": "DP03", "zip": "GEO.id2", "variable":"HC03_VC07"}}
def read_ACS(year_list, table_dict, unemp_dict):
    '''
    Read the yearly ACS csv files listed in table_dict (plus the year-dependent
    unemployment table) and return a dict mapping "<variable><year>" to a list
    of [dataframe with the first descriptive row dropped, column metadata].
    '''
data_dict = {}
for year in year_list:
for t_name, value in table_dict.items():
if (year == 13) and (t_name == "same_house"):
pass
else:
table_name = t_name + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
value["table"] + "_" + t_name + ".csv")
data_dict[table_name] = [df.iloc[1:], value]
if year <= 14:
emp_table_name = "unemployment_rate" + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
unemp_dict["unemployment_rate"]["table"] + "_" + "unemployment_rate" + ".csv")
data_dict[emp_table_name] = [df.iloc[1:], unemp_dict["unemployment_rate"]]
else:
emp_table_name = "unemployment_rate" + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
unemp_dict["unemployment_rate2"]["table"] + "_" + "unemployment_rate" + ".csv")
data_dict[emp_table_name] = [df.iloc[1:], unemp_dict["unemployment_rate2"]]
return data_dict
def ACS_select(df_dict):
    '''
    Keep only the ZIP code and the variable of interest from each table, rename
    the columns, and merge all variables of the same year into a single
    dataframe keyed by the two-digit year.
    '''
new_df_dict = {}
yearly_data = {}
for df_name, df_value in df_dict.items():
variable, year = df_name[:-2], df_name[-2:]
df_v = df_value[1]
df = df_value[0][[df_v["zip"], df_v["variable"]]]
new_df_dict[df_name] = {df_v["zip"]:"zipcode", df_v["variable"]:variable}
df = df.rename(columns=new_df_dict[df_name])
if year not in yearly_data:
yearly_data[year] = df
else:
yearly_data[year] = pd.merge(df, yearly_data[year], left_on="zipcode", right_on="zipcode")
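    # the 2013 ACS release has no "same_house" table (it is skipped in
    # read_ACS), so add a NaN placeholder column to keep the yearly frames
    # schema-consistent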
same_home13 = pd.DataFrame({"zipcode": yearly_data["13"]["zipcode"],"same_house":([np.nan] * yearly_data["13"].shape[0])})
yearly_data["13"] = pd.merge(yearly_data["13"], same_home13, left_on="zipcode", right_on="zipcode")
return yearly_data
def ACS_integrater(yearly_data):
    '''
    Stack the yearly dataframes into one long dataframe with a "year" column
    and a fixed column order.
    '''
ordered_column = ["zipcode", "total_population", "median_household_income", "gini_index",
"health_coverage_population", "same_house", "poverty_rate",
"unemployment_rate", "year"]
full_df = pd.DataFrame(columns=ordered_column)
for year, df in yearly_data.items():
df["year"] = year
df = df[ordered_column]
full_df = pd.concat([full_df, df], join="inner")
return full_df
def ACS_do(year_list, table_dict, unemp_dict):
    '''
    Driver: read, select and integrate the ACS tables for the given years.
    '''
data_dict = read_ACS(year_list, table_dict, unemp_dict)
yearly_data = ACS_select(data_dict)
full_df = ACS_integrater(yearly_data)
return full_df
full_df = ACS_do([12,13,14,15,16,17], table_dict, unemp_dict)
full_df.to_csv(r"..\data\ACS_final\ACS_full.csv")
##################################################################################################################################
# preprocess data before 2011
total_population11 = pd.read_csv(r"..\data\ACS_final\ACS_11_B01003_total_population.csv")
median_household_income11 = pd.read_csv(r"..\data\ACS_final\ACS_11_B19013_median_household_income.csv")
gini_index11 = pd.read_csv(r"..\data\ACS_final\ACS_11_B19083_gini_index.csv")
same_house11 = pd.read_csv(r"..\data\ACS_final\ACS_11_B07012_same_house.csv")
unemployment_rate11 = pd.read_csv(r"..\data\ACS_final\ACS_11_S2301_unemployment_rate.csv")
poverty_rate12 = pd.read_csv(r"..\data\ACS_final\ACS_12_S1701_poverty_rate.csv")
health_coverage_population12 = pd.read_csv(r"..\data\ACS_final\ACS_12_B992701_health_coverage_population.csv")
total_population11 = total_population11[["GEO.id2", "HD01_VD01"]].rename(columns={"GEO.id2":"zip code", "HD01_VD01":"total_population"})
median_household_income11 = median_household_income11[["GEO.id2", "HD01_VD01"]].rename(columns={"GEO.id2":"zip code", "HD01_VD01":"median_household_income"})
gini_index11 = gini_index11[["GEO.id2", "HD01_VD01"]].rename(columns={"GEO.id2":"zip code", "HD01_VD01":"gini_index"})
same_house11 = same_house11[["GEO.id2", "HD01_VD06"]].rename(columns={"GEO.id2":"zip code", "HD01_VD06":"same_house"})
unemployment_rate11 = unemployment_rate11[["GEO.id2", "HC04_EST_VC01"]].rename(columns={"GEO.id2":"zip code", "HC04_EST_VC01":"unemployment_rate"})
poverty_rate12 = poverty_rate12[["GEO.id2", "HC02_EST_VC01"]].rename(columns={"GEO.id2":"zip code","HC02_EST_VC01":"poverty_rate"})
health_coverage_population12 = health_coverage_population12[["GEO.id2","HD01_VD02"]].rename(columns={"GEO.id2":"zip code","HD01_VD02":"health_coverage_population"})
final11 = pd.merge(total_population11, median_household_income11, left_on="zip code", right_on="zip code")
final11 = pd.merge(final11, gini_index11, left_on="zip code", right_on="zip code")
final11 = pd.merge(final11, same_house11, left_on="zip code", right_on="zip code")
final11 =
|
pd.merge(final11, unemployment_rate11, left_on="zip code", right_on="zip code")
|
pandas.merge
|
import os
import re
import requests
import numpy as np
import pandas as pd
import FinanceDataReader as fdr
from bs4 import BeautifulSoup
def cal_num_stock(url) :
    # crawl the number of listed shares
r = requests.get(url)
soup = BeautifulSoup(r.text, 'lxml')
items = soup.find_all('table', {"summary" : "시가총액 정보"})
items = items[0].find_all("td")
    nums = re.findall(r"\d+", str(items[2]))
num_stock = 0
digits = len(nums) - 1
for num in nums :
num_stock += int(num) * 1000 ** digits
digits -= 1
return num_stock
def cal_bb(stock, w=20, k=2) :
x = pd.Series(stock)
mbb = x.rolling(w, min_periods=1).mean()
ubb = mbb + k * x.rolling(w, min_periods=1).std()
lbb = mbb - k * x.rolling(w, min_periods=1).std()
return mbb, ubb, lbb
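# A minimal usage sketch (not part of the original script; the ticker and the
# FinanceDataReader call below are only illustrative assumptions):
#
#     close = fdr.DataReader('005930')['Close']   # daily closing prices
#     mbb, ubb, lbb = cal_bb(close, w=20, k=2)    # 20-day Bollinger Bands, 2 std devs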
def cal_dmi(data, n=14, n_ADX=14) :
#https://github.com/Crypto-toolbox/pandas-technical-indicators/blob/master/technical_indicators.py : ADX
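    # directional movement: for each bar keep the up-move (high minus previous
    # high) or the down-move (previous low minus low) only when it is positive
    # and larger than the opposite move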
i = 0
UpI = []
DoI = []
while i + 1 <= data.index[-1] :
UpMove = data.loc[i + 1, "High"] - data.loc[i, "High"]
DoMove = data.loc[i, "Low"] - data.loc[i+1, "Low"]
if UpMove > DoMove and UpMove > 0 :
UpD = UpMove
else :
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0 :
DoD = DoMove
else :
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < data.index[-1]:
TR = max(data.loc[i + 1, 'High'], data.loc[i, 'Close']) - min(data.loc[i + 1, 'Low'], data.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s =
|
pd.Series(TR_l)
|
pandas.Series
|
# files are helper functions used to open files, get corpuses and save them back
import pandas as pd
import glob
def open_file(path:str):
# get content from txt file
f=open(path, "r")
if f.mode == 'r':
content=f.read()
return content
def open_many_files(path:str, filetype="/*.csv"):
# get all files on folder
all_files = glob.glob(path + filetype)
li = []
for filename in all_files:
df = pd.read_csv(filename, index_col=None, header=0)
li.append(df)
data =
|
pd.concat(li, axis=0, ignore_index=True)
|
pandas.concat
|
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
                  into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
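        # correlate the independent standard-normal draws through the lower
        # triangular factor L of the covariance matrix, i.e. samples = L @ y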
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
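            # the matrix is not positive definite: fall back to an eigenvalue
            # decomposition, clip negative eigenvalues to zero and rebuild a
            # lower triangular factor via a QR decomposition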
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
        .. note:: In the future, additional tests will be implemented to check
        that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
        if not (len(data.shape) == 2 and data.shape[0] == data.shape[1]):
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (data.values.round(sym_limit) == data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
        .. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
        df : :obj:`CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
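        # drop rows/columns whose variance is zero, invert the reduced matrix
        # with the sparse solver, then restore the original shape with zeros
        # in the removed positions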
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
        .. note:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
        Transform mean values to the mean values of the underlying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
        `pd.Series` of the underlying normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
            seed for the random number generator (by default the `numpy`
            default pseudo-random number generator is used).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
                  with mean=1 because a lognormal distribution cannot have mean=0.
                  Therefore, the `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
        Lognormal distribution sampling is independent of the `relative` kwarg
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
            # default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ =
|
pd.Series(std)
|
pandas.Series
|
import pandas as pd
import plotly.graph_objects as go
from EnergyIntensityIndicators.utilities import lmdi_utilities
from EnergyIntensityIndicators.utilities.dataframe_utilities \
import DFUtilities as df_utils
class AdditiveLMDI:
def __init__(self, output_directory, energy_data, energy_shares,
base_year, end_year, total_label, lmdi_type='LMDI-I'):
self.energy_data = energy_data
self.energy_shares = energy_shares
self.total_label = total_label
self.lmdi_type = lmdi_type
self.end_year = end_year
self.base_year = base_year
self.output_directory = output_directory
def log_mean_divisia_weights(self):
"""Calculate log mean weights for the additive model where T=t, 0 = t - 1
Args:
energy_data (dataframe): energy consumption data
energy_shares (dataframe): Shares of total energy for
each category in level of aggregation total_label (str):
Name of aggregation of categories in level of aggregation
lmdi_type (str, optional): 'LMDI-I' or 'LMDI-II'.
Defaults to 'LMDI-I' because it is
'consistent in aggregation and perfect
in decomposition at the subcategory level'
(Ang, B.W., 2015. LMDI decomposition approach: A guide for
implementation. Energy Policy 86, 233-238.).
"""
print(f'ADDITIVE LMDI TYPE: {self.lmdi_type}')
if not self.lmdi_type:
self.lmdi_type = 'LMDI-I'
print(f'ADDITIVE LMDI TYPE: {self.lmdi_type}')
log_mean_shares_labels = [f"log_mean_shares_{col}" for
col in self.energy_shares.columns]
log_mean_weights = pd.DataFrame(index=self.energy_data.index)
log_mean_values_df = pd.DataFrame(index=self.energy_data.index)
for col in self.energy_shares.columns:
self.energy_data[f"{col}_shift"] = self.energy_data[col].shift(
periods=1, axis='index', fill_value=0)
            # apply is generally not preferred for row-wise operations, but it keeps the logarithmic-average call readable
log_mean_values = self.energy_data[[col, f"{col}_shift"]].apply(lambda row:
lmdi_utilities.logarithmic_average(row[col],
row[f"{col}_shift"]), axis=1)
log_mean_values_df[col] = log_mean_values.values
self.energy_shares[f"{col}_shift"] = self.energy_shares[col].shift(periods=1, axis='index', fill_value=0)
            # apply is generally not preferred for row-wise operations, but it keeps the logarithmic-average call readable
log_mean_shares = self.energy_shares[[col, f"{col}_shift"]].apply(lambda row:
lmdi_utilities.logarithmic_average(row[col], \
row[f"{col}_shift"]), axis=1)
self.energy_shares[f"log_mean_shares_{col}"] = log_mean_shares
log_mean_weights[f'log_mean_weights_{col}'] = log_mean_shares * log_mean_values
cols_to_drop1 = [col for col in self.energy_shares.columns if col.startswith('log_mean_shares_')]
self.energy_shares = self.energy_shares.drop(cols_to_drop1, axis=1)
cols_to_drop = [col for col in self.energy_shares.columns if col.endswith('_shift')]
self.energy_shares = self.energy_shares.drop(cols_to_drop, axis=1)
cols_to_drop_ = [col for col in self.energy_data.columns if col.endswith('_shift')]
self.energy_data = self.energy_data.drop(cols_to_drop_, axis=1)
if self.lmdi_type == 'LMDI-I':
return log_mean_values_df
elif self.lmdi_type == 'LMDI-II':
sum_log_mean_shares = self.energy_shares[log_mean_shares_labels].sum(axis=1)
log_mean_weights_normalized = log_mean_weights.divide(sum_log_mean_shares.values.reshape(len(sum_log_mean_shares), 1))
log_mean_weights_normalized = log_mean_weights_normalized.drop([c for c in log_mean_weights_normalized.columns \
if not c.startswith('log_mean_weights_')], axis=1)
return log_mean_weights_normalized
else:
return log_mean_values_df
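    # A small worked example of the logarithmic average used above.  It is
    # assumed that lmdi_utilities.logarithmic_average implements the log-mean
    # L(a, b) = (a - b) / (ln(a) - ln(b)), e.g. L(10, 8) = 2 / (ln(10) - ln(8))
    # ≈ 8.96, which always lies between the two values.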
def calculate_effect(self, ASI):
"""Calculate effect from changes to activity, structure,
and intensity in the additive model
"""
ASI['effect'] = ASI.sum(axis=1)
return ASI
@staticmethod
def aggregate_additive(additive, base_year):
"""Aggregate additive data (allows for loop through every year as a base year, if desired)"""
cols = [c for c in list(additive.columns) if c != 'Year']
additive.loc[additive['Year'] <= base_year, cols] = 0
additive = additive.set_index('Year')
df = additive.cumsum(axis=0)
return df
def decomposition(self, ASI):
"""Format component data, collect overall effect, return aggregated
dataframe of the results for the additive LMDI model.
"""
# ASI.pop('lower_level_structure', None)
ASI_df = df_utils().merge_df_list(list(ASI.values()))
df = self.calculate_effect(ASI_df)
df = df.reset_index()
if 'Year' not in df.columns:
df = df.rename(columns={'index': 'Year'})
aggregated_df = self.aggregate_additive(df, self.base_year)
aggregated_df["@filter|Measure|BaseYear"] = self.base_year
return aggregated_df
def visualizations(self, data, base_year, end_year, loa, model, energy_type, rename_dict):
"""Visualize additive LMDI results in a waterfall chart, opens in internet browsers and
user must save manually (from plotly save button)
"""
data = data[data['@filter|Model'] == model.capitalize()]
structure_cols = []
for column in data.columns:
if column.endswith('Structure'):
structure_cols.append(column)
if len(structure_cols) == 1:
data = data.rename(columns={structure_cols[0]: '@filter|Measure|Structure'})
elif len(structure_cols) > 1:
data['@filter|Measure|Structure'] = data[structure_cols].sum(axis=1) # Current level total structure
to_drop = [s for s in structure_cols if s != '@filter|Measure|Structure']
data = data.drop(to_drop, axis=1)
x_data = []
if '@filter|Measure|Structure' in data.columns:
x_data.append('@filter|Measure|Structure')
else:
for c in data.columns:
if 'Structure' in c:
x_data.append(c)
if "@filter|Measure|Intensity" in data.columns:
x_data.append("@filter|Measure|Intensity")
else:
for c in data.columns:
if 'Intensity' in c:
x_data.append(c)
if "@filter|Measure|Activity" in data.columns:
x_data.append("@filter|Measure|Activity")
else:
for c in data.columns:
if c.endswith('Activity'):
x_data.append(c)
loa = [l.replace("_", " ") for l in loa]
if loa[0] == loa[-1]:
loa = [loa[0]]
else:
loa = [loa[0], loa[-1]]
final_year = max(data['@timeseries|Year'])
data_base = data[data['@timeseries|Year'] == base_year][x_data]
data_base['intial_energy'] = self.energy_data.loc[base_year, self.total_label]
data = data[data['@timeseries|Year'] == end_year][x_data]
if self.end_year in self.energy_data.index:
data['final_energy'] = self.energy_data.loc[end_year, self.total_label]
else:
data['final_energy'] = self.energy_data.loc[max(self.energy_data.index), self.total_label]
x_data = ['intial_energy'] + x_data + ['final_energy']
y_data =
|
pd.concat([data_base, data], ignore_index=True, axis=0, sort=False)
|
pandas.concat
|
"""Represent SQL tokens as Pandas operations.
"""
from sqlalchemy.sql import operators
from sqlalchemy import sql
from sqlalchemy import util
from sqlalchemy import types as sqltypes
import functools
import pandas as pd
import numpy as np
import collections
from . import dbapi
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.ext.compiler import compiles
def aggregate_fn(package=None):
"""Mark a Python function as a SQL aggregate function.
The function should typically receive a Pandas Series object
as an argument and return a scalar result.
E.g.::
from calchipan import aggregate_fn
@aggregate_fn()
def stddev(values):
return values.std()
The object is converted into a SQLAlchemy GenericFunction
object, which can be used directly::
stmt = select([stddev(table.c.value)])
or via the SQLAlchemy ``func`` namespace::
from sqlalchemy import func
stmt = select([func.stddev(table.c.value)])
Functions can be placed in ``func`` under particular
"package" names using the ``package`` argument::
@aggregate_fn(package='numpy')
def stddev(values):
return values.std()
Usage via ``func`` is then::
from sqlalchemy import func
stmt = select([func.numpy.stddev(table.c.value)])
An aggregate function that is called with multiple expressions
will be passed a single argument that is a list of Series
objects.
"""
def mark_aggregate(fn):
kwargs = {'name': fn.__name__}
if package:
kwargs['package'] = package
custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)
@compiles(custom_func, 'pandas')
def _compile_fn(expr, compiler, **kw):
return FunctionResolver(fn,
compiler.process(expr.clauses, **kw), True)
return custom_func
return mark_aggregate
def non_aggregate_fn(package=None):
"""Mark a Python function as a SQL non-aggregate function.
The function should receive zero or more scalar
Python objects as arguments and return a scalar result.
E.g.::
from calchipan import non_aggregate_fn
@non_aggregate_fn()
def add_numbers(value1, value2):
return value1 + value2
Usage and behavior is identical to that of :func:`.aggregate_fn`,
except that the function is not treated as an aggregate. Function
expressions are also expanded out to individual positional arguments,
whereas an aggregate always receives a single structure as an argument.
"""
def mark_non_aggregate(fn):
kwargs = {'name': fn.__name__}
if package:
kwargs['package'] = package
custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)
@compiles(custom_func, 'pandas')
def _compile_fn(expr, compiler, **kw):
return FunctionResolver(fn,
compiler.process(expr.clauses, **kw), False)
return custom_func
return mark_non_aggregate
ResolverContext = collections.namedtuple("ResolverContext",
["cursor", "namespace", "params"])
class Resolver(object):
def __call__(self, cursor, namespace, params):
"""Resolve this expression.
Resolvers are callables; this is called by the DBAPI."""
return self.resolve(ResolverContext(cursor, namespace, params))
def resolve(self, ctx):
"""Resolve this expression given a ResolverContext.
Front end for resolution, linked to top-level __call__()."""
raise NotImplementedError()
class NullResolver(Resolver):
def resolve(self, ctx):
pass
class ColumnElementResolver(Resolver):
"""Top level class for SQL expressions."""
def resolve_expression(self, ctx, product):
"""Resolve as a column expression.
Return value here is typically a Series or a scalar
value.
"""
raise NotImplementedError()
class FromResolver(Resolver):
"""Top level class for 'from' objects, things you can select rows from."""
def resolve_dataframe(self, ctx, names=True):
"""Resolve as a dataframe.
Return value here is a DataFrame object.
"""
raise NotImplementedError()
class FunctionResolver(ColumnElementResolver):
def __init__(self, fn, expr, aggregate):
self.fn = fn
self.expr = expr
self.aggregate = aggregate
def resolve_expression(self, ctx, product):
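        # an aggregate function receives the resolved clause expression (a
        # Series, or a list of Series when several clauses were given) as a
        # single argument and returns a scalar, which is wrapped into a
        # one-element Series; a non-aggregate function is expanded into
        # positional arguments instead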
if self.aggregate:
q = self.fn(self.expr.resolve_expression(
ctx, product))
q = pd.Series([q], name="aggregate")
else:
q = self.fn(*self.expr.resolve_expression(
ctx, product))
return q
class ConstantResolver(ColumnElementResolver):
def __init__(self, value):
self.value = value
def resolve_expression(self, ctx, product):
return self.value
class LiteralResolver(ColumnElementResolver):
def __init__(self, value):
self.value = value
self.name = str(id(self))
def resolve_expression(self, ctx, product):
return self.value
@property
def df_index(self):
return self.name
class ColumnResolver(ColumnElementResolver):
def __init__(self, name, tablename):
self.name = name
self.tablename = tablename
def resolve_expression(self, ctx, product):
if product is None:
df = TableResolver(self.tablename).resolve_dataframe(ctx)
else:
df = product.resolve_dataframe(ctx)
return df[self.df_index]
@property
def df_index(self):
return "#T_%s_#C_%s" % (self.tablename, self.name)
class UnaryResolver(ColumnElementResolver):
def __init__(self, expression, operator, modifier):
self.operator = operator
self.modifier = modifier
self.expression = expression
def resolve_expression(self, ctx, product):
return self.expression.resolve_expression(
ctx, product)
@property
def df_index(self):
return self.expression.df_index
class LabelResolver(Resolver):
def __init__(self, expression, name):
self.expression = expression
self.name = name
def resolve_expression(self, ctx, product):
return self.expression.resolve_expression(ctx, product)
@property
def df_index(self):
return self.name
class BinaryResolver(ColumnElementResolver):
def __init__(self, left, right, operator):
self.left = left
self.right = right
self.operator = operator
def resolve_expression(self, ctx, product):
return self.operator(
self.left.resolve_expression(ctx, product),
self.right.resolve_expression(ctx, product),
)
class ClauseListResolver(ColumnElementResolver):
def __init__(self, expressions, operator):
self.expressions = expressions
self.operator = operator
def resolve_expression(self, ctx, product):
exprs = [expr.resolve_expression(ctx, product)
for expr in self.expressions]
if self.operator is operators.comma_op:
if len(exprs) == 1:
return exprs[0]
else:
return exprs
else:
return functools.reduce(self.operator, exprs)
class BindParamResolver(ColumnElementResolver):
def __init__(self, name):
self.name = name
def resolve_expression(self, ctx, product):
return ctx.params[self.name]
class DerivedResolver(FromResolver):
def __init__(self, dataframe):
self.dataframe = dataframe
def resolve_dataframe(self, ctx, names=True):
return self.dataframe
class TableResolver(FromResolver):
def __init__(self, tablename, autoincrement_col=None):
self.tablename = tablename
self.autoincrement_col = autoincrement_col
def resolve_dataframe(self, ctx, names=True):
df = ctx.namespace[self.tablename]
if names:
# performance tests show that the rename() here is
# not terribly expensive as long as copy=False. Adding the
# index as a column is much more expensive, however,
# though is not as common of a use case.
# the renamed dataframe can be cached, though this means
# that all mutation operations need to clear the cache also.
# a quicker route to having the index accessible is to
# add an explicit copy of the index to the DataFrame outside
# of the SQL dialect - that way it won't be copied here
# each time.
renamed_df = df.rename(
columns=dict(
(k, "#T_%s_#C_%s" % (self.tablename, k))
for k in df.keys()
), copy=False
)
if self.autoincrement_col and self.autoincrement_col not in df:
renamed_df["#T_%s_#C_%s" %
(self.tablename, self.autoincrement_col)] = df.index
return renamed_df
elif self.autoincrement_col and self.autoincrement_col not in df:
renamed_df = df.copy()
renamed_df[self.autoincrement_col] = df.index
return renamed_df
else:
return df
class AliasResolver(FromResolver):
def __init__(self, table, aliasname):
self.table = table
self.aliasname = aliasname
def resolve_dataframe(self, ctx, names=True):
df = self.table.resolve_dataframe(ctx, names=False)
if names:
df = df.rename(
columns=dict(
(k, "#T_%s_#C_%s" % (self.aliasname, k))
for k in df.keys()
), copy=False
)
return df
class JoinResolver(FromResolver):
def __init__(self, left, right, onclause, isouter):
self.left = left
self.right = right
self.onclause = onclause
self.isouter = isouter
def resolve_dataframe(self, ctx, names=True):
df1 = left = self.left.resolve_dataframe(ctx)
df2 = self.right.resolve_dataframe(ctx)
if self.isouter:
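            # keep the original left-hand row index so that left rows without
            # a match can be restored after the merge-based outer join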
left['_cp_left_index'] = left.index
straight_binaries, remainder = self._produce_join_expressions(df1, df2)
df1 = self._merge_straight_binaries(ctx, df1, df2, straight_binaries)
df1 = self._merge_remainder(ctx, left, df1, df2,
straight_binaries, remainder)
return df1.where(
|
pd.notnull(df1)
|
pandas.notnull
|
import numpy as np
import pandas as pd
import itertools
import math
import re
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
plt.style.use('seaborn-white')
class MAM:
"""
    MAM (Marketing Attribution Models) is a class inspired by the R packages ‘GameTheoryAllocation’ from <NAME>
and ‘ChannelAttribution’ from <NAME> and <NAME> that was created to bring these concepts to
Python and to help us understand how the different marketing channels behave during the customer journey.
Parameters:
df = None by default, but should only be None if choosing to use a random dataframe. Otherwise,
it has to receive a Pandas dataframe;
    time_till_conv_colname = None by default. Column name in the df containing the time in hours until
the moment of the conversion. The column must have the same elements as the
channels_colname has.
                            Values can be in a list or in a string with a separator;
conversion_value = 1 by default. Integer that represents a monetary value of a 'conversion', can
also receive a string indicating the column name on the dataframe containing the
conversion values;
channels_colname = None by default. Column name in the df containing the different channels during the
customer journey. The column must have the same elements as the time_till_conv_colname
has.
                            Values can be in a list or in a string with a separator;
journey_with_conv_colname = None by default.
group_channels = False by default. Most important parameter on this class. This indicates the input
format of the dataframe.
True = Each row represents a user session that will be grouped
into a user journey;
False = Each row represents a user journey and the columns
group_channels_by_id_list = Empty list by default.
group_timestamp_colname = None by default.
create_journey_id_based_on_conversion = False by default.
path_separator = ' > ' by default. If using 'group_channels = True', this should match the separator
being used on the inputed dataframe in the channels_colname;
verbose = False by default. Internal parameter for printing while working with MAM;
random_df = False by default. Will create a random dataframe with testing purpose;
"""
def __init__(
self,
df=None,
time_till_conv_colname=None,
conversion_value=1,
channels_colname=None,
journey_with_conv_colname=None,
group_channels=False,
group_channels_by_id_list=[],
group_timestamp_colname=None,
create_journey_id_based_on_conversion = False,
path_separator=' > ',
verbose=False,
random_df=False):
self.verbose = verbose
self.sep = path_separator
self.group_by_channels_models = None
##########################################################
##### Section 0: Funcions needed to create the class #####
##########################################################
def journey_id_based_on_conversion(df,
group_id,
transaction_colname):
"""
Internal function that creates a journey_id column into a DF containing a User ID and Boolean column
that indicates if there has been a conversion on that instance
"""
df_temp = df.copy()
for i in group_id:
df_temp[i] = df_temp[i].apply(str)
#Converting bool column to int
df_temp['journey_id'] = df_temp[transaction_colname].map(lambda x: 0 if x == False else 1)
#Cumsum for each transaction to expand the value for the rows that did not have a transaction
df_temp['journey_id'] = df_temp.groupby(group_id)['journey_id'].cumsum()
#Subtracting 1 only for the row that had a transaction
t = df_temp['journey_id'] - 1
df_temp['journey_id'] = df_temp['journey_id'].where((df_temp[transaction_colname] == False), t).apply(str)
df_temp['journey_id'] = 'id:' + df_temp[group_id[0]] + '_J:' + df_temp['journey_id']
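            # e.g. a user whose sessions have conversion flags
            # [False, True, False, True] ends up with journey ids
            # J:0, J:0, J:1, J:1 -- every session up to and including a
            # conversion belongs to the same journey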
del t
return df_temp
def random_mam_data_frame(user_id = 300, k = 50000, conv_rate = 0.4):
import random
channels = ['Direct', 'Direct', 'Facebook', 'Facebook', 'Facebook',
'Google Search', 'Google Search', 'Google Search', 'Google Search', 'Google Display',
'Organic', 'Organic', 'Organic', 'Organic', 'Organic', 'Organic',
'Email Marketing', 'Youtube', 'Instagram']
has_transaction = ([True] * int(conv_rate * 100)) + ([False] * int((1 - conv_rate) * 100))
user_id = list(range(0, 700))
day = range(1, 30)
month = range(1, 12)
res = []
for i in [channels,has_transaction, user_id, day, month]:
res.append(random.choices(population=i, k=k))
df = pd.DataFrame(res).transpose()
df.columns = ['channels', 'has_transaction', 'user_id', 'day', 'month']
df['visitStartTime'] = '2020-' + df['month'].apply(lambda val: str(val) if val > 9 else '0' + str(val)) +'-'+ df['day'].apply(lambda val: str(val) if val > 9 else '0' + str(val))
return df
#####################################################
##### Section 1: Creating object and attributes #####
#####################################################
###########################
#### random_df == True ####
###########################
if random_df:
df = random_mam_data_frame()
group_channels=True
channels_colname = 'channels'
journey_with_conv_colname= 'has_transaction'
group_channels_by_id_list=['user_id']
group_timestamp_colname = 'visitStartTime'
create_journey_id_based_on_conversion = True
self.original_df = df.copy()
################################
#### group_channels == True ####
################################
if group_channels:
# Copying, sorting and converting variables
df = df.reset_index().copy()
df[group_timestamp_colname] = pd.to_datetime( df[group_timestamp_colname])
df.sort_values( group_channels_by_id_list + [group_timestamp_colname], inplace=True)
if create_journey_id_based_on_conversion:
df = journey_id_based_on_conversion(df = df,
group_id = group_channels_by_id_list,
transaction_colname = journey_with_conv_colname)
group_channels_by_id_list = ['journey_id']
# Grouping channels based on group_channels_by_id_list
######################################################
self.print('group_channels == True')
self.print('Grouping channels...')
temp_channels = df.groupby(group_channels_by_id_list)[
channels_colname].apply(list).reset_index()
self.channels = temp_channels[channels_colname]
self.print('Status: Done')
# Grouping timestamp based on group_channels_by_id_list
####################################################
self.print('Grouping timestamp...')
df_temp = df[group_channels_by_id_list + [group_timestamp_colname]]
df_temp = df_temp.merge(
df.groupby(group_channels_by_id_list)[group_timestamp_colname].max(),
on=group_channels_by_id_list)
# calculating the time till conversion
######################################
df_temp['time_till_conv'] = (df_temp[group_timestamp_colname + '_y'] -
df_temp[group_timestamp_colname + '_x']).astype('timedelta64[h]')
df_temp = df_temp.groupby(group_channels_by_id_list)[
'time_till_conv'].apply(list).reset_index()
self.time_till_conv = df_temp['time_till_conv']
self.print('Status: Done')
if journey_with_conv_colname is None:
# If journey_with_conv_colname is None, we will assume that
# all journeys ended in a conversion
###########################################################
self.journey_with_conv = self.channels.apply(lambda x: True)
self.journey_id = pd.Series(df[group_channels_by_id_list].unique())
else:
# Grouping unique journeys and whether the journey ended with a
# conversion
##########################################################
self.print('Grouping journey_id and journey_with_conv...')
df_temp = df[group_channels_by_id_list +
[journey_with_conv_colname]]
temp_journey_id_conv = df_temp.groupby(group_channels_by_id_list)[
journey_with_conv_colname].max().reset_index()
self.journey_id = temp_journey_id_conv[group_channels_by_id_list]
self.print('Status: Done')
self.journey_with_conv = temp_journey_id_conv[journey_with_conv_colname]
self.print('Status: Done')
#################################
#### group_channels == False ####
#################################
else:
self.journey_id = df[group_channels_by_id_list]
#####################
### self.channels ###
#####################
# converts channels str to list of channels
if isinstance(df[channels_colname][0], str):
self.channels = df[channels_colname].apply(lambda x: x.split(self.sep))
else:
self.channels = df[channels_colname]
###########################
### self.time_till_conv ###
###########################
if time_till_conv_colname is None:
self.time_till_conv = self.channels.apply(lambda x: list(range(len(x)))[::-1])
self.time_till_conv = self.time_till_conv.apply(lambda x: list(np.asarray(x) * 24 ))
else:
if isinstance(df[channels_colname][0], str):
self.time_till_conv = df[time_till_conv_colname].apply(lambda x: [float(value) for value in x.split(self.sep)])
else:
self.time_till_conv = df[time_till_conv_colname]
##############################
### self.journey_with_conv ###
##############################
if journey_with_conv_colname is None:
self.journey_with_conv = self.channels.apply(lambda x: True)
else:
self.journey_with_conv = df[journey_with_conv_colname]
########################
### conversion_value ###
########################
# conversion_value could be a single int value or a pandas Series
if isinstance(conversion_value, int):
self.conversion_value = self.journey_with_conv.apply(lambda valor: conversion_value if valor else 0)
else:
self.conversion_value = df[conversion_value]
#################
### DataFrame ###
#################
self.DataFrame = None
self.as_pd_dataframe()
######################################
##### Section 2: Output methods #####
######################################
def print(self, *args, **kwargs):
if self.verbose:
print(*args, **kwargs)
def as_pd_dataframe(self):
"""
Return the input attributes as a pandas DataFrame stored on self.DataFrame
"""
if not(isinstance(self.DataFrame, pd.DataFrame)):
if isinstance(self.journey_id, pd.DataFrame):
self.DataFrame = self.journey_id
self.DataFrame['channels_agg'] = self.channels.apply(lambda x: self.sep.join(x))
self.DataFrame['time_till_conv_agg'] = self.time_till_conv.apply(lambda x : self.sep.join([str(value) for value in x]))
self.DataFrame['converted_agg'] = self.journey_with_conv
self.DataFrame['conversion_value'] = self.conversion_value
else:
self.DataFrame = pd.DataFrame({'journey_id': self.journey_id,
'channels_agg': self.channels.apply(lambda x: self.sep.join(x)),
'time_till_conv_agg': self.time_till_conv.apply(lambda x : self.sep.join([str(value) for value in x])),
'converted_agg': self.journey_with_conv,
'conversion_value': self.conversion_value})
return self.DataFrame
def attribution_all_models(
self,
model_type='all',
last_click_non_but_not_this_channel='Direct',
time_decay_decay_over_time=0.5,
time_decay_frequency=128,
shapley_size=4,
shapley_order=False,
shapley_values_col= 'conv_rate',
markov_transition_to_same_state=False,
group_by_channels_models=True):
"""
Runs the attribution models selected by model_type and returns the grouped results data frame.
Heuristic models: attribution_last_click, attribution_last_click_non, attribution_first_click, attribution_linear, attribution_position_based, attribution_time_decay.
Algorithmic models: attribution_shapley, attribution_markov.
Parameters:
model_type = ['all',
'heuristic',
'algorithmic']
"""
if model_type == 'all':
heuristic = True
algorithmic = True
elif model_type == 'heuristic':
heuristic = True
algorithmic = False
else:
heuristic = False
algorithmic = True
if heuristic:
# Running attribution_last_click
self.attribution_last_click(group_by_channels_models=group_by_channels_models)
# Running attribution_last_click_non
self.attribution_last_click_non(but_not_this_channel = last_click_non_but_not_this_channel)
# Running attribution_first_click
self.attribution_first_click(group_by_channels_models=group_by_channels_models)
# Running attribution_linear
self.attribution_linear(
group_by_channels_models=group_by_channels_models)
# Running attribution_position_based
self.attribution_position_based(group_by_channels_models=group_by_channels_models)
# Running attribution_time_decay
self.attribution_time_decay(
decay_over_time = time_decay_decay_over_time,
frequency=time_decay_frequency,
group_by_channels_models=group_by_channels_models)
if algorithmic:
# Running attribution_shapley
self.attribution_shapley(size=shapley_size,
order=shapley_order,
group_by_channels_models=group_by_channels_models,
values_col=shapley_values_col)
# Running attribution_markov
self.attribution_markov(transition_to_same_state=markov_transition_to_same_state)
return self.group_by_channels_models
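# Hypothetical usage sketch (illustrative only; the class name `MAM` is an
# assumption and does not appear in this fragment, while the keyword arguments
# mirror the constructor options shown in Section 1):
#
# attributions = MAM(random_df=True)
# summary = attributions.attribution_all_models(model_type='all')
# print(summary.head())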
def plot(self,
model_type='all',
sort_model=None,
number_of_channels=10,
other_df = None,
*args, **kwargs):
"""
Barplot of the results that were generated and stored on the variable self.group_by_channels_models
Parameters:
model_type = ['all',
'heuristic',
'algorithmic']
sort_model = has to be a string; accepts regex, e.g. by passing r'example'
other_df = None. In case the user wants to use a new data frame
"""
model_types = {'all':'all',
'heuristic': r'heuristic',
'algorithmic': r'algorithmic'}
if not isinstance(other_df, pd.DataFrame):
# Checking if there are any results on self.group_by_channels_models
if isinstance(self.group_by_channels_models, pd.DataFrame):
df_plot = self.group_by_channels_models
else:
ax = 'self.group_by_channels_models == None'
else:
df_plot = other_df
# Sorting self.group_by_channels_models
if sort_model != None:
# List comprehension to accept regex
df_plot = df_plot.sort_values([[x for x in df_plot.columns if (re.search(sort_model, x))]][0],
ascending=True)
#Selecting columns that matches the pattern
if model_types[model_type] != 'all':
df_plot = df_plot[['channels'] + [x for x in df_plot.columns if re.search(model_types[model_type], x)]]
# Subsetting the results based on the number of channels to be shown
df_plot = df_plot.tail(number_of_channels)
# Melting DF so the results are divided into 'channels', 'variable' and 'value'
df_plot = pd.melt(df_plot,id_vars='channels')
# Plot Parameters
fig, ax = plt.subplots(figsize=(20,7))
ax = sns.barplot(data = df_plot, hue = 'variable', y = 'value', x = 'channels', *args, **kwargs)
plt.xticks(rotation=15)
ax.legend(loc = 'upper left', frameon = True, fancybox = True)
ax.axhline(0, color='black', linestyle='-', alpha=1,lw=2)
ax.grid(color='gray', linestyle=':', linewidth=1, axis='y')
ax.set_frame_on(False)
return ax
def channels_journey_time_based_overwrite(
self, selected_channel='Direct', time_window=24, order=1, inplace=False):
"""
Overwrites channels in the conversion journey that match the criteria with the previous
channel in the journey:
- Is equal to the selected_channel;
- The difference between the contacts is less than the selected time_window;
Parameters:
selected_channel = channel to be overwritten;
time_window = the time window in hours within which the selected channel will be overwritten;
order = how many times the function will loop through the same journey;
ex: journey [Organic > Direct > Direct]
order 1 output: [Organic > Organic > Direct]
order 2 output: [Organic > Organic > Organic]
"""
frame = self.channels.to_frame(name='channels')
frame['time_till_conv_window'] = self.time_till_conv.apply(lambda time_till_conv: [time_window + 1] + [
time - time_till_conv[i + 1] for i, time in enumerate(time_till_conv) if i < len(time_till_conv) - 1])
frame['time_till_conv_window'] = frame['time_till_conv_window'].apply(
lambda time_till_conv: np.absolute(np.asarray(time_till_conv)).tolist())
loop_count = 0
while loop_count < order:
frame['channels'] = frame.apply(lambda x: [x.channels[i - 1] if ((canal == selected_channel) & (
time < time_window)) else canal for i, (canal, time) in enumerate(zip(x.channels, x.time_till_conv_window))], axis=1)
loop_count += 1
if inplace:
self.channels = frame['channels'].copy()
new_channels = None
else:
new_channels = frame['channels'].copy()
return new_channels
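# Minimal standalone sketch (illustrative, not part of the class) of the overwrite
# rule documented above: a 'Direct' touch that happens within the time window
# inherits the previous channel of the journey.
journey = ['Organic', 'Direct', 'Direct']
gaps_hours = [25, 3, 2]  # hypothetical gap between each touch and the previous one
overwritten = [journey[i - 1] if (c == 'Direct' and i > 0 and gaps_hours[i] < 24) else c
               for i, c in enumerate(journey)]
# overwritten == ['Organic', 'Organic', 'Direct']; a second pass (order=2) applied to
# this result would also turn the remaining 'Direct' into 'Organic'.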
def group_by_results_function(self, channels_value, model_name):
"""
Internal function to generate the group_by_channels_models. A pandas DF containing
the attributed values for each channel
"""
channels_list = []
self.channels.apply(lambda x: channels_list.extend(x))
values_list = []
channels_value.apply(lambda x: values_list.extend(x))
frame = pd.DataFrame(
{'channels': channels_list, 'value': values_list})
frame = frame.groupby(['channels'])['value'].sum()
if isinstance(self.group_by_channels_models, pd.DataFrame):
frame = frame.reset_index()
frame.columns = ['channels', model_name]
self.group_by_channels_models = pd.merge(self.group_by_channels_models, frame,
how='outer', on=['channels']).fillna(0)
else:
self.group_by_channels_models = frame.reset_index()
self.group_by_channels_models.columns = ['channels', model_name]
return frame
###################################################
##### Section 3: Channel Attribution methods #####
###################################################
def attribution_last_click(self, group_by_channels_models=True):
"""
The last touchpoint receives all the credit
Parameters:
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_last_click_heuristic'
# Results part 1: Column values
# Results in the same format as the DF
channels_value = self.channels.apply(
lambda channels: np.asarray(([0] * (len(channels) - 1)) + [1]))
# multiplying the results with the conversion value
channels_value = channels_value * self.conversion_value
# multiplying with the boolean column that indicates whether the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Results part 2: Results
if group_by_channels_models:
# Selecting last channel from the series
channels_series = self.channels.apply(lambda x: x[-1])
# Creating a data_frame where we have the last channel and the
# conversion values
frame = channels_series.to_frame(name='channels')
# multiplying with the boolean column that indicates if the conversion
# happened
frame['value'] = self.conversion_value * \
self.journey_with_conv.apply(int)
# Grouping by channels and adding the values
frame = frame.groupby(['channels'])['value'].sum()
# Grouped Results
if isinstance(self.group_by_channels_models, pd.DataFrame):
frame = frame.reset_index()
frame.columns = ['channels', model_name]
self.group_by_channels_models = pd.merge(self.group_by_channels_models, frame, how='outer', on=['channels']).fillna(0)
else:
self.group_by_channels_models = frame.reset_index()
self.group_by_channels_models.columns = ['channels', model_name]
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame)
def attribution_last_click_non(self, but_not_this_channel='Direct', group_by_channels_models=True):
"""
All the traffic from a specific channel is ignored, and 100% of the credit for the sale
goes to the last channel the customer clicked through before converting
Parameters:
but_not_this_channel = channel to be overwritten
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_last_click_non_' + but_not_this_channel + '_heuristic'
# Results part 1: Column values
# Results in the same format as the DF
channels_value = self.channels.apply(
lambda canais: np.asarray(
[
1 if i == max(
[
i if canal != but_not_this_channel else 0 for i,
canal in enumerate(canais)]) else 0 for i,
canal in enumerate(canais)]))
# multiplying the results with the conversion value
channels_value = channels_value * self.conversion_value
# multiplying with the boolean column that indicates if the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Results part 2: Results
if group_by_channels_models:
# Selecting the last channel that is not the one chosen
channels_series = self.channels.apply(
lambda canais: (
canais[-1] if len([canal for canal in canais if canal != but_not_this_channel]) == 0
else canais[max([i for i, canal in enumerate(canais) if canal != but_not_this_channel])]))
# Creating a data_frame where we have the last channel and the
# conversion values
frame = channels_series.to_frame(name='channels')
# multiplying with the boolean column that indicates whether the conversion
# happened
frame['value'] = self.conversion_value * \
self.journey_with_conv.apply(int)
# Grouping by channels and adding the values
frame = frame.groupby(['channels'])['value'].sum()
if isinstance(self.group_by_channels_models, pd.DataFrame):
frame = frame.reset_index()
frame.columns = ['channels', model_name]
self.group_by_channels_models = pd.merge(self.group_by_channels_models, frame, how='outer', on=['channels']).fillna(0)
else:
self.group_by_channels_models = frame.reset_index()
self.group_by_channels_models.columns = ['channels', model_name]
return (channels_value, frame)
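# Quick illustrative check of the selection rule above: the last touchpoint that is
# not the excluded channel receives all the credit.
canais = ['Direct', 'Organic', 'Direct']
last_non = max(i for i, c in enumerate(canais) if c != 'Direct')
# canais[last_non] == 'Organic', so 'Organic' gets 100% of this conversion.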
def attribution_first_click(self, group_by_channels_models=True):
"""
The first touchpoint receives all the credit
Parameters:
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_first_click_heuristic'
# Results part 1: Column values
###############################
# Results in the same format as the DF
channels_value = self.channels.apply(
lambda channels: np.asarray([1] + ([0] * (len(channels) - 1))))
# multiplying the results with the conversion value
channels_value = channels_value * self.conversion_value
# multiplying with the boolean column that indicates if the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Results part 2: Grouped Results
#################################
if group_by_channels_models:
# Selecting first channel from the series
channels_series = self.channels.apply(lambda x: x[0])
# Creating a data_frame where we have the last channel and the
# conversion values
frame = channels_series.to_frame(name='channels')
# multiplying with the boolean column that indicates if the conversion
# happened
frame['value'] = self.conversion_value * \
self.journey_with_conv.apply(int)
# Grouping by channels and adding the values
frame = frame.groupby(['channels'])['value'].sum()
if isinstance(self.group_by_channels_models, pd.DataFrame):
frame = frame.reset_index()
frame.columns = ['channels', model_name]
self.group_by_channels_models = pd.merge(self.group_by_channels_models, frame, how='outer', on=['channels']).fillna(0)
else:
self.group_by_channels_models = frame.reset_index()
self.group_by_channels_models.columns = ['channels', model_name]
return (channels_value, frame)
def attribution_linear(self, group_by_channels_models=True):
"""
Each touchpoint in the conversion path has an equal value
Parameters:
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_linear_heuristic'
channels_count = self.channels.apply(lambda x: len(x))
channels_value = (self.conversion_value * self.journey_with_conv.apply(int) /
channels_count).apply(lambda x: [round(x, 2)]) * channels_count
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Grouping the attributed values for each channel
if group_by_channels_models:
frame = self.group_by_results_function(channels_value, model_name)
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame)
def attribution_position_based(
self, list_positions_first_middle_last=[
0.4, 0.2, 0.4], group_by_channels_models=True):
"""
First and last touchpoints receive preset weights; the remaining weight is evenly distributed among the middle touchpoints.
default:
- First channel = 0.4
- Distributed among the middle channels = 0.2
- Last channel = 0.4
Parameters:
list_positions_first_middle_last = list with percentages that will be given to each position
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_position_based_' + '_'.join([str(value) for value in list_positions_first_middle_last]) + '_heuristic'
# Selecting last channel from the series
channels_value = self.channels.apply(
lambda canais: np.asarray([1]) if len(canais) == 1
else np.asarray([list_positions_first_middle_last[0] + list_positions_first_middle_last[1] / 2, list_positions_first_middle_last[2] + list_positions_first_middle_last[1] / 2]) if len(canais) == 2
else np.asarray([list_positions_first_middle_last[0]] + [list_positions_first_middle_last[1] / (len(canais) - 2)] * (len(canais) - 2) + [list_positions_first_middle_last[2]]))
# multiplying the results with the conversion value
channels_value = channels_value * self.conversion_value
# multiplying with the boolean column that indicates if the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Grouping the attributed values for each channel
if group_by_channels_models:
frame = self.group_by_results_function(channels_value, model_name)
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame)
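# Worked example (illustrative) of the position-based split with the default
# weights [0.4, 0.2, 0.4] and a journey of four touchpoints.
first, middle, last = 0.4, 0.2, 0.4
n = 4
weights = [first] + [middle / (n - 2)] * (n - 2) + [last]
# weights == [0.4, 0.1, 0.1, 0.4]: 40% to the first touch, 40% to the last,
# and the middle 20% shared equally by the two middle touches.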
def attribution_position_decay(self, group_by_channels_models=True):
"""
OBS: This function is a work in progress
Linear decay for each touchpoint further from conversion.
Parameters:
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_position_decay_heuristic'
channels_value = self.channels.apply(
lambda channels: np.asarray([1]) if len(channels) == 1
else (np.asarray(list(range(1, len(channels) + 1))) /
np.sum(np.asarray(list(range(1, len(channels) + 1))))))
# multiplying the results with the conversion value
channels_value = channels_value * self.conversion_value
# multiplying with the boolean column that indicates if the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Grouping the attributed values for each channel
if group_by_channels_models:
frame = self.group_by_results_function(channels_value, model_name)
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame)
def attribution_time_decay(
self,
decay_over_time=0.5,
frequency=168,
group_by_channels_models=True):
"""
Decays for each touchpoint further from conversion
Parameters:
decay_over_time = percentage that will be lost by time away from the conversion
frequency = The frequency in hours that the decay will happen
group_by_channels_models= True by default. Will aggregate the attributed results by each channel on
self.group_by_channels_models
"""
model_name = 'attribution_time_decay' + str(decay_over_time) + '_freq' + str(frequency) + '_heuristic'
# Removing zeros and dividing by the frequency
time_till_conv_window = self.time_till_conv.apply(lambda time_till_conv:
np.exp(math.log(decay_over_time) * np.floor(np.asarray(time_till_conv) / frequency)) /
sum(np.exp(math.log(decay_over_time) * np.floor(np.asarray(time_till_conv) / frequency))) )
# multiplying the results with the conversion value
channels_value = time_till_conv_window * self.conversion_value
# multiplying with the boolean column that indicates if the conversion
# happened
channels_value = channels_value * self.journey_with_conv.apply(int)
channels_value = channels_value.apply(lambda values: values.tolist())
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Grouping the attributed values for each channel
if group_by_channels_models:
frame = self.group_by_results_function(channels_value, model_name)
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame)
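# Illustrative standalone computation of the time-decay weights used above:
# every `frequency` hours away from the conversion multiplies the credit by
# `decay_over_time`.
import math
import numpy as np
decay_over_time, frequency = 0.5, 168  # lose half the credit per week
time_till_conv = np.array([400.0, 200.0, 0.0])  # hours before the conversion
raw = np.exp(math.log(decay_over_time) * np.floor(time_till_conv / frequency))
weights = raw / raw.sum()
# raw ~ [0.25, 0.5, 1.0] and weights ~ [0.143, 0.286, 0.571]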
def attribution_markov(self, transition_to_same_state=False, group_by_channels_models=True):
"""
"""
model_name = 'attribution_markov'
model_type = '_algorithmic'
if transition_to_same_state:
model_name = model_name + '_same_state' + model_type
else:
model_name = model_name + model_type
def power_to_infinity(matrix):
"""
Raises a square matrix to an infinite power using eigendecomposition.
All matrix rows must add to 1.
M = Q*L*inv(Q), where L = eigenvalue diagonal values, Q = eigenvector matrix
M^N = Q*(L^N)*inv(Q)
"""
eigen_value, eigen_vectors = np.linalg.eig(matrix)
# At infinity everything converges to 0 or 1, thus we use np.trunc()
diagonal = np.diag(np.trunc(eigen_value.real + 0.001))
try:
result = (eigen_vectors @ diagonal @ np.linalg.inv(eigen_vectors)).real
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
warnings.warn("Warning... Singular matrix error. Check for rows or columns entirely filled with zeros")
result = (eigen_vectors @ diagonal @ np.linalg.pinv(eigen_vectors)).real
else:
raise
return result
def normalize_rows(matrix):
size = matrix.shape[0]
mean = matrix.sum(axis=1).reshape((size, 1))
mean = np.where(mean == 0, 1, mean)
return matrix / mean
def calc_total_conversion(matrix):
normal_matrix = normalize_rows(matrix)
infinity_matrix = power_to_infinity(normal_matrix)
return infinity_matrix[0, -1]
def removal_effect(matrix):
size = matrix.shape[0]
conversions = np.zeros(size)
for column in range(1, size - 2):
temp = matrix.copy()
temp[:, -2] = temp[:, -2] + temp[:, column]
temp[:, column] = 0
conversions[column] = calc_total_conversion(temp)
conversion_orig = calc_total_conversion(matrix)
return 1 - (conversions / conversion_orig)
def path_to_matrix(paths):
channel_max = int(paths[:, 0:2].max()) + 1
matrix = np.zeros((channel_max, channel_max), dtype="float")
for x, y, val in paths:
matrix[int(x), int(y)] = val
matrix[-1, -1] = 1
matrix[-2, -2] = 1
return matrix
temp = self.channels.apply(
lambda x: ["(inicio)"] + x) + self.journey_with_conv.apply(
lambda x: [
"(conversion)" if x else "(null)"])
orig = []
dest = []
journey_length = []
def save_orig_dest(arr):
orig.extend(arr[:-1])
dest.extend(arr[1:])
journey_length.append(len(arr))
temp.apply(save_orig_dest)
# copying conversion_quantity to each new row
if type(self.conversion_value) in (int, float):
# we do not have a frequency column yet, so we use self.conversion_value.apply(lambda x: 1)
# to count each line
conversion_quantity = self.conversion_value.apply(lambda x: 1)
else:
conversion_quantity = []
for a,b in zip(self.conversion_value.apply(lambda x: 1), journey_length):
conversion_quantity.extend([a] * (b-1))
temp = pd.DataFrame({"orig": orig, "dest": dest, "count": conversion_quantity})
temp = temp.groupby(["orig", "dest"], as_index=False).sum()
if not transition_to_same_state:
temp = temp[temp.orig != temp.dest]
# Converting channels_names to indexes and passing a numpy array forward
channels_names = (
["(inicio)"]
+ list(
(set(temp.orig) - set(["(inicio)"]))
| (set(temp.dest) - set(["(conversion)", "(null)"]))
)
+ ["(null)", "(conversion)"]
)
temp["orig"] = temp.orig.apply(channels_names.index)
temp["dest"] = temp.dest.apply(channels_names.index)
matrix = path_to_matrix(temp[["orig", "dest", "count"]].values)
removal_effect_result = removal_effect(matrix)[1:-2]
results = removal_effect_result / removal_effect_result.sum(axis=0)
# Channels weights
frame = pd.DataFrame({"value": results}, index=channels_names[1:-2])
removal_effect_result = pd.DataFrame({"removal_effect": removal_effect_result}, index=channels_names[1:-2])
# Transition matrix
matrix = normalize_rows(matrix)
matrix = pd.DataFrame(matrix, columns=channels_names, index=channels_names)
# Apply weights back to each journey
chmap = {a: b[0] for a,b in zip(frame.index.values, frame.values)}
channels_value = self.channels.apply(lambda y: [chmap[x] for x in y])
channels_value = channels_value.apply(lambda x: list(np.array(x) / sum(x)))
# Adding the results to self.DataFrame
self.as_pd_dataframe()
self.DataFrame[model_name] = channels_value.apply(lambda x : self.sep.join([str(value) for value in x]))
# Grouping the attributed values for each channel
if group_by_channels_models:
if isinstance(self.group_by_channels_models, pd.DataFrame):
frame = frame.reset_index()
frame.columns = ['channels', model_name]
frame[model_name] = frame[model_name] * self.conversion_value.sum()
self.group_by_channels_models = pd.merge(self.group_by_channels_models, frame, how='outer', on=['channels']).fillna(0)
else:
frame = frame.reset_index()
frame.columns = ['channels', model_name]
frame[model_name] = frame[model_name] * self.conversion_value.sum()
self.group_by_channels_models = frame
else:
frame = 'group_by_channels_models = False'
return (channels_value, frame, matrix, removal_effect_result)
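# Minimal standalone sketch (illustrative; toy numbers) of the removal-effect idea
# implemented above. Simplification: a large matrix power stands in for the
# eigendecomposition-based power_to_infinity used by the method.
import numpy as np
# States: (inicio), A, B, (null), (conversion); each row sums to 1.
P = np.array([[0.0, 0.6, 0.4, 0.0, 0.0],
              [0.0, 0.0, 0.5, 0.2, 0.3],
              [0.0, 0.2, 0.0, 0.3, 0.5],
              [0.0, 0.0, 0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0, 0.0, 1.0]])
def conv_prob(m):
    # long-run probability of ending in (conversion) when starting from (inicio)
    return np.linalg.matrix_power(m, 200)[0, -1]
base = conv_prob(P)  # ~0.62
removal = []
for ch in (1, 2):  # remove channel A, then channel B
    q = P.copy()
    q[:, 3] += q[:, ch]  # traffic that went to the removed channel goes to (null)
    q[:, ch] = 0.0
    removal.append(1 - conv_prob(q) / base)
weights = np.array(removal) / sum(removal)  # ~[0.49, 0.51]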
def journey_conversion_table(self, order = False, size = None):
"""
Transforms journey channels into channel-combination keys,
counts the number of conversions and journeys and
computes the conversion rate of each channel combination
"""
#Creating Channels DF
df_temp = self.journey_id.copy()
if order:
df_temp['combinations'] = self.channels.apply(lambda channels: sorted(list(set(channels)), key=lambda x: channels.index(x)) ).copy()
else:
df_temp['combinations'] = self.channels.apply(lambda channels: sorted(list(set(channels))) ).copy()
if size != None:
df_temp['combinations'] = df_temp['combinations'].apply(lambda channels: self.sep.join(channels[size * -1:]) )
else:
df_temp['combinations'] = df_temp['combinations'].apply(lambda channels: self.sep.join(channels) )
#Adding journey_with_conv column
df_temp['journey_with_conv'] = self.journey_with_conv.apply(int)
df_temp['conversion_value'] = self.conversion_value
#Grouping journey_with_conv
conv_val = df_temp.groupby(['combinations'])['conversion_value'].sum().reset_index()['conversion_value']
df_temp = df_temp.groupby(['combinations'])['journey_with_conv'].agg([('conversions', 'sum'), ('total_sequences', 'count')]).reset_index()
df_temp['conversion_value'] = conv_val
#Calculating the conversion rate
df_temp['conv_rate'] = df_temp['conversions'] / df_temp['total_sequences']
return df_temp
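# Standalone toy illustration (not part of the class) of the aggregation above.
import pandas as pd
toy = pd.DataFrame({'combinations': ['A > B', 'A', 'A > B'],
                    'journey_with_conv': [1, 0, 1]})
table = toy.groupby('combinations')['journey_with_conv'].agg(
    [('conversions', 'sum'), ('total_sequences', 'count')]).reset_index()
table['conv_rate'] = table['conversions'] / table['total_sequences']
# 'A > B' converted 2 times out of 2 (conv_rate 1.0); 'A' 0 times out of 1 (0.0).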
def coalitions(self, size = 4, unique_channels = None, order=False):
"""
This function gives all the coalitions of different channels in a matrix. Most of the extra parameters
are used when calculating Shapley's value with order.
**size** = limits max size of unique channels in a single journey
**unique_channels** = By default will check self.channels unique values, or a list of channels can be passed
as well.
**order** = Boolean that indicates if the order of channels matters during the process.
"""
if unique_channels is None:
unique_channels = list(set(sum(self.channels.values, [])))
else:
unique_channels = unique_channels
channels_combination = []
# Creating a list with all the channel combinations (order of appearance is kept when order is True)
if order is True:
for L in range(0, size + 1):
for subset in itertools.combinations(unique_channels, L):
channels_combination.append(list(subset))
else:
for L in range(0, size + 1):
for subset in itertools.combinations(sorted(unique_channels), L):
channels_combination.append(list(subset))
#Creating a DF with the channels as the boolean columns
df_temp = pd.Series(channels_combination)
import logging
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectKBest, chi2
from classes.Dataset import Dataset
class FeaturesSelector:
def __init__(self, dataset: Dataset):
self.__features = dataset.get_features()
self.__labels = dataset.get_labels()
self.__best_features_ids = []
def get_features(self) -> pd.DataFrame:
return self.__features
def get_best_features_ids(self) -> List[str]:
return self.__best_features_ids
def univariate_selection(self, num_features: int):
"""
Apply sklearn.SelectKBest class to extract the top num_features best features
:param num_features: the number of top features to be extracted
"""
logging.info("Performing univariate selection...")
# Perform univariate selection
best_features = SelectKBest(score_func=chi2, k=num_features)
fit = best_features.fit(self.__features, self.__labels)
scores = pd.DataFrame(fit.scores_)
columns = pd.DataFrame(self.__features.columns)
# Concat two dataframes for better visualization
feature_scores = pd.concat([columns, scores], axis=1)
# Name the dataframe columns
feature_scores.columns = ['Specs', 'Score']
# Log the 10 best features
logging.info("The {} best features are:".format(num_features))
logging.info(feature_scores.nlargest(10, 'Score'))
for feature in self.__features:
if not feature_scores.nlargest(num_features, 'Score')['Specs'].str.contains(feature).any():
self.__features.drop(feature, axis=1, inplace=True)
else:
logging.warning('Added FEATURE {}'.format(feature))
self.__best_features_ids.append(feature)
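# Standalone toy example (not part of the class) of the same SelectKBest/chi2
# pattern; note that chi2 requires non-negative feature values.
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2
X = pd.DataFrame({'f1': [0, 1, 2, 3], 'f2': [5, 5, 5, 5], 'f3': [0, 0, 3, 3]})
y = [0, 0, 1, 1]
fit = SelectKBest(score_func=chi2, k=2).fit(X, y)
scores = pd.Series(fit.scores_, index=X.columns).sort_values(ascending=False)
# f3 and f1 score highest; the constant column f2 scores 0.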
def features_importance(self, num_features: int, show: bool = False):
logging.info("Calculating features importances...")
model = ExtraTreesClassifier()
model.fit(self.__features, self.__labels)
# Use inbuilt class feature_importances of tree based classifiers
feature_importances = model.feature_importances_
logging.info("Features importances:")
logging.info(feature_importances)
# Plot graph of feature importances for better visualization
importances = pd.Series(feature_importances, index=self.__features.columns)
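# Standalone sketch (an assumption, not the original continuation of this file):
# the usual way to visualise tree-based importances, here on synthetic data.
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
X, y = make_classification(n_samples=200, n_features=6, random_state=0)
model = ExtraTreesClassifier(random_state=0).fit(X, y)
imp = pd.Series(model.feature_importances_, index=[f'f{i}' for i in range(6)])
imp.nlargest(4).plot(kind='barh')
plt.show()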
# parse log files and generate an excel file
import re
import sys, getopt
import pandas as pd
import xlsxwriter
rx_dict = {
'File': re.compile(r'File: (?P<file>.*) , Top Module: (?P<top_module>.*)'),
'Faults': re.compile(r'Found (?P<fault_sites>.*) fault sites in (?P<gates>.*) gates and (?P<ports>.*) ports.'),
'Time': re.compile(r'Time elapsed: (?P<time>.*)s.'),
'Coverage': re.compile(r'Simulations concluded: Coverage (?P<coverage>.*)%'),
'Iteration': re.compile(r'\((?P<current_coverage>.*)%/(?P<min_coverage>.*)%,\) incrementing to (?P<tv_count>.*).'),
}
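# Quick standalone illustration of matching a single log line against the table above.
sample = 'Found 120 fault sites in 40 gates and 8 ports.'
for key, rx in rx_dict.items():
    m = rx.search(sample)
    if m:
        print(key, m.groupdict())  # -> Faults {'fault_sites': '120', 'gates': '40', 'ports': '8'}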
def main(argv):
log_file, output_file = parse_args(argv)
data = pd.DataFrame(columns=["File", "Top Module", "Fault Sites", "Gate Count", "Ports", "Run Time", "TV Count", "Coverage"])
benchmark = pd.DataFrame(columns=["Current Coverage", "Minimum Coverage", "TV Count"])
sheets = {}
row = {}
iteration = {}
with open(log_file, 'r') as file_object:
line = file_object.readline()
while line:
# at each line check for a match with a regex
key, match = _parse_line(line)
if key == "File":
if row:
tv_count = -1 # indicates coverage is met with minimum set tv count; no iterations took place
if not benchmark.empty: # if coverage is not met with the minimum tv count
sheets[row["File"]] = benchmark
tv_count = benchmark.iloc[-1]["TV Count"]
benchmark = pd.DataFrame(columns=["Current Coverage", "Minimum Coverage", "TV Count"])
import random
import numpy as np
import pandas as pd
from agents.abstract_agent import Agent
from gym_splendor_code.envs.mechanics.action import Action
from gym_splendor_code.envs.mechanics.game_settings import POINTS_TO_WIN
from gym_splendor_code.envs.mechanics.state import State
from gym_splendor_code.envs.mechanics.state_as_dict import StateAsDict
from archive.vectorization import vectorize_state, vectorize_action
class MinMaxAgent(Agent):
def __init__(self,
name: str = "MinMax",
weight: list = [100,2,2,1,0.1],
decay: float = 0.9,
depth: int = 3,
collect_stats: bool=False):
super().__init__()
# we create our own gym_open_ai-splendor environment to have access to its functionality
#We specify the name of the agent
self.name = name + ' ' + str(weight)
self.weight = weight
self.normalize_weight()
self.decay = decay
self.depth = depth
self.action_to_avoid = -100
self.collect_stats = collect_stats
if self.collect_stats:
self.stats_dataframe = pd.DataFrame(columns=('state', 'action', 'evaluation'))
# Ab initio Elasticity and Thermodynamics of Minerals
#
# Version 2.5.0 27/10/2021
#
# Comment the following three lines to produce the documentation
# with readthedocs
# from IPython import get_ipython
# get_ipython().magic('cls')
# get_ipython().magic('reset -sf')
import datetime
import os
import sys
import scipy
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
# from matplotlib import rc
import pandas as pd
import sympy as sym
import parame as pr
from scipy.optimize import curve_fit, fmin, minimize_scalar, minimize
from scipy.interpolate import UnivariateSpline, Rbf
from scipy import integrate
from plot import plot_class
from mineral_data import mineral, load_database, equilib, reaction,\
pressure_react, export, field, import_database, name_list
from mineral_data import ens, cor, py, coe, q, fo, ky, sill, andal, per, sp, \
mao, fmao, stv, cc, arag, jeff, jeff_fe, jeff_fe3p, jeff_feb
import_database()
mpl.rcParams['figure.dpi']= 80
class latex_class():
"""
Setup for the use of LaTeX for axis labels and titles; sets of parameters
for graphics output.
"""
def __init__(self):
self.flag=False
self.dpi=300
self.font_size=14
self.tick_size=12
self.ext='jpg'
mpl.rc('text', usetex=False)
def on(self):
self.flag=True
mpl.rc('text', usetex=True)
def off(self):
self.flag=False
mpl.rc('text', usetex=False)
def set_param(self, dpi=300, fsize=14, tsize=12, ext='jpg'):
"""
Args:
dpi: resolution of the graphics file (default 300)
fsize: size of the labels of the axes in points (default 14)
tsize: size of the ticks in points (default 12)
ext: extension of the graphics file (default 'jpg'); this argument
is only used in those routines where the name of the file is
automatically produced by the program (e.g. check_poly or
check_spline functions). In other cases, the extension is
directly part of the name of the file given as argument to
the function itself, and 'ext' is ignored.
"""
self.dpi=dpi
self.font_size=fsize
self.tick_size=tsize
self.ext=ext
def get_dpi(self):
return self.dpi
def get_fontsize(self):
return self.font_size
def get_ext(self):
return self.ext
def get_tsize(self):
return self.tick_size
class flag:
def __init__(self,value):
self.value=value
self.jwar=0
def on(self):
self.value=True
def off(self):
self.value=False
def inc(self):
self.jwar += 1
def reset(self):
self.jwar=0
class verbose_class():
def __init__(self,value):
self.flag=value
def on(self):
self.flag=True
print("Verbose mode on")
def off(self):
self.flag=False
print("Verbose mode off")
class BM3_error(Exception):
pass
class vol_corr_class:
def __init__(self):
self.flag=False
self.v0_init=None
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_volume(self,vv):
self.v0_init=vv
class data_info():
"""
Stores information about the current settings
"""
def __init__(self):
self.min_static_vol=None
self.max_static_vol=None
self.static_points=None
self.min_freq_vol=None
self.max_freq_vol=None
self.freq_points=None
self.min_select_vol=None
self.max_select_vol=None
self.select_points=None
self.freq_sets=None
self.fit_type='No fit'
self.min_vol_fit=None
self.max_vol_fit=None
self.fit_points=None
self.fit_degree=None
self.fit_smooth=None
self.k0=None
self.kp=None
self.v0=None
self.temp=None
self.k0_static=None
self.kp_static=None
self.v0_static=None
self.popt=None
self.popt_orig=None
self.min_names=name_list.mineral_names
self.title=None
def show(self):
"""
Prints information about the current settings stored in the classes
"""
if self.title !=None:
print(self.title)
print("\nCurrent settings and results\n")
if self.min_static_vol != None:
print("Static data ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_static_vol, self.max_static_vol, self.static_points))
if self.min_freq_vol != None:
print("Frequency volume range ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_freq_vol, self.max_freq_vol, self.freq_points))
if self.min_select_vol != None:
print("Selected freq. sets ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_select_vol, self.max_select_vol, self.select_points))
print("Frequency sets: %s" % str(self.freq_sets))
if self.fit_type != 'No fit':
if self.fit_type=='poly':
print("\nFit of frequencies ** type: %s, degree: %d" \
% (self.fit_type, self.fit_degree))
else:
print("\nFit of frequencies ** type: %s, degree: %d, smooth: %2.1f" \
% (self.fit_type, self.fit_degree, self.fit_smooth))
print(" min, max volumes: %8.4f, %8.4f; points %d" %\
(self.min_vol_fit, self.max_vol_fit, self.fit_points))
else:
print("No fit of frequencies")
if supercell.flag:
print("\n*** This is a computation performed on SUPERCELL data")
print(" (SCELPHONO and QHA keywords in CRYSTAL). Number of cells: %3i" % supercell.number)
if self.k0_static != None:
print("\n*** Static EoS (BM3) ***")
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0_static, self.kp_static, self.v0_static))
if static_range.flag:
print("\n*** Static EoS is from a restricted volume range:")
print("Minimum volume: %8.3f" % static_range.vmin)
print("Maximum volume: %8.3f" % static_range.vmax)
if p_stat.flag:
print("\n*** Static EoS from P(V) data ***")
print("Data points num: %3i" % p_stat.npoints)
print("Volume range: %8.4f, %8.4f (A^3)" % (p_stat.vmin, p_stat.vmax))
print("Pressure range: %5.2f, %5.2f (GPa)" % (p_stat.pmax, p_stat.pmin))
print("EoS -- K0: %6.2f (GPa), Kp: %4.2f, V0: %8.4f (A^3)" % (p_stat.k0,\
p_stat.kp, p_stat.v0))
print("Energy at V0: %12.9e (hartree)" % p_stat.e0)
if self.k0 != None:
print("\n** BM3 EoS from the last computation, at the temperature of %5.2f K **" % self.temp)
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0, self.kp, self.v0))
if not f_fix.flag:
print("Kp not fixed")
else:
print("Kp fixed")
if exclude.ex_mode != []:
uniq=np.unique(exclude.ex_mode)
print("\nZone center excluded modes: %s" % str(uniq))
else:
print("\nNo zone center excluded modes")
if disp.ex_flag:
uniq=np.unique(disp.excluded_list)
print("Off center excluded modes: %s" % str(uniq))
else:
print("No off center excluded modes")
if kieffer.flag==True:
print("\nKieffer model on; frequencies %5.2f %5.2f %5.2f cm^-1" %\
(kieffer.kief_freq_inp[0], kieffer.kief_freq_inp[1], \
kieffer.kief_freq_inp[2]))
else:
print("\nKieffer model off")
if anharm.flag:
print("\nAnharmonic correction for mode(s) N. %s" % str(anharm.mode).strip('[]'))
print("Brillouin flag(s): %s" % str(anharm.brill).strip('[]'))
if disp.flag:
print("\n--------------- Phonon dispersion --------------------")
print("\nDispersion correction activated for the computation of entropy and")
print("specific heat:")
print("Number of frequency sets: %3i" % disp.nset)
if disp.nset > 1:
if disp.fit_type == 0:
print("Polynomial fit of the frequencies; degree: %3i " % disp.fit_degree)
else:
print("Spline fit of the frequencies; degree: %3i, smooth: %3.1f"\
% (disp.fit_degree, disp.fit_type))
print("Number of off-centered modes: %5i" % disp.f_size)
if disp.eos_flag:
print("\nThe phonon dispersion is used for the computation of the bulk modulus")
print("if the bulk_dir or the bulk_modulus_p functions are used, the latter")
print("in connection with the noeos option.")
if disp.fit_vt_flag:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered modes is ready. Fit V,T-powers: %3i, %3i"
% (disp.fit_vt_deg_v, disp.fit_vt_deg_t))
else:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered mode is NOT ready.")
else:
print("\nThe phonon dispersion correction is not used for the computation")
print("of the bulk modulus")
if disp.thermo_vt_flag & (disp.nset > 1):
print("\nVT-phonon dispersion correction to the thermodynamic properties")
elif (not disp.thermo_vt_flag) & (disp.nset > 1):
print("\nT-phonon dispersion correction to the thermodynamic properties")
print("Use disp.thermo_vt_on() to activate the V,T-correction")
print("\n --------------------------------------------------------")
if lo.flag:
out_lo=(lo.mode, lo.split)
df_out=pd.DataFrame(out_lo, index=['Mode', 'Split'])
df_out=df_out.T
df_out['Mode']=np.array([int(x) for x in df_out['Mode']], dtype=object)
print("\nFrequencies corrected for LO-TO splitting.\n")
if verbose.flag:
print(df_out.to_string(index=False))
print("---------------------------------------------")
print("\n**** Volume driver for volume_dir function ****")
print("Delta: %3.1f; degree: %2i; left: %3.1f; right: %3.1f, Kp_fix: %s; t_max: %5.2f"\
% (volume_ctrl.delta, volume_ctrl.degree, volume_ctrl.left, volume_ctrl.right,\
volume_ctrl.kp_fix, volume_ctrl.t_max))
print("EoS shift: %3.1f; Quad_shrink: %2i; T_dump: %3.1f; Dump fact.: %2.1f, T_last %4.1f" % \
(volume_ctrl.shift, volume_ctrl.quad_shrink, volume_ctrl.t_dump, volume_ctrl.dump,\
volume_ctrl.t_last))
print("Upgrade shift: %r" % volume_ctrl.upgrade_shift)
print("\n**** Volume driver for volume_from_F function ****")
print("In addition to the attributes set in the parent volume_control_class:")
print("shift: %3.1f, flag: %r, upgrade_shift: %r" % (volume_F_ctrl.get_shift(), \
volume_F_ctrl.get_flag(), volume_F_ctrl.get_upgrade_status()))
print("\n**** Numerical T-derivatives driver class (delta_ctrl) ****")
if not delta_ctrl.adaptive:
print("Delta: %3.1f" % delta_ctrl.delta)
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
else:
print("Adaptive scheme active:")
print("T_min, T_max: %4.1f, %6.1f K" % (delta_ctrl.tmin, delta_ctrl.tmax))
print("Delta_min, Delta_max: %4.1f, %6.1f K" % (delta_ctrl.dmin, delta_ctrl.dmax))
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
if verbose.flag:
print("\n--------- Database section ---------")
print("Loaded phases:")
print(self.min_names)
class exclude_class():
"""
Contains the list of modes to be excluded from the
calculation of the Helmholtz free energy.
It can be constructed by using the keyword EXCLUDE
in the input.txt file.
"""
def __init__(self):
self.ex_mode=[]
self.ex_mode_keep=[]
self.flag=False
def __str__(self):
return "Excluded modes:" + str(self.ex_mode)
def add(self,modes):
"""
Args:
modes : can be a scalar or a list of modes to be excluded
"""
if type(modes) is list:
self.ex_mode.extend(modes)
self.flag=True
elif type(modes) is int:
self.ex_mode.append(modes)
self.flag=True
else:
print("** Warning ** exclude.add(): invalid input type")
return
def restore(self):
"""
Restores all the excluded modes
"""
if self.flag:
self.ex_mode_keep=self.ex_mode
self.ex_mode=[]
self.flag=False
def on(self):
self.ex_mode=self.ex_mode_keep
self.flag=True
class fix_flag:
def __init__(self,value=0.):
self.value=value
self.flag=False
def on(self,value=4):
self.value=value
self.flag=True
def off(self):
self.value=0.
self.flag=False
class fit_flag:
def __init__(self):
pass
def on(self):
self.flag=True
def off(self):
self.flag=False
class spline_flag(fit_flag):
"""
Sets up the spline fit of the frequencies as functions of
the volume of the unit cell.
Several variables are defined:
1. flag: (boolean); if True, frequencies are fitted with splines
2. degree: degree of the spline
3. smooth: *smoothness* of the spline
4. flag_stack: (boolean) signals the presence of the spline stack
5. pol_stack: it is the stack containing parameters for the spline fit
Note:
The spline stack can be set up and initialized by using the keyword
SPLINE under the keyword FITVOL in the *input.txt* file
Methods:
"""
def __init__(self,flag=False,degree=3,smooth=0):
super().__init__()
self.flag=False
self.flag_stack=False
self.degree=degree
self.smooth=smooth
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def set_smooth(self,smooth):
self.smooth=smooth
def stack(self):
self.pol_stack=freq_stack_spline()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
class poly_flag(fit_flag):
def __init__(self,flag=False,degree=2):
super().__init__()
self.flag=flag
self.flag_stack=False
self.degree=degree
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def stack(self):
self.pol_stack=freq_stack_fit()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
class kieffer_class():
def __str__(self):
return "Application of the Kieffer model for acoustic phonons"
def __init__(self,flag=False):
self.flag=False
self.stack_flag=False
self.kief_freq=None
self.kief_freq_inp=None
self.t_range=None
self.f_list=None
self.input=False
def stack(self, t_range, f_list):
self.t_range=t_range
self.f_list=f_list
def get_value(self,temperature):
free=scipy.interpolate.interp1d(self.t_range, self.f_list, kind='quadratic')
return free(temperature)*zu
def on(self):
self.flag=True
print("Kieffer correction on")
if disp.flag:
disp.flag=False
print("Phonon dispersion is deactivated")
if not self.stack_flag:
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def off(self):
self.flag=False
print("Kieffer correction off")
def freq(self,f1,f2,f3):
self.kief_freq_inp=np.array([f1, f2, f3])
self.kief_freq=self.kief_freq_inp*csl*h/kb
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def plot(self):
plt.figure()
plt.plot(self.t_range, self.f_list, "k-")
plt.xlabel("Temperature (K)")
plt.ylabel("F free energy (J/mol apfu)")
plt.title("Free energy from acoustic modes (Kieffer model)")
plt.show()
class bm4_class():
"""
Setup and information for a 4th-order Birch-Murnaghan EoS (BM4)
It provides:
1. energy: function; Volume integrated BM4 (V-BM4)
2. pressure: function; BM4
3. bm4_static_eos: BM4 parameters for the static energy
calculation as a function of V
4. en_ini: initial values for the BM4 fit
5. bm4_store: BM4 parameters from a fitting at a given
temperature
methods:
"""
def __init__(self):
self.flag=False
self.start=True
self.energy=None
self.pressure=None
self.en_ini=None
self.bm4_static_eos=None
self.bm4_store=None
def __str__(self):
return "BM4 setting: " + str(self.flag)
def on(self):
"""
Switches on the BM4 calculation
"""
self.flag=True
if self.start:
self.energy, self.pressure=bm4_def()
self.start=False
def estimates(self,v4,e4):
"""
Estimates initial values of BM4 parameters for the fit
"""
ini=init_bm4(v4,e4,4.0)
new_ini,dum=curve_fit(v_bm3, v4, e4, \
p0=ini,ftol=1e-15,xtol=1e-15)
kpp=(-1/new_ini[1])*((3.-new_ini[2])*\
(4.-new_ini[2])+35./9.)*1e-21/conv
self.en_ini=[new_ini[0], new_ini[1],\
new_ini[2], kpp, new_ini[3]]
k0_ini=new_ini[1]*conv/1e-21
print("\nBM4-EoS initial estimate:")
print("\nV0: %6.4f" % self.en_ini[0])
print("K0: %6.2f" % k0_ini)
print("Kp: %6.2f" % self.en_ini[2])
print("Kpp: %6.2f" % self.en_ini[3])
print("E0: %8.6e" % self.en_ini[4])
def store(self,bm4st):
"""
Stores BM4 parameters from a fit a given temperature
"""
self.bm4_store=bm4st
def upload(self,bm4_eos):
"""
Loads the parameters from the static calculation
(that are then stored in bm4_static_eos)
"""
self.bm4_static_eos=bm4_eos
def upgrade(self):
"""
Uses the stored values of parameters [from the application of
store()] to upgrade the initial estimation done with estimates()
"""
self.en_ini=self.bm4_store
def off(self):
"""
Switches off the BM4 calculation
"""
self.flag=False
def status(self):
"""
Informs on the status of BM4 (on, or off)
"""
print("\nBM4 setting: %s " % self.flag)
class gamma_class():
"""
Store coefficients of a gamma(T) fit
"""
def __init__(self):
self.flag=False
self.degree=1
self.pol=np.array([])
def upload(self,deg,pcoef):
self.flag=True
self.degree=deg
self.pol=pcoef
class super_class():
"""
Store supercell data: number of cells on which the frequencies
computation was done. To be used in connection with CRYSTAL
calculations performed with SCELPHONO and QHA keywords.
Default value: 1
"""
def __init__(self):
self.number=1
self.flag=False
def set(self,snum):
self.flag=True
self.number=snum
print("\n*** Supercell *** Number of cells: %3i" % snum)
def reset(self):
self.flag=False
self.number=1
print("\n*** Supercell deactivated *** Number of cells set to 1")
class lo_class():
"""
LO/TO splitting correction.
The class stores a copy of the original TO frequencies, the modes
affected by LO/TO splitting and the splitting values.
Modes are identified by their progressive number (starting from 0) stored
in the *mode* attribute.
When the correction is activated, new values of frequencies (*f_eff*)
are computed for the relevant modes, according to the formula:
f_eff = 2/3 f_TO + 1/3 f_LO
where f_LO = f_TO + split.
Correction is activated by the keyword LO in the input.txt file,
followed by the name of the file containing the splitting data (two
columns: mode number and the corresponding split in cm^-1).
Internally, the methods *on* and *off* switch respectively on and off
the correction. The method *apply* does the computation of the frequencies
*f_eff*.
"""
def __init__(self):
self.flag=False
self.mode=np.array([])
self.split=np.array([])
self.data_freq_orig=np.array([])
self.data_freq=np.array([])
def on(self):
self.apply()
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
self.flag=True
print("Frequencies corrected for LO-TO splitting")
def off(self):
self.flag=False
self.data_freq=np.copy(self.data_freq_orig)
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
print("LO-TO splitting not taken into account")
def apply(self):
for ifr in np.arange(lo.mode.size):
im=lo.mode[ifr]
for iv in int_set:
freq_lo=self.data_freq_orig[im,iv+1]+self.split[ifr]
self.data_freq[im,iv+1]=(2./3.)*self.data_freq_orig[im,iv+1]\
+(1./3.)*freq_lo
class anh_class():
def __init__(self):
self.flag=False
self.disp_off=0
def off(self):
self.flag=False
exclude.restore()
if disp.input_flag:
disp.free_exclude_restore()
print("Anharmonic correction is turned off")
print("Warning: all the excluded modes are restored")
def on(self):
self.flag=True
self.flag_brill=False
for im, ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
elif disp.input_flag:
disp.free_exclude([im])
self.flag_brill=True
if self.flag_brill:
disp.free_fit_vt()
print("Anharmonic correction is turned on")
class static_class():
"""
Defines the volume range for the fit of the static EoS
If not specified (default) such range is defined from the
volumes found in the static energies file.
"""
def __init__(self):
self.flag=False
def set(self, vmin, vmax):
"""
Sets the minimum and maximum volumes for the V-range
Args:
vmin: minimum volume
vmax: maximum volume
"""
self.vmin=vmin
self.vmax=vmax
def off(self):
"""
Restores the original V-range (actually, it switches off the volume
selection for the fit of the static EoS)
"""
self.flag=False
def on(self):
"""
It switches on the volume selection for the fit of the static EoS
Note:
The minimum and maximum V-values are set by the 'set' method
of the class
"""
self.flag=True
class p_static_class():
def __init__(self):
self.flag=False
self.vmin=None
self.vmax=None
self.pmin=None
self.pmax=None
self.npoints=None
self.k0=None
self.kp=None
self.v0=None
self.e0=None
class volume_control_class():
"""
Defines suitable parameters for the volume_dir function
"""
def __init__(self):
self.degree=2
self.delta=2.
self.t_max=500.
self.shift=0.
self.t_dump=0.
self.dump=1.
self.quad_shrink=4
self.kp_fix=False
self.debug=False
self.upgrade_shift=False
self.skew=1.
self.t_last=0.
self.t_last_flag=False
self.v_last=None
def set_degree(self, degree):
"""
Sets the degree of polynomial used to fit the (P(V)-P0)^2 data.
The fitted curve is the minimized to get the equilibrium volume
at each T and P.
For each parameter relevant in this class, there exists
a specific method to set its value. The method set_all can be used to
set several of them at the same time, by using the appropriate
keywords as arguments. The arguments to set_all are:
Args:
degree: degree of the fitting polynomial (default=2)
delta: volume range where the minimum of the fitting function
is to be searched (default=2.)
skew: the Volume range is centered around the equilibrium
volume approximated by the EoS-based new_volume function
The symmetry around such point can be controlled by
the skew parameter (default=1.: symmetric interval)
shift: Systematic shift from the new_volume estimation (default=0.)
t_max: In the initial estimation of the volume at P/T with the EoS-based
new_volume function, the Kp is refined if T < t_max.
If T > t_max and kp_fix=True, Kp is fixed at the value
refined at t_max (default=500K)
kp_fix: See t_max (default=True)
quad_shrink: if degree=2, it restricts the volume range around the
approximated volume found. The new range is
delta/quad_shrink (default=4)
upgrade_shift: at the end of the computation, the difference between
the volume found and the initial one (from the EoS-
based new_volume function) is calculated. The shift
attribute is then upgraded if upgrade_shift is True
(default=False)
debug: if True, the (P(V)-P0)**2 function is plotted as a function
of V (default=False)
t_dump: temperature above which a damping of the shift parameter is
applied (default=0.)
dump: damping factor on the shift parameter (shift=shift/dump; default=1.)
t_last: if t_last > 10., the last volume computed is used as the
initial guess value (vini) for the next computation at a
new temperature.
"""
self.degree=degree
def set_delta(self, delta):
self.delta=delta
def set_tmax(self,tmax):
self.t_max=tmax
def set_skew(self, skew):
self.left=skew+1
self.right=(skew+1)/skew
def kp_on(self):
self.kp_fix=True
def kp_off(self):
self.kp_fix=False
def debug_on(self):
self.debug=True
def debug_off(self):
self.debug=False
def set_shift(self, shift):
self.shift=shift
def upgrade_shift_on(self):
self.upgrade_shift=True
def upgrade_shift_off(self):
self.upgrade_shift=False
def set_shrink(self, shrink):
self.quad_shrink=shrink
def shift_reset(self):
self.shift=0.
def set_t_dump(self,t_dump=0., dump=1.0):
self.t_dump=t_dump
self.dump=dump
def set_t_last(self, t_last):
self.t_last=t_last
def set_all(self,degree=2, delta=2., skew=1., shift=0., t_max=500.,\
quad_shrink=4, kp_fix=True, upgrade_shift=False, debug=False,\
t_dump=0., dump=1., t_last=0.):
self.degree=degree
self.delta=delta
self.t_max=t_max
self.kp_fix=kp_fix
self.debug=debug
self.left=skew+1
self.right=(skew+1)/skew
self.shift=shift
self.quad_shrink=quad_shrink
self.upgrade_shift=upgrade_shift
self.skew=skew
self.t_last=t_last
class volume_F_control_class():
"""
Class controlling some parameters relevant for the computation of
volume and thermal expansion by using the volume_from_F function.
Precisely, the initial volume (around which the refined volume vref
is to be searched) is set to vini+shift, where vini is the
output from the volume_dir, whereas shift is from this class.
Shift is computed as the difference vref-vini; it can be upgraded
provided the flag upgrade_shift is set to True.
"""
def __init__(self):
self.shift=0.
self.upgrade_shift=False
self.flag=False
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_shift(self, sh):
self.shift=sh
def upgrade_on(self):
self.upgrade_shift=True
def upgrade_off(self):
self.upgrade_shift=False
def get_shift(self):
return self.shift
def get_upgrade_status(self):
return self.upgrade_shift
def get_flag(self):
return self.flag
class delta_class():
"""
Control parameters for the numerical evaluation of the first and second
derivatives of the Helmholtz free energy as a function of T. They are
relevant for the entropy_v function that computes both the entropy and
specific heat at a fixed volume, as well as the computation of thermal
expansion.
Initial values of delta, degree and number of points are read
from the parameters file 'parame.py'
    New values can be set by the methods set_delta, set_degree and set_nump
    of the class; values can be retrieved by the corresponding 'get' methods.
    The reset method restores the default values.
An adaptive scheme is activated by the method adaptive_on (adaptive_off
deactivates the scheme). In this case the delta value is computed as a function
of temperature (T). Precisely:
delta=delta_min+(T-t_min)*(delta_max-delta_min)/(t_max-t_min)
delta=delta_min if T < t_min
delta=delta_max if T > t_max
    The parameters t_min, t_max, delta_min and delta_max can be set by the
    adaptive_set method (default values 50, 1000, 10, 50, respectively)
"""
def __init__(self):
self.delta=pr.delta
self.nump=pr.nump
self.degree=pr.degree
self.adaptive=False
self.tmin=50.
self.tmax=1000.
self.dmin=10.
self.dmax=50.
def adaptive_on(self):
self.adaptive=True
def adaptive_off(self):
self.adaptive=False
def adaptive_set(self, tmin=50., tmax=1000., dmin=10., dmax=50.):
self.tmin=tmin
self.tmax=tmax
self.dmin=dmin
self.dmax=dmax
def set_delta(self,delta):
self.delta=delta
print("Delta T value, for the computation of entropy, Cv and thermal expansion set to %4.1f" \
% self.delta)
def set_degree(self,degree):
self.degree=degree
print("Degree for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.degree)
def set_nump(self,nump):
self.nump=nump
print("N. points for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.nump)
def get_delta(self, tt=300):
if not self.adaptive:
return self.delta
else:
if tt < self.tmin:
return self.dmin
elif tt > self.tmax:
return self.dmax
else:
return self.dmin+((tt-self.tmin)/(self.tmax-self.tmin))*(self.dmax-self.dmin)
def get_degree(self):
return self.degree
def get_nump(self):
return self.nump
def reset(self):
self.delta=pr.delta
self.degree=pr.degree
self.nump=pr.nump
print("\nDefault parameters for the computation of entropy, Cv and thermal expansion:")
print("Delta: %3.1f" % self.delta)
print("Degree: %3i" % self.degree)
print("Num. points: %3i" % self.nump)
class disp_class():
"""
    Sets up the computation for the inclusion of phonon dispersion effects
    in the EoS computation or in the calculation of all the thermodynamic
    properties.
The class is relevant and activated if the DISP keyword is contained
in the input.txt input file.
Dispersion effects can be switched on or off by using the on() and off()
methods.
Note:
    To apply the phonon dispersion correction to the computation of an equation
of state, the method eos_on() must be invoked [the method eos_off() switches
it off]. In this case, more than one volume must be present in the input
file for dispersion.
Note:
If phonon frequencies are computed for several values of the unit cell volume,
in order to apply a VT-phonon dispersion correction to thermodynamic properties,
the method thermo_vt_on() must be invoked [the method thermo_vt_off() switches it off].
    Otherwise, a T-only phonon dispersion correction is applied (it is assumed
    that phonon frequencies do not change with volume).
Note:
The method free_fit_vt() must be used to get the F(V,T) function for
off-center phonon modes.
"""
def __init__(self):
self.input_flag=False
self.flag=False
self.eos_flag=False
self.thermo_vt_flag=False
self.freq=None
self.deg=None
self.fit_type=None
self.input=False
self.fit_vt_flag=False
self.fit_vt=None
self.temp=None
self.error_flag=False
self.ex_flag=False
self.free_min_t=10.
self.fit_vt_deg_t=4
self.fit_vt_deg_v=4
self.fit_t_deg=6
self.free_nt=24
self.free_disp=True
def on(self):
self.flag=True
if anharm.disp_off > 0:
anharm.mode=np.copy(anharm.mode_orig)
anharm.brill=np.copy(anharm.brill_orig)
anharm.nmode=anharm.nmode_orig
print("Dispersion correction activated")
if kieffer.flag:
kieffer.flag=False
print("Kieffer correction is deactivated")
def off(self):
self.flag=False
print("Dispersion correction off")
if anharm.flag:
mode_a=np.array([])
mode_b=np.array([])
for ia, ib in zip(anharm.mode, anharm.brill):
if ib == 1:
print("\nWarning: the anharmonic mode n. %2i has Brillouin flag" % ia)
print("equal to 1; it should not be considered if the dispersion")
print("correction is deactivated.\n")
anharm.disp_off=anharm.disp_off+1
else:
mode_a=np.append(mode_a, ia)
mode_b=np.append(mode_b, ib)
if anharm.disp_off == 1:
anharm.nmode_orig=anharm.nmode
anharm.mode_orig=np.copy(anharm.mode)
anharm.brill_orig=np.copy(anharm.brill)
anharm.nmode=mode_a.size
anharm.mode=np.copy(mode_a)
anharm.brill=np.copy(mode_b)
print("List of anharmonic modes considered: %s" % anharm.mode)
def eos_on(self):
if self.flag :
if not self.error_flag:
self.eos_flag=True
print("\nPhonon dispersion correction for bulk_dir or bulk_modulus_p computations")
else:
print("Only 1 volume found in the 'disp' files; NO disp_eos possible")
else:
if self.input_flag:
print("Phonon dispersion is not on; use disp.on() to activate")
else:
print("No input of dispersion data; eos_on ignored")
def eos_off(self):
self.eos_flag=False
print("No phonon dispersion correction for bulk_dir computation")
def thermo_vt_on(self):
if self.nset > 1:
self.thermo_vt_flag=True
print("VT-dispersion correction of thermodynamic properties\n")
if not self.fit_vt_flag:
self.free_fit_vt()
else:
print("One volume only found in the DISP file")
def thermo_vt_off(self):
self.thermo_vt_flag=False
print("T-dispersion correction of thermodynamic properties")
print("No volume dependence considered")
def freq_spline_fit(self):
"""
        Requests and performs spline fits of the frequencies of the
        off-center modes as a function of volume.
Relevant parameters for the fit (degree and smooth parameters) are
specified in the appropriate input file.
"""
self.spline=np.array([])
ord_vol=list(np.argsort(self.vol))
vol = [self.vol[iv] for iv in ord_vol]
for ifr in np.arange(self.f_size):
freq=self.freq[:,ifr]
freq=[freq[iv] for iv in ord_vol]
ifit=UnivariateSpline(vol, freq, k=self.fit_degree, s=self.fit_type)
self.spline=np.append(self.spline, ifit)
def freq_fit(self):
"""
        Requests and performs polynomial fits of the frequencies of the
        off-center modes as a function of volume.
The relevant parameter for the fit (degree) is specified in the
appropriate input file.
"""
self.poly=np.array([])
for ifr in np.arange(self.f_size):
if self.nset > 1:
freq=self.freq[:,ifr]
ifit=np.polyfit(self.vol, freq, self.fit_degree)
self.poly=np.append(self.poly,ifit)
else:
self.poly=np.append(self.poly, (0, self.freq[:,ifr][0]))
if self.nset == 1:
self.poly=self.poly.reshape(self.f_size,2)
else:
self.poly=self.poly.reshape(self.f_size,self.fit_degree+1)
def freq_func(self,ifr,vv):
fit=self.poly[ifr]
return np.polyval(fit,vv)
def freq_spline_func(self,ifr,vv):
fit=self.spline[ifr](vv)
return fit.item(0)
def check(self,ifr):
"""
Check of the frequencies fit quality for a specified mode
Args:
ifr: sequence number of the mode to be checked
"""
v_list=np.linspace(np.min(disp.vol), np.max(disp.vol),40)
if self.fit_type == 0:
f_list=[self.freq_func(ifr,iv) for iv in v_list]
else:
f_list=[self.freq_spline_func(ifr,iv) for iv in v_list]
tlt="Check fit for mode N. "+ str(ifr)
plt.figure()
plt.plot(v_list,f_list, "k-")
plt.plot(disp.vol, disp.freq[:,ifr],"b*")
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^-1)")
plt.title(tlt)
plt.show()
def check_multi(self, fr_l):
"""
Check of the frequencies fit quality for a list of modes
Args:
fr_l: list of sequence numbers of the various modes to be checked
Example:
>>> disp.check_multi([0, 1, 2, 3])
>>> disp.check_multi(np.arange(10))
"""
for ifr in fr_l:
self.check(ifr)
def free_exclude(self,ex_list):
"""
Excludes the indicated off-center modes from the computation of the
free energy
Args:
ex_list: list of modes to be excluded
Note:
Even a single excluded mode must be specified as a list; for instance
disp.free_exclude([0])
Note:
after the exclusion of some modes, the F(V,T) function has
to be recomputed by the free_fit_vt method
"""
if not self.input_flag:
print("no input of dispersion data")
return
self.ex_flag=True
self.excluded_list=ex_list
print("Off center modes excluded: ", self.excluded_list)
print("Compute a new disp.free_fit_vt surface")
def free_exclude_restore(self):
"""
The excluded modes are restored
"""
self.ex_flag=False
print("All off centered mode restored")
print("Compute a new disp.free_fit_vt surface")
def free(self,temp,vv):
nf_list=np.arange(self.f_size)
if self.fit_type == 0:
freq=(self.freq_func(ifr,vv) for ifr in nf_list)
else:
freq=(self.freq_spline_func(ifr,vv) for ifr in nf_list)
d_deg=self.deg
wgh=self.w_list
enz=0.
fth=0.
idx=0
nfreq=0
for ifr in freq:
if not self.ex_flag:
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
else:
if not (idx in self.excluded_list):
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
idx=idx+1
return enz+fth*kb*temp/conv
def free_fit(self,mxt,vv,disp=True):
fit_deg=self.fit_t_deg
nt=24
nt_plot=50
tl=np.linspace(10,mxt,nt)
free=np.array([])
for it in tl:
ifree=self.free(it,vv)
free=np.append(free,ifree)
fit=np.polyfit(tl,free,fit_deg)
self.fit=fit
if disp:
tl_plot=np.linspace(10,mxt,nt_plot)
free_plot=self.free_func(tl_plot)
print("Phonon dispersion correction activated")
print("the contribution to the entropy and to the")
print("specific heat is taken into account.\n")
if verbose.flag:
plt.figure()
plt.plot(tl,free,"b*",label="Actual values")
plt.plot(tl_plot, free_plot,"k-",label="Fitted curve")
plt.legend(frameon=False)
plt.xlabel("T (K)")
plt.ylabel("F (a.u.)")
plt.title("Helmholtz free energy from off-centered modes")
plt.show()
def free_fit_ctrl(self, min_t=10., t_only_deg=4, degree_v=4, degree_t=4, nt=24, disp=True):
"""
Free fit driver: sets the relevant parameters for the fit computation
of the F(V,T) function, on the values of F calculated on a grid
of V and T points.
Args:
min_t: minimum temperature for the construction of the
VT grid (default=10.)
degree_v: maximum degree of V terms of the surface (default=4)
            degree_t: maximum degree of the T terms of the surface (default=4)
            t_only_deg: degree of the T polynomial for a single-volume
                        phonon dispersion (default=4)
            nt: number of points along the T axis for the definition of the
                grid (default=24)
            disp: if True, a plot of the surface is shown (default=True)
Note:
The method does not execute the fit, but it defines the most
important parameters. The fit is done by the free_fit_vt() method.
Note:
the volumes used for the construction of the VT grid are those
provided in the appropriate input file. They are available
in the disp.vol variable.
"""
self.free_min_t=min_t
self.fit_t_deg=t_only_deg
self.fit_vt_deg_t=degree_t
self.fit_vt_deg_v=degree_v
self.free_nt=nt
self.free_disp=disp
if self.input_flag:
self.free_fit_vt()
self.free_fit(self.temp,self.vol[0])
def set_tmin(self,tmin):
        self.free_min_t=tmin
def set_nt(self,nt):
        self.free_nt=nt
def free_fit_vt(self):
self.fit_vt_flag=True
min_t=self.free_min_t
nt=self.free_nt
disp=self.free_disp
deg_t=self.fit_vt_deg_t
deg_v=self.fit_vt_deg_v
max_t=self.temp
pvv=np.arange(deg_v+1)
ptt=np.arange(deg_t+1)
p_list=np.array([],dtype=int)
maxvt=np.max([deg_v, deg_t])
for ip1 in np.arange(maxvt+1):
for ip2 in np.arange(maxvt+1):
i1=ip2
i2=ip1-ip2
if i2 < 0:
break
ic=(i1, i2)
if (i1 <= deg_v) and (i2 <= deg_t):
p_list=np.append(p_list,ic)
psize=p_list.size
pterm=int(psize/2)
self.p_list=p_list.reshape(pterm,2)
x0=np.ones(pterm)
t_list=np.linspace(min_t,max_t,nt)
v_list=self.vol
nv=len(v_list)
if nv == 1:
print("\n**** WARNING ****\nOnly one volume found in the 'disp' data files;")
print("NO V,T-fit of F is possible")
self.eos_off()
self.error_flag=True
return
free_val=np.array([])
for it in t_list:
for iv in v_list:
ifree=self.free(it,iv)
free_val=np.append(free_val,ifree)
free_val=free_val.reshape(nt,nv)
vl,tl=np.meshgrid(v_list,t_list)
vl=vl.flatten()
tl=tl.flatten()
free_val=free_val.flatten()
fit, pcov = curve_fit(self.free_vt_func, [vl, tl], free_val, p0 = x0)
self.fit_vt=fit
error=np.array([])
for it in t_list:
for iv in v_list:
f_calc=self.free_vt(it,iv)
f_obs=self.free(it,iv)
ierr=(f_calc-f_obs)**2
error=np.append(error,ierr)
mean_error=np.sqrt(np.mean(error))
max_error=np.sqrt(np.max(error))
print("V,T-fit of the Helmholtz free energy contribution from the off-centered modes")
print("V, T powers of the fit: %3i %3i" % (self.fit_vt_deg_v, self.fit_vt_deg_t))
print("Mean error: %5.2e" % mean_error)
print("Maximum error: %5.2e" % max_error)
if self.ex_flag:
print("Excluded modes: ", self.excluded_list)
if disp:
t_plot=np.linspace(min_t,max_t,40)
v_plot=np.linspace(np.min(vl),np.max(vl),40)
v_plot,t_plot=np.meshgrid(v_plot,t_plot)
v_plot=v_plot.flatten()
t_plot=t_plot.flatten()
h_plot=self.free_vt_func([v_plot, t_plot], *fit)
h_plot=h_plot.reshape(40,40)
v_plot=v_plot.reshape(40,40)
t_plot=t_plot.reshape(40,40)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111,projection='3d', )
ax.scatter(tl,vl,free_val,c='r')
ax.plot_surface(t_plot, v_plot, h_plot)
ax.set_xlabel("Temperature", labelpad=7)
ax.set_ylabel("Volume", labelpad=7)
ax.set_zlabel('F(T,V)', labelpad=8)
plt.show()
def free_vt_func(self,data,*par):
vv=data[0]
tt=data[1]
nterm=self.p_list.shape[0]
func=0.
for it in np.arange(nterm):
pv=self.p_list[it][0]
pt=self.p_list[it][1]
func=func+par[it]*(vv**pv)*(tt**pt)
return func
def free_vt(self,temp,volume):
return self.free_vt_func([volume,temp],*self.fit_vt)
def free_func(self,temp):
free_disp=np.polyval(self.fit,temp)
return free_disp
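# Typical usage of the 'disp' instance defined from the DISP input block (a
# sketch; parameter values are illustrative, not recommended settings):
#
#   >>> disp.free_fit_ctrl(min_t=50., degree_v=3, degree_t=4, nt=20)  # fit setup + F(V,T) fit
#   >>> disp.thermo_vt_on()           # VT-correction of thermodynamic properties
#   >>> disp.check_multi([0, 1, 2])   # inspect the quality of the frequency fits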
class volume_delta_class():
"""
Defines a suitable V range for the numerical evaluation of the
derivatives of any quantity with respect to V.
The V-range (delta) is obtained by multiplying the static equilibrium
volume (V0; which is computed by the static function) with a factor read
from the parame.py parameters' file; such parameter (frac) is stored
in the vd.frac variable and can also be set by the set_frac method.
The method set_delta computes delta, provided a volume is input.
    When delta is computed, the vd.flag is set to True and its value
    is used in several functions computing derivatives. On the contrary,
if vd.flag is set to False (use the method off), the delta
value is read from the parameters' file (pr.delta_v).
"""
def __init__(self):
self.v0=None
self.flag=False
self.delta=None
self.frac=pr.v_frac
def set_delta(self,vol=0.):
"""
Sets the V-delta value for the calculation of derivatives with
respect to V.
Args:
vol: if vol > 0.1, computes delta for the volume vol;
if vol < 0.1, vol is set to the default value stored
in the v0 variable.
"""
if vol < 0.1:
if self.v0 != None:
self.flag=True
self.delta=self.frac*self.v0
else:
war1="Warning: No volume provided for the set_delta method\n"
war2=" The delta value is read from the parameters file"
war=war1+war2+": %5.4f"
print(war % pr.delta_v)
self.flag=False
else:
self.delta=vol*self.frac
self.flag=True
self.v0=vol
def set_frac(self,frac):
self.frac=frac
def on(self):
self.flag=True
def off(self):
self.flag=False
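# Sketch of the use of the 'vd' instance of volume_delta_class (the volume
# value below is purely illustrative):
#
#   >>> vd.set_frac(0.01)    # fraction of V0 used to build the V-delta
#   >>> vd.set_delta(740.)   # compute delta around a volume of 740 A^3
#   >>> vd.off()             # fall back to the pr.delta_v value from parame.py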
class thermal_expansion_class():
"""
Interface for the computation of thermal expansion by different algorithms.
The method 'compute' performs the calculation by calling different functions
according to the 'method' keyword. Similarly, the method 'compute_serie'
performs the calculation of alpha as a function of temperature.
Several default parameters for the calculation are provided, which can
be set by the method 'set'.
    The algorithms which are currently implemented can be listed by the method
    'info'.
    The 'compute_serie' method performs the calculation of the thermal
expansion in a given T-range and, optionally, performs a power
series fit on the computed values. Data from the fit can optionally be
loaded in the internal database if a phase name is provided.
Note:
For the method 'k_alpha_eos', this class uses a specialized
plotting function from the plot.py module, whose parameters are
controlled by the plot.set_param method.
"""
def __init__(self):
self.method='k_alpha_dir'
self.nt=12
self.fix=0
self.fit=False
self.tex=False
self.save=False
self.phase=''
self.title=True
def set(self, method='k_alpha_dir', nt=12, fit=False, tex=False, save=False,\
phase='', title=True, fix=0.):
self.method=method
self.nt=nt
self.fix=fix
self.fit=fit
self.tex=tex
self.save=save
self.phase=phase
self.title=title
def info(self):
print("\nMethods currently implemented\n")
print("k_alpha_dir: computes alpha from the product K*alpha, through the")
print(" derivative of P with respect to T, at constant V")
print(" At any T and P, K and P are directly computed from")
print(" the Helmholtz free energy function derivatives. No EoS")
print(" is involved at any step;")
print("k_alpha_eos: same as k_alpha_dir, but pressures and bulk moduli")
print(" are computed from an EoS;")
print("alpha_dir: the computation is perfomed through the derivative")
print(" of the unit cell volume with respect to V; volumes are")
print(" calculated without reference to any EoS, by the function")
print(" volume_dir.")
def compute(self, tt, pp, method='default', fix=0, prt=False):
"""
Thermal expansion at a specific temperature and pressure
Args:
tt: temperature (K)
pp: pressure (GPa)
method: 3 methods are currently implemented ('k_alpha_dir',
'k_alpha_eos' and 'alpha_dir'); default 'k_alpha_dir'
fix: relevant for method 'k_alpha_eos' (default 0., Kp not fixed)
prt: relevant for method 'k_alpha_eos'; it controls printout
(default False)
"""
if method=='default':
method=self.method
if fix==0:
fix=self.fix
if method=='k_alpha_dir':
if prt:
alpha_dir_from_dpdt(tt, pp, prt)
else:
alpha,k,vol=alpha_dir_from_dpdt(tt, pp, prt)
return alpha
elif method=='k_alpha_eos':
exit=False
if not prt:
exit=True
alpha=thermal_exp_p(tt, pp, False, exit, fix=fix)
return alpha[0]
else:
thermal_exp_p(tt, pp, plot=False, ex=exit, fix=fix)
elif method=='alpha_dir':
alpha=alpha_dir(tt,pp)
if prt:
print("Thermal expansion: %6.2e K^-1" % alpha)
else:
return alpha
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
def compute_serie(self, tmin, tmax, pressure=0, nt=0, fit='default', tex='default',\
title='default', save='default', phase='default', method='default',\
prt=True, fix=0):
"""
Thermal expansion in a T-range
Args:
tmin, tmax: minimum and maximum temperature in the range
pressure: pressure (GPa); default 0
nt: number of points in the T-range; if nt=0, the default is chosen (12)
method: one of the three methods currently implemented
fit: if True, a power series fit is performed
phase: if fit is True and a phase name is specified (label), the data
from the power series fit are loaded in the internal database
fix: relevant for the method 'k_alpha_eos'; if fix is not 0.,
Kp is fixed at the specified value
title: if True, a title of the plot is provided
            tex: if tex is True, LaTeX formatting is provided
prt: relevant for the method 'k_alpha_eos'
save: if True, the plot is saved in a file
Note:
if save is True and method is 'k_alpha_eos', the name of the file
where the plot is saved is controlled by the plot.name and plot.ext variables.
The file resolution is controlled by the plot.dpi variable.
The appropriate parameters can be set by the set_param method
of the plot instance of the plot_class class (in the plot.py module)
Example:
>>> plot.set_param(dpi=200, name='alpha_k_eos_serie')
>>> thermal_expansion.compute_serie(100, 500, method='k_alpha_eos', save=True)
"""
if nt==0:
nt=self.nt
if fit=='default':
fit=self.fit
if tex=='default':
tex=self.tex
if title=='default':
title=self.title
if save=='default':
save=self.save
if phase=='default':
phase=self.phase
if method=='default':
method=self.method
t_list=np.linspace(tmin, tmax, nt)
t_plot=np.linspace(tmin, tmax, nt*10)
if method=='k_alpha_dir':
if fit and phase == '':
alpha_fit=alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
return alpha_fit
else:
alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
elif method=='alpha_dir':
if not fit:
alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
else:
alpha_fit=alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
elif method=='k_alpha_eos':
alpha_list=np.array([])
for it in t_list:
ia=self.compute(it, pressure, method='k_alpha_eos', fix=fix)
alpha_list=np.append(alpha_list, ia)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
if fit:
alpha_fit_plot=alpha_dir_fun(t_plot,*alpha_fit)
tit=''
if tex and title:
tit=r'Thermal expansion (method k\_alpha\_eos)'
elif title:
tit='Thermal expansion (method k_alpha_eos)'
if fit:
x=[t_list, t_plot]
y=[alpha_list, alpha_fit_plot]
style=['k*', 'k-']
lab=['Actual values', 'Power series fit']
if tex:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
else:
if tex:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
if fit:
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
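# Minimal usage sketch of the 'thermal_expansion' driver defined above
# (temperature and pressure values are illustrative):
#
#   >>> thermal_expansion.info()      # list the available algorithms
#   >>> thermal_expansion.compute(300., 0., method='k_alpha_dir')
#   >>> thermal_expansion.compute_serie(100., 500., nt=10, method='alpha_dir', fit=True)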
# reads in data file. It requires a pathname to the folder
# containing data
def read_file(data_path):
global volume, energy, deg, data_vol_freq, num_set_freq
global num_mode, ini, int_set, int_mode, data_vol_freq_orig
global temperature_list, pcov, data_freq, path, data_file
global data, zu, apfu, power, lpow, power_a, lpow_a, mass
global flag_eos, flag_cp, flag_alpha, flag_err, flag_exp, flag_mass
global data_cp_exp, data_p_file, static_e0
flag_eos=False
flag_cp=False
flag_alpha=False
flag_err=False
flag_exp=False
flag_fit=False
flag_mass=False
flag_super=False
flag_static, flag_volume, flag_freq, flag_ini, flag_fu, flag_set, flag_p_static\
= False, False, False, False, False, False, False
path=data_path
input_file=data_path+'/'+'input.txt'
line_limit=100
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
str=fi.readline()
lstr=str.split()
l0=''
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
if l0=='STATIC':
data_file=data_path+'/'+fi.readline()
data_file=data_file.rstrip()
flag_static=os.path.isfile(data_file)
elif l0=='PSTATIC':
data_p_file=data_path+'/'+fi.readline()
data_p_file=data_p_file.rstrip()
static_e0=fi.readline().rstrip()
flag_p_static=os.path.isfile(data_p_file)
print("\n*** INFO *** P/V static data found: use p_static")
print(" function to get a BM3-EoS")
elif l0=='VOLUME':
data_file_vol_freq=data_path+'/'+fi.readline()
data_file_vol_freq=data_file_vol_freq.rstrip()
flag_volume=os.path.isfile(data_file_vol_freq)
elif l0=='FREQ':
data_file_freq=data_path+'/'+fi.readline()
data_file_freq=data_file_freq.rstrip()
flag_freq=os.path.isfile(data_file_freq)
elif l0=='EXP':
data_file_exp=data_path+'/'+fi.readline()
data_file_exp=data_file_exp.rstrip()
flag_exp=os.path.isfile(data_file_exp)
elif l0=='LO':
lo_freq_file=data_path+'/'+fi.readline()
lo_freq_file=lo_freq_file.rstrip()
lo.flag=True
elif l0=='FITVOL':
fit_type=fi.readline()
fit_vol=fi.readline()
flag_fit=True
elif l0=='FU':
zu=fi.readline()
flag_fu=True
elif l0=='MASS':
mass=fi.readline()
flag_mass=True
elif l0=='SET':
istr=fi.readline()
while istr.split()[0] =='#':
istr=fi.readline()
int_set=istr
flag_set=True
elif l0=='TEMP':
temperature_list=fi.readline()
flag_eos=True
elif l0=='TITLE':
title=fi.readline().rstrip()
info.title=title
elif l0=='INI':
ini=fi.readline()
flag_ini=True
elif l0=='CP':
power=fi.readline()
flag_cp=True
elif l0=='ALPHA':
power_a=fi.readline()
flag_alpha=True
elif l0=='EXCLUDE':
exclude.restore()
ex_mode=fi.readline()
ex_mode=list(map(int, ex_mode.split()))
exclude.add(ex_mode)
elif l0=='KIEFFER':
kieffer.input=True
kieffer.flag=True
kief_freq=fi.readline()
kief_freq_inp=list(map(float, kief_freq.split()))
kief_freq=np.array(kief_freq_inp)*csl*h/kb
kieffer.kief_freq=kief_freq
kieffer.kief_freq_inp=kief_freq_inp
elif l0=='ANH':
anharm.nmode=int(fi.readline().rstrip())
anharm.mode=np.array([],dtype=int)
anharm.wgt=np.array([],dtype=int)
anharm.brill=np.array([],dtype=int)
for im in np.arange(anharm.nmode):
line=fi.readline().rstrip()
mw=list(map(int, line.split()))
mode=int(mw[0])
brill=int(mw[1])
wgt=int(mw[2])
anharm.mode=np.append(anharm.mode, mode)
anharm.wgt=np.append(anharm.wgt, wgt)
anharm.brill=np.append(anharm.brill, brill)
anharm.flag=True
elif l0=='SUPER':
line=fi.readline().rstrip()
line_val=list(map(int, line.split()))
snum=line_val[0]
static_vol=line_val[1]
flag_static_vol=False
if static_vol == 0:
flag_static_vol=True
flag_super=True
elif l0=='DISP':
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=data_path+'/'+fi.readline()
disp_info=data_path+'/'+fi.readline()
disp_file=disp_file.rstrip()
disp_info=disp_info.rstrip()
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
jc=jc+1
if jc>=line_limit:
print("\nWarning: END keyword not found")
if not flag_volume or not flag_freq or not (flag_static or flag_p_static):
print("\nError: one or more data file not found, or not assigned"
" in input")
flag_err=True
return
if not flag_fu:
print("\nError: mandatory FU keyword not found")
flag_err=True
return
if not flag_set:
print("\nError: mandatory SET keyword not found")
flag_err=True
return
fi.close()
if flag_view_input.value:
view_input(input_file)
print("\n-------- End of input file -------\n")
flag_view_input.off()
int_set=int_set.rstrip()
int_set=list(map(int, int_set.split()))
info.freq_sets=int_set
if flag_eos:
temperature_list=temperature_list.rstrip()
temperature_list=list(map(float,temperature_list.split()))
if flag_ini:
ini=ini.rstrip()
ini=list(map(float, ini.split()))
ini[1]=ini[1]*1e-21/conv
zus=list(map(int,zu.rstrip().split()))
zu=zus[0]
apfu=zus[1]
if flag_fit:
fit_type=fit_type.rstrip()
fit_vol=fit_vol.rstrip()
fit_vol=list(map(float, fit_vol.split()))
v_ini=fit_vol[0]
v_fin=fit_vol[1]
nv=int(fit_vol[2])
if fit_type=='SPLINE':
flag_spline.on()
flag_spline.set_degree(fit_vol[3])
flag_spline.set_smooth(fit_vol[4])
flag_spline.vol_range(v_ini, v_fin, nv)
info.fit_type='spline'
info.fit_degree=flag_spline.degree
info.fit_smooth=flag_spline.smooth
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
elif fit_type=='POLY':
flag_poly.on()
flag_poly.set_degree(fit_vol[3])
flag_poly.vol_range(v_ini, v_fin, nv)
info.fit_type='poly'
info.fit_degree=flag_poly.degree
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
if flag_super:
supercell.set(snum)
if flag_cp:
power=power.rstrip()
power=list(map(float, power.split()))
lpow=len(power)
test_cp=[ipw in cp_power_list for ipw in power]
if not all(test_cp):
print("WARNING: the power list for the Cp fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", cp_power_list)
print("Given powers:", power)
print("")
if flag_alpha:
power_a=power_a.rstrip()
power_a=list(map(float, power_a.split()))
lpow_a=len(power_a)
test_al=[ipw in al_power_list for ipw in power_a]
if not all(test_al):
print("WARNING: the power list for the alpha fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", al_power_list)
print("Given powers:", power_a)
print("")
if flag_mass:
mass=float(mass.rstrip())
b_flag=False
if anharm.flag:
anharm_setup()
for im,ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
else:
disp.free_exclude([im])
b_flag=True
if disp.flag:
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
data=np.loadtxt(data_file)
if flag_p_static:
static_e0=float(static_e0)
data_vol_freq_orig=np.loadtxt(data_file_vol_freq)
lo.data_freq=np.loadtxt(data_file_freq)
lo.data_freq_orig=np.copy(lo.data_freq)
info.min_freq_vol=min(data_vol_freq_orig)
info.max_freq_vol=max(data_vol_freq_orig)
info.freq_points=len(data_vol_freq_orig)
if flag_exp:
data_cp_exp=np.loadtxt(data_file_exp)
volume=data[:,0]
energy=data[:,1]
if flag_super:
if flag_static_vol:
volume=volume*snum
energy=energy*snum
info.min_static_vol=min(volume)
info.max_static_vol=max(volume)
info.static_points=len(volume)
deg=lo.data_freq[:,0]
num_set_freq=lo.data_freq.shape[1]-1
num_mode=lo.data_freq.shape[0]-1
int_mode=np.arange(num_mode+1)
if flag_super:
deg=deg/supercell.number
if not flag_ini:
ini=init_bm3(volume,energy)
data_vol_freq=[]
for iv in int_set:
data_vol_freq=np.append(data_vol_freq, data_vol_freq_orig[iv])
int_set_new=np.array([],dtype='int32')
ind=data_vol_freq.argsort()
for ind_i in ind:
int_set_new=np.append(int_set_new, int_set[ind_i])
if not np.array_equal(int_set, int_set_new):
print("\nWarning ** Volume and frequencies lists have been sorted")
print(" indexing: ", ind)
print("")
int_set=int_set_new
data_vol_freq.sort()
info.min_select_vol=min(data_vol_freq)
info.max_select_vol=max(data_vol_freq)
info.select_points=len(data_vol_freq)
volume_ctrl.set_all()
if flag_fit:
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
if lo.flag:
lo_data=np.loadtxt(lo_freq_file)
lo.mode=lo_data[:,0].astype(int)
lo.split=lo_data[:,1].astype(float)
lo.on()
if disp.input and kieffer.input:
kieffer.flag=False
print("\nBoth Kieffer and phonon dispersion data were found in the input file")
print("The Kieffer model is therefore deactivated")
if b_flag:
print("")
disp.free_fit_vt()
def view():
"""
View input file (input.txt)
"""
input_file=path+"/input.txt"
view_input(input_file)
def view_input(input_file):
line_limit=1000
print("\nInput file\n")
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
str=fi.readline()
lstr=str.split()
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
print(str.rstrip())
jc=jc+1
def reload_input(path):
reset_flag()
read_file(path)
static()
def load_disp(disp_info, disp_file):
"""
Load files containing data for the phonon dispersion correction. These
are the same files that could be also specified under the keyword DISP
in the input.txt file.
Args:
disp_info: name of the info file
disp_file: name of the frequencies' file
"""
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=path_orig+'/'+disp_file
disp_info=path_orig+'/'+disp_info
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
disp.error_flag=False
if len(disp.vol) == 1:
disp.error_flag=True
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
print("Phonon dispersion data loaded from the file %s" % disp_file)
print("Info data from the file %s" % disp_info)
print("Phonon frequencies are computed at the volume(s) ", disp.vol)
print("\nUse disp.free_fit_ctrl to get free energy surfaces F(T) or F(V,T)")
def set_fix(fix=4.):
"""
Sets Kp to a value and keeps it fixed during fitting of EoS
Args:
fix (optional): Kp value. Default 4.
        if fix=0, Kp is fixed to the last computed value stored in info.kp
    The flag f_fix.flag is set to True
"""
if fix == 0:
fix=info.kp
f_fix.on(fix)
def reset_fix():
"""
    Resets the fix Kp option: f_fix.flag=False
"""
f_fix.off()
def fix_status():
"""
Inquires about the setting concerning Kp
"""
print("Fix status: %r" % f_fix.flag)
if f_fix.flag:
print("Kp fixed at %4.2f" % f_fix.value )
def set_spline(degree=3,smooth=5, npoint=16):
"""
    Sets spline fits of the frequencies as a function of volume
Args:
degree (optional): degree of the spline (default: 3)
smooth (optional): smoothness of the spline (default: 5)
npoint (optional): number of points of the spline function
(default: 16)
"""
dv=0.2
flag_spline.on()
flag_poly.off()
flag_spline.set_degree(degree)
flag_spline.set_smooth(smooth)
fit_vol_exists=True
try:
flag_spline.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint,\
prt=True)
else:
set_volume_range(min(flag_spline.fit_vol),max(flag_spline.fit_vol),npoint)
flag_spline.stack()
info.fit_type='spline'
info.fit_degree=degree
info.fit_smooth=smooth
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
def set_poly(degree=4,npoint=16):
"""
    Sets polynomial fits of the frequencies as a function of volume
    Args:
        degree (optional): degree of the polynomial (default: 4)
npoint (optional): number of points of the polynomial function
(default: 16)
"""
dv=0.2
flag_poly.on()
flag_spline.off()
flag_poly.set_degree(degree)
fit_vol_exists=True
try:
flag_poly.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint, \
prt=True)
else:
set_volume_range(min(flag_poly.fit_vol),max(flag_poly.fit_vol),npoint)
flag_poly.stack()
info.fit_type='poly'
info.fit_degree=degree
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
def set_volume_range(vini,vfin,npoint=16,prt=False):
"""
Defines a volume range for the fitting of frequencies and EoS
in the case that SPLINE or POLY fits have been chosen
Args:
vini: minimum volume
vfin: maximum volume
npoint (optional): number of points in the volume range
"""
if flag_poly.flag:
flag_poly.vol_range(vini,vfin,npoint)
flag_poly.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'POLY' fit" %\
(vini, vfin))
elif flag_spline.flag:
flag_spline.vol_range(vini,vfin,npoint)
flag_spline.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'SPLINE' fit" %\
(vini, vfin))
else:
print("No fit of frequencies active\nUse set_poly or set_spline\n")
def fit_status():
if flag_poly.flag or flag_spline.flag:
print("Fit of frequencies is active")
if flag_spline.flag:
print("Spline fit: degree %2d, smooth: %3.1f" \
% (flag_spline.degree, flag_spline.smooth))
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_spline.fit_vol), max(flag_spline.fit_vol), \
flag_spline.fit_vol.size))
else:
print("Polynomial fit: degree %2d" % flag_poly.degree)
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_poly.fit_vol), max(flag_poly.fit_vol), \
flag_poly.fit_vol.size))
else:
print("Fitting is off")
def fit_off():
flag_poly.off()
flag_spline.off()
info.fit_type='No fit'
def quick_start(path):
"""
Quick start of the program.
Reads the input files found under the folder 'path'
whose name is written in the 'quick_start.txt' file
(found in the master folder).
Executes read_file; static (static equation of state)
and stacks data for the application of the Kieffer model,
    if requested by the optional 'KIEFFER' keyword in input.txt
"""
read_file(path)
static(plot=False)
if kieffer.flag:
free_stack_t(pr.kt_init, pr.kt_fin, pr.kt_points)
if verbose.flag:
print("Results from the Kieffer model for acoustic branches:")
print("plot of the Helmholtz free energy as a function of T.")
print("Temperature limits and number of points defined in parame.py")
kieffer.plot()
else:
print("Kieffer model for the acoustic branches activated")
def v_bm3(vv,v0,k0,kp,c):
"""
Volume integrated Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
c: energy at the minimum
Returns:
the energy at the volume vv
"""
v0v=(np.abs(v0/vv))**(2/3)
f1=kp*(np.power((v0v-1.),3))
f2=np.power((v0v-1.),2)
f3=6.-4*v0v
return c+(9.*v0*k0/16.)*(f1+f2*f3)
def bm3(vv,v0,k0,kp):
"""
Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
Returns:
the pressure at the volume vv
"""
v0v7=np.abs((v0/vv))**(7/3)
v0v5=np.abs((v0/vv))**(5/3)
v0v2=np.abs((v0/vv))**(2/3)
f1=v0v7-v0v5
f2=(3/4)*(kp-4)*(v0v2-1)
return (3*k0/2)*f1*(1+f2)
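# Worked example for the bm3 function above (a sketch; the numbers are
# arbitrary): with v0=100 A^3, k0=170 GPa and kp=4.5, the pressure at
# vv=95 A^3 is
#
#   >>> bm3(95., 100., 170., 4.5)   # roughly 9.8 GPa (k0 in GPa -> P in GPa)
#
# The pressure is returned in the same units as k0; elsewhere in the module
# k0 is in atomic units and the result is converted to GPa via conv/1e-21.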
def bmx_tem(tt,**kwargs):
"""
V-BMx (volume integrated) fit at the selected temperature
Args:
tt: temperature
Keyword Args:
fix: if fix > 0.1, kp is fixed to the value 'fix'
during the optimization of the EoS.
(this is a valid option only for the BM3 fit,
but it is ignored for a BM4 EoS)
Returns:
1. free energy values at the volumes used for the fit
2. optimized v0, k0, kp, (kpp), and c
3. covariance matrix
Note:
bmx_tem optimizes the EoS according to several
possible options specified elsewhere:
1. kp fixed or free
2. frequencies not fitted, or fitted by
polynomials or splines
3. 3^rd or 4^th order BM EoS
Note:
bmx_tem includes energy contributions from static and vibrational
optical modes; acoustic contributions from the modified Kieffer
model are included, provided the KIEFFER keyword is in the input
file; contributions from anharmonic modes are included, provided
the ANH keyword is in the input file. NO dispersion correction
       is included (even if the DISP keyword is provided).
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
flag_x=False
volb=data_vol_freq
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if flag_poly.flag or flag_spline.flag:
free_energy=free_fit(tt)
else:
free_energy=free(tt)
if (flag_x) and (not bm4.flag):
pterm, pcov_term = curve_fit(lambda volb, v0, k0, c: \
v_bm3(volb, v0, k0, fix, c), \
volb, free_energy, p0=p0_f, \
ftol=1e-15, xtol=1e-15)
pterm=np.append(pterm,pterm[2])
pterm[2]=fix
else:
if bm4.flag:
if f_fix.flag:
reset_fix()
fix_status()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pterm, pcov_term= curve_fit(bm4.energy, volb, free_energy,\
method='dogbox',p0=bm4.en_ini, ftol=1e-18, xtol=3.e-16,gtol=1e-18)
bm4.store(pterm)
else:
pterm, pcov_term = curve_fit(v_bm3, volb, free_energy, \
p0=ini, ftol=1e-15, xtol=1e-15)
return [free_energy, pterm, pcov_term]
def bulk_conversion(kk):
"""
Bulk modulus unit conversion (from atomic units to GPa)
"""
kc=kk*conv/1e-21
print("Bulk modulus: %8.4e a.u. = %6.2f GPa" % (kk, kc))
def stop():
"""
used to exit from the program in case of fatal exceptions
"""
while True:
print("Program will be terminated due to errors in processing data")
answ=input('Press enter to quit')
sys.exit(1)
def bm4_def():
V0=sym.Symbol('V0',real=True,positive=True)
V=sym.Symbol('V',real=True,positive=True)
f=sym.Symbol('f',real=True)
kp=sym.Symbol('kp',real=True)
ks=sym.Symbol('ks',real=True)
k0=sym.Symbol('k0',real=True)
P=sym.Symbol('P',real=True,positive=True)
E0=sym.Symbol('E0',real=True)
c=sym.Symbol('c',real=True)
f=((V0/V)**sym.Rational(2,3)-1)/2
P=3*k0*f*((1+2*f)**sym.Rational(5,2))*(1+sym.Rational(3,2)*(kp-4.)*f +\
sym.Rational(3,2)*(k0*ks+(kp-4.)*(kp-3.)+sym.Rational(35,9))*(f**2))
E=sym.integrate(P,V)
E0=E.subs(V,V0)
E=E0-E+c
bm4_energy=sym.lambdify((V,V0,k0,kp,ks,c),E,'numpy')
bm4_pressure=sym.lambdify((V,V0,k0,kp,ks),P,'numpy')
return bm4_energy, bm4_pressure
def init_bm4(vv,en,kp):
"""
Function used to estimate the initial parameters of a V-integrated BM4
EoS. The function is used by the method "estimates" of the bm4 class.
The estimation is done on the basis of a previous BM3 optimization
whose initial parameters are provided by the current function.
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
        kp: initial value assigned to kp
Returns:
"ini" list of V-integrated EoS parameters (for a BM3) estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini.
Note: such parameters are used as initial guesses for the BM3 optimization
performed by the method "estimates" of the class bm4 that, in turn,
outputs the "ini" list for the BM4 EoS optimization.
"""
pol=np.polyfit(vv,en,4)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp, e0_ini]
return ini
def init_bm3(vv,en):
"""
Estimates initial parameters for the V-integrated BM3 EoS in case
the INI keyword is not present in "input.txt"
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
Returns:
"ini" list of V-integrated EoS parameters estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini. kp is set to 4.
Note:
such parameters are used as initial guesses for the bm3 optimization.
"""
kp_ini=4.
pol=np.polyfit(vv,en,3)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp_ini, e0_ini]
return ini
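# Sketch of how the initial-guess helper above is used: when the INI keyword is
# absent, read_file calls init_bm3(volume, energy) on the static E(V) data and
# stores the result in the global 'ini' list (v0, k0, kp=4, e0), which is then
# used as the starting point of the V-BM3 optimization in bmx_tem.
#
#   >>> ini = init_bm3(volume, energy)   # volume, energy: static E(V) data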
# Output the pressure at a given temperature (tt) and volume (vv).
# Kp can be kept fixed (by setting fix=Kp > 0.1)
def pressure(tt,vv,**kwargs):
"""
Computes the pressure at a temperature and volume
Args:
tt: temperature
vv: unit cell volume
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
[ff,veos,err]=bmx_tem(tt,fix=fix_value)
else:
[ff,veos,err]=bmx_tem(tt)
if bm4.flag:
eos=veos[0:4]
return round(bm4.pressure(vv,*eos)*conv/1e-21,3)
else:
eos=veos[0:3]
return round(bm3(vv,*eos)*conv/1e-21,3)
def pressure_dir(tt,vv):
"""
Computes the pressure at a given volume and temperature from
the numerical derivative of the Helmholtz free energy with
respect to the volume (at constant temperature).
Args:
tt: temperature (K)
vv: volume (A^3)
"""
deg=pr.degree_v
if not vd.flag:
vmin=vv-pr.delta_v/2.
vmax=vv+pr.delta_v/2.
else:
vmin=vv-vd.delta/2.
vmax=vv+vd.delta/2.
v_range=np.linspace(vmin,vmax,pr.nump_v)
f_list=np.array([])
for iv in v_range:
fi=free_fit_vt(tt,iv)
f_list=np.append(f_list,fi)
vfit=np.polyfit(v_range,f_list,deg)
vfitder=np.polyder(vfit,1)
press=-1*np.polyval(vfitder,vv)
return press*conv/1e-21
def volume_dir(tt,pp,alpha_flag_1=False, alpha_flag_2=False):
"""
Computes the equilibrium volume at a given temperature and pressure
without using an equation of state.
An initial estimation of the volume is however obtained by using
a BM3 EoS, by calling the eos_temp function; such volume is stored
in the v_new variable.
A list of volumes around the v_new value is then built and, for each
value in the list, a pressure is computed by using the pressure_dir
function, and compared to the input pressure to find the volume
at which the two pressures are equal.
A number of parameters are used to control the computation. They are
all defined by the volume-control driver (volume_ctrl). Convenient
values are already set by default, but they can be changed by using
the method volume_ctrl.set_all. Use the info.show method to get such
values under the 'volume driver section'.
"""
vol_opt.on()
if volume_ctrl.kp_fix:
reset_fix()
if tt < volume_ctrl.t_max:
eos_temp(tt,kp_only=True)
else:
eos_temp(volume_ctrl.t_max,kp_only=True)
set_fix(0)
if (alpha_flag_1) and (not alpha_flag_2):
reset_fix()
eos_temp(tt,kp_only=True)
set_fix(0)
vini=new_volume(tt,pp)
v_new=vini[0] # Initial volume from EoS
if volume_ctrl.t_last_flag:
vini=volume_ctrl.v_last
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
volume_ctrl.t_last_flag=True
volume_ctrl.shift=0.
volume_ctrl.upgrade_shift=False
if not flag_poly.flag:
if flag_fit_warning.value:
print("It is advised to use polynomial fits for 'dir' calculations\n")
fit_status()
print("")
flag_fit_warning.value=False
if flag_poly.flag:
volume_max=max(flag_poly.fit_vol)
volume_min=min(flag_poly.fit_vol)
if flag_spline.flag:
volume_max=max(flag_spline.fit_vol)
volume_min=min(flag_spline.fit_vol)
if flag_poly.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
if flag_spline.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
vvi=vini
if volume_ctrl.t_last_flag:
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
vvi=volume_ctrl.v_last
vplot=vvi
v_list=np.linspace(vvi - volume_ctrl.delta/volume_ctrl.left,\
vvi + volume_ctrl.delta/volume_ctrl.right, 24)
else:
if tt > volume_ctrl.t_dump:
volume_ctrl.shift=volume_ctrl.shift/volume_ctrl.dump
v_list=np.linspace(vini[0]-volume_ctrl.shift - volume_ctrl.delta/volume_ctrl.left,\
vini[0]-volume_ctrl.shift + volume_ctrl.delta/volume_ctrl.right, 24)
vplot=vini[0]
p_list=np.array([])
for iv in v_list:
pi=(pressure_dir(tt,iv)-pp)**2
p_list=np.append(p_list,pi)
fitv=np.polyfit(v_list,p_list,volume_ctrl.degree)
pressure=lambda vv: np.polyval(fitv,vv)
min_p=np.argmin(p_list)
vini=[v_list[min_p]]
if volume_ctrl.degree > 2:
bound=[(volume_min, volume_max)]
vmin=minimize(pressure,vini,method='L-BFGS-B', bounds=bound, tol=1e-10,
options={'gtol':1e-10, 'maxiter':500})
shift=v_new-vmin.x[0]
else:
shrink=volume_ctrl.quad_shrink
new_v=np.linspace(vini[0]-volume_ctrl.delta/shrink, vini[0]+volume_ctrl.delta/shrink,8)
new_p=np.array([])
for iv in new_v:
pi=(pressure_dir(tt,iv)-pp)**2
new_p=np.append(new_p,pi)
fit_new=np.polyfit(new_v, new_p,2)
der_new=np.polyder(fit_new,1)
vmin=-1*der_new[1]/der_new[0]
shift=v_new-vmin
if volume_ctrl.upgrade_shift:
volume_ctrl.shift=shift
if volume_ctrl.degree > 2:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt volume: "+str(vmin.x[0].round(4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
else:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.plot(new_v, new_p,"*")
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt. volume: "+str(round(vmin,4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS Volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
if volume_ctrl.degree > 2:
test=vmin.success
if not test:
print("\n**** WARNING ****")
print("Optimization in volume_dir not converged; approx. volume returned")
print("temperature: %5.2f, Volume: %6.3f" % (tt, vini[0]))
volume_ctrl.v_last=vini[0]
vol_opt.off()
return vini[0]
else:
volume_ctrl.v_last=vini[0]
return vmin.x[0]
else:
volume_ctrl.v_last=vmin
return vmin
def volume_from_F(tt, shrink=10., npoints=60, debug=False):
"""
Computation of the equilibrium volume at any given temperature
and at 0 pressure. The algorithm looks for the minimum of the
Helmholtz function with respect to V (it is equivalent to the
    minimization of the Gibbs free energy function as the pressure is
    zero). The method is very similar to that implemented in the
more general volume_dir function, but it does not require the
calculation of any derivative of F (to get the pressure).
The Helmholtz free energy is computed by means of the free_fit_vt
function.
Args:
tt: temperature (in K)
npoints: number of points in the V range (centered around an
initial volume computed by the volume_dir function),
where the minimum of F is to be searched (default 60).
shrink: shrinking factor for the definition of the V-range for
the optimization of V (default 10).
debug: plots and prints debug information. If debug=False, only
the optimized value of volume is returned.
Note:
The function makes use of parameters sets by the methods of
the volume_F_ctrl instance of the volume_F_control_class class.
In particular, the initial value of volume computed by the
volume_dir function can be shifted by the volume_F_ctrl.shift
value. This value is set by the volume_F_ctrl.set_shift method
provided that the volume_F_ctrl.upgrade_shift flag is True.
"""
delta=volume_ctrl.delta
d2=delta/2.
vini=volume_dir(tt,0)
if volume_F_ctrl.get_flag():
shift=volume_F_ctrl.get_shift()
vini=vini+shift
v_eos=new_volume(tt,0)[0]
vlist=np.linspace(vini-d2, vini+d2, npoints)
flist=list(free_fit_vt(tt, iv) for iv in vlist)
imin=np.argmin(flist)
vmin=vlist[imin]
vlist2=np.linspace(vmin-d2/shrink, vmin+d2/shrink, 8)
flist2=list(free_fit_vt(tt, iv) for iv in vlist2)
fit=np.polyfit(vlist2,flist2,2)
fitder=np.polyder(fit,1)
vref=-fitder[1]/fitder[0]
fref=np.polyval(fit, vref)
v_shift=vref-vini
if volume_F_ctrl.get_flag() & volume_F_ctrl.get_upgrade_status():
volume_F_ctrl.set_shift(v_shift)
vplot=np.linspace(vref-d2/shrink, vref+d2/shrink, npoints)
fplot=np.polyval(fit, vplot)
if debug:
xt=vlist2.round(2)
title="F free energy vs V at T = "+str(tt)+" K"
plt.figure()
ax=plt.gca()
ax.ticklabel_format(useOffset=False)
plt.plot(vlist2, flist2, "k*", label="Actual values")
plt.plot(vplot, fplot, "k-", label="Quadratic fit")
plt.plot(vref,fref,"r*", label="Minimum from fit")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("F (a.u.)")
plt.xticks(xt)
plt.title(title)
plt.show()
print("\nInitial volume from volume_dir: %8.4f" % vini)
print("Volume from EoS fit: %8.4f" % v_eos)
print("Approx. volume at minimum F (numerical): %8.4f" % vmin)
print("Volume at minimum (from fit): %8.4f\n" % vref)
return vref
else:
return vref
def volume_from_F_serie(tmin, tmax, npoints, fact_plot=10, debug=False, expansion=False, degree=4,
fit_alpha=False, export=False, export_alpha=False, export_alpha_fit=False):
"""
Volume and thermal expansion (at zero pressure) in a range of temperatures,
computed by the minimization of the Helmholtz free energy function.
Args:
tmin, tmax, npoints: minimum, maximum and number of points defining
the T range
fact_plot: factor used to compute the number of points for the plot
(default 10)
debug: debugging information (default False)
expansion: computation of thermal expansion (default False)
degree: if expansion=True, in order to compute the thermal expansion
a log(V) vs T polynomial fit of degree 'degree' is performed
(default 4)
        fit_alpha: thermal expansion is fitted to a power series (default False)
        export: the list of computed volumes is exported (default False)
        export_alpha: the list of computed alpha values is exported (default False)
        export_alpha_fit: coefficients of the power series fitting the alpha's
                          are exported
Note:
Thermal expansion is computed from a log(V) versus T polynomial fit
Note:
        if export is True, only the volume list is exported (and the function
        returns) no matter if expansion is also True (that is, thermal expansion
        is not computed). Likewise, if export_alpha is True, no fit of the thermal
        expansion data on a power series is performed (and, therefore, such data from
        the fit cannot be exported).
Note:
        Having exported the coefficients of the power series fitting the alpha values,
they can be uploaded to a particular phase by using the load_alpha method
of the mineral class; e.g. py.load_alpha(alpha_fit, power_a)
Examples:
>>> alpha_fit=volume_from_F_serie(100, 400, 12, expansion=True, fit_alpha=True, export_alpha_fit=True)
>>> py.load_alpha(alpha_fit, power_a)
>>> py.info()
"""
t_list=np.linspace(tmin, tmax, npoints)
v_list=list(volume_from_F(it, debug=debug) for it in t_list)
if export:
return v_list
plt.figure()
plt.plot(t_list, v_list, "k-")
plt.xlabel("T (K)")
plt.ylabel("V (A^3)")
plt.title("Volume vs Temperature at zero pressure")
plt.show()
if expansion:
logv=np.log(v_list)
fit=np.polyfit(t_list, logv, degree)
fitder=np.polyder(fit, 1)
alpha_list=np.polyval(fitder, t_list)
if export_alpha:
return alpha_list
t_plot=np.linspace(tmin, tmax, npoints*fact_plot)
lv_plot=np.polyval(fit, t_plot)
label_fit="Polynomial fit, degree: "+str(degree)
plt.figure()
plt.title("Log(V) versus T")
plt.xlabel("T (K)")
plt.ylabel("Log(V)")
plt.plot(t_list, logv, "k*", label="Actual values")
plt.plot(t_plot, lv_plot, "k-", label=label_fit)
plt.legend(frameon=False)
plt.show()
plt.figure()
plt.title("Thermal expansion")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.plot(t_list, alpha_list, "k*", label="Actual values")
if fit_alpha:
if not flag_alpha:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
else:
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
alpha_value=[]
for ict in t_plot:
alpha_i=alpha_dir_fun(ict,*alpha_fit)
alpha_value=np.append(alpha_value,alpha_i)
                plt.plot(t_plot,alpha_value,"k-", label="Power series fit")
plt.legend(frameon=False)
plt.show()
if export_alpha_fit & flag_alpha & fit_alpha:
return alpha_fit
def volume_conversion(vv, atojb=True):
"""
Volume conversion from/to unit cell volume (in A^3) to/from the molar volume
(in J/bar)
Args:
vv: value of volume (in A^3 or J/bar)
        atojb: if atojb is True (default), conversion is from A^3 to J/bar
if atojb is False, conversion is from J/bar to A^3
"""
if atojb:
vv=vv*avo*1e-25/zu
print("Molar volume: %7.4f J/bar" % vv)
else:
vv=vv*zu*1e25/avo
print("Cell volume: %7.4f A^3" % vv)
def find_temperature_vp(vv,pp, tmin=100., tmax=1000., prt=True):
nt=50
t_list=np.linspace(tmin,tmax,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0=t_list[min_diff]
delta=20.
t_min=t_0-delta
t_max=t_0+delta
t_list=np.linspace(t_min,t_max,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0f=t_list[min_diff]
if prt:
print("Temperature found:")
print("First guess %5.2f; result: %5.2f K" % (t_0, t_0f))
else:
return t_0f
def find_pressure_vt(vv,tt, pmin, pmax, prt=True):
npp=50
p_list=np.linspace(pmin,pmax,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0=p_list[min_diff]
delta=0.5
p_min=p_0-delta
p_max=p_0+delta
p_list=np.linspace(p_min,p_max,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0f=p_list[min_diff]
if prt:
print("Pressure found:")
print("First guess %5.2f; result: %5.2f GPa" % (p_0, p_0f))
else:
return p_0f
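# Usage sketch for the two inverse searches above (the volume, temperature and
# pressure values are illustrative):
#
#   >>> find_temperature_vp(740., 0., tmin=300., tmax=900.)    # T at which V(T,P)=740 A^3
#   >>> p = find_pressure_vt(740., 300., 0., 10., prt=False)   # P at which V(T,P)=740 A^3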
def bulk_dir(tt,prt=False, out=False, **kwargs):
"""
Optimizes a BM3 EoS from volumes and total pressures at a given
temperature. In turn, phonon pressures are directly computed as volume
derivatives of the Helmholtz function; static pressures are from a V-BM3
fit of E(V) static data.
Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: False
out (optional): if True, the optimized V0, K0 and Kp are returned; default: False
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
serie: if True, the fitted K0 and Kp (plus V0, if volume=True) are returned
volume: relevant if serie=True; adds V0 to the returned values
"""
flag_volume_max.value=False
l_arg=list(kwargs.items())
fixpar=False
flag_serie=False
vol_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if 'serie' == karg_i[0]:
flag_serie=karg_i[1]
if 'volume' == karg_i[0]:
vol_flag=karg_i[1]
[dum,pterm,dum]=bmx_tem(tt)
ini=pterm[0:3]
flag_x=False
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1]]
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
f_fix_orig=f_fix.flag
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
try:
if flag_x:
pdir, pcov_dir = curve_fit(lambda v_new, v0, k0: \
bm3(v_new, v0, k0, fix), \
v_new, p_new, p0=p0_f, method='dogbox',\
ftol=1e-15, xtol=1e-15)
else:
pdir, pcov_dir = curve_fit(bm3, v_new, p_new, \
method='dogbox', p0=ini[0:3], ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
except RuntimeError:
print("EoS optimization did not succeeded for t = %5.2f" % tt)
flag_dir.on()
if flag_serie:
return 0,0
else:
return
if flag_x:
pdir=np.append(pdir,fix)
perr_t=np.append(perr_t,0.00)
if flag_serie and vol_flag:
return pdir[0],pdir[1],pdir[2]
if flag_serie:
return pdir[1],pdir[2]
if out:
return pdir[0], pdir[1], pdir[2]
print("\nBM3 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
info.temp=tt
info.k0=pdir[1]
info.kp=pdir[2]
info.v0=pdir[0]
vol=np.linspace(min(v_new),max(v_new),16)
press=bm3(vol,*pdir)
plt.figure()
plt.title("BM3 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if not f_fix_orig:
reset_fix()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_dir_serie(tini, tfin, npoints, degree=2, update=False, **kwargs):
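"""
Computes the bulk modulus K0, through bulk_dir, on a grid of temperatures and fits
a polynomial K0(T).
Args:
tini, tfin: limits of the temperature range
npoints: number of temperatures in the range
degree: degree of the fitting polynomial (default 2)
update: if True, the coefficients of the K0(T) polynomial are returned
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
"""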
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_serie=np.linspace(tini, tfin, npoints)
tx_serie=np.array([])
b_serie=np.array([])
for ti in t_serie:
flag_dir.off()
if not fixpar:
bi,kpi=bulk_dir(ti,serie=True)
else:
bi,kpi=bulk_dir(ti, serie=True, fix=fix_value)
if not flag_dir.value:
b_serie=np.append(b_serie,bi)
tx_serie=np.append(tx_serie,ti)
else:
pass
t_serie=tx_serie
plt.figure()
plt.plot(t_serie,b_serie,"k*")
plt.title("Bulk modulus (K0)")
plt.xlabel("T(K)")
plt.ylabel("K (GPa)")
plt.title("Bulk modulus as a function of T")
fit_b=np.polyfit(t_serie,b_serie,degree)
b_fit=np.polyval(fit_b,t_serie)
plt.plot(t_serie,b_fit,"k-")
print("\nResults from the fit (from high to low order)")
np.set_printoptions(formatter={'float': '{: 4.2e}'.format})
print(fit_b)
np.set_printoptions(formatter=None)
plt.show()
if update:
return fit_b
volume_ctrl.shift=0.
def bm4_dir(tt,prt=True):
"""
Optimizes a BM4 EoS from volumes and total pressures at a given
temperature. Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: True
"""
flag_volume_max.value=False
start_bm4()
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
ini=np.copy(bm4.en_ini[0:4])
ini[1]=ini[1]*conv*1e21
pdir, pcov_dir = curve_fit(bm4.pressure, v_new, p_new, \
p0=ini, ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
print("\nBM4 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("Kpp: %8.2f (%4.2f) " % (pdir[3], perr_t[3]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
vol=np.linspace(min(v_new),max(v_new),16)
press=bm4.pressure(vol,*pdir)
plt.figure()
plt.title("BM4 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_modulus_p(tt,pp,noeos=False,prt=False,**kwargs):
"""
Bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
noeos: to compute pressures, the bm3 EoS is used if
noeos=False (default); otherwise the EoS is
used only for the static part, and vibrational
pressures are obtained from the derivative
of the F function (pressure_dir function)
prt: if True, results are printed
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1. This is relevant
if noeos=False
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of pressure requires the bm3_tem function
(if noeos=False) Kp can be kept fixed by setting fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not noeos:
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
else:
vol=new_volume(tt,pp)[0]
else:
vol=volume_dir(tt,pp)
if not vd.flag:
delta=pr.delta_v
else:
delta=vd.delta
numv=pr.nump_v
degree=pr.degree_v
v_range=np.linspace(vol-delta/2.,vol+delta/2.,numv)
press_range=[]
for iv in v_range:
if not noeos:
if fixpar:
p_i=pressure(tt,iv,fix=fix_value)
else:
p_i=pressure(tt,iv)
else:
p_i=pressure_dir(tt,iv)
press_range=np.append(press_range,p_i)
press_fit=np.polyfit(v_range,press_range,degree)
b_poly=np.polyder(press_fit,1)
b_val=np.polyval(b_poly,vol)
b_val=(-1*b_val*vol)
if prt:
eos=str(noeos)
print("Bulk Modulus at T = %5.1f K and P = %3.1f GPa, noeos = %s: %6.3f GPa, V = %6.3f " %\
(tt,pp,eos,b_val, vol))
else:
b_val=round(b_val,3)
return b_val, vol
def bulk_modulus_p_serie(tini, tfin, nt, pres, noeos=False, fit=False, type='poly', \
deg=2, smooth=5, out=False, **kwargs):
"""
Computes the bulk modulus from the definition K=-V(dP/dV)_T in a range
of temperature values
Args:
tini: lower temperature in the range
tfin: higher temperature in the range
nt: number of points in the [tini, tfin] range
pres: pressure (GPa)
noeos: see note below
fit: if True, a fit of the computed K(T) values is performed
type: type of the fit ('poly', or 'spline')
deg: degree of the fit
smooth: smooth parameter for the fit; relevant if type='spline'
out: if True, the parameters of the K(T) and V(T) fits are printed
Keyword Args:
fix: if fix is provided, Kp is kept fixed at the fix value
Relevant if noeos=False
Note:
if noeos=False, the pressure at any given volume is calculated
from the equation of state. If noeos=True, the pressure is computed
as the first derivative of the Helmholtz function (at constant
temperature)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_list=np.linspace(tini, tfin, nt)
b_l=np.array([])
t_l=np.array([])
v_l=np.array([])
if fixpar:
for it in t_list:
ib, v_val=bulk_modulus_p(it,pres,noeos=noeos,fix=fix_value)
if vol_opt.flag:
b_l=np.append(b_l,ib)
t_l=np.append(t_l,it)
v_l=np.append(v_l,v_val)
else:
for it in t_list:
ib,v_val=bulk_modulus_p(it,pres,noeos=noeos)
if vol_opt.flag:
t_l=np.append(t_l,it)
b_l=np.append(b_l,ib)
v_l=np.append(v_l,v_val)
if fit:
t_fit=np.linspace(tini,tfin,50)
if type=='poly':
fit_par=np.polyfit(t_l,b_l,deg)
b_fit=np.polyval(fit_par,t_fit)
fit_par_v=np.polyfit(t_l,v_l,deg)
v_fit=np.polyval(fit_par_v,t_fit)
elif type=='spline':
fit_par=UnivariateSpline(t_l,b_l,k=deg,s=smooth)
b_fit=fit_par(t_fit)
fit_par_v=UnivariateSpline(t_l,v_l,k=deg,s=0.1)
v_fit=fit_par_v(t_fit)
method='poly'
if type=='spline':
method='spline'
lbl=method+' fit'
plt.figure()
plt.plot(t_l,b_l,"k*",label='Actual values')
if fit:
plt.plot(t_fit, b_fit,"k-",label=lbl)
plt.xlabel("Temperature (K)")
plt.ylabel("K (GPa)")
tlt="Bulk modulus at pressure "+str(pres)
plt.title(tlt)
plt.legend(frameon=False)
plt.show()
reset_fix()
if out & fit:
return fit_par, fit_par_v
def bulk_modulus_adiabat(tt,pp,noeos=False, prt=True,**kwargs):
"""
Adiabatic bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of pressure requires the bm3_tem function,
Kp can be kept fixed by setting fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False,fix=fix_value)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos,fix=fix_value)
ent,cv=entropy_v(tt,vol,False,False,fix=fix_value)
else:
vol=new_volume(tt,pp)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos)
ent,cv=entropy_v(tt,vol,False,False)
volm=(vol*avo*1e-30)/zu
ks=kt*(1+volm*(tt*1e9*kt*alpha**2)/cv)
if prt:
print("\nAdiabatic bulk modulus Ks: %5.2f GPa" % ks)
print("Isoth. Kt: %5.2f GPa, alpha: %5.2e K^-1, sp. heat Cv: %6.2f J/mol K"\
% (kt, alpha, cv))
print("Cell volume: %6.2f A^3, molar volume %6.2f cm^3" % (vol, 1e6*volm))
else:
return ks
def static(plot=False, vmnx=[0., 0.]):
"""
Static EoS
Args:
plot: plot of the E(V) curve
vmnx: array of two reals [vmin and vmax]; vmin is the
minimum volume and vmax is the maximum volume.
If vmin and vmax are both 0., the whole V range
is used (as specified in the static energies file).
Default=[0., 0.]
Note:
The volume range can also be modified by using the methods
of the static_volume class
Examples:
>>> static_volume.set(100., 120.)
>>> static_volume.on()
>>> static(plt=True)
Computes the static EoS in the [100., 120.] volume range. The same
is obtained with
>>> static(plt=True, vmnx=[100., 120.])
However, with the first method the defined volume range is recorded for
future computations; by using the second method, the volume range is reset
to the original one, once the fit is performed.
"""
global pcov
if flag_err:
return None
vol_flag=False
if static_range.flag:
vol_min=static_range.vmin
vol_max=static_range.vmax
vol_flag=True
else:
if (vmnx[0] > 0.1) or (vmnx[1] > 0.1):
vol_flag=True
vol_min=vmnx[0]
vol_max=vmnx[1]
if vol_flag:
vol_select=(volume >= vol_min) & (volume <= vol_max)
vol_selected=volume[vol_select]
energy_selected=energy[vol_select]
if not vol_flag:
popt, pcov = curve_fit(v_bm3, volume, energy, p0=ini,ftol=1e-15,xtol=1e-15)
else:
popt, pcov = curve_fit(v_bm3, vol_selected, energy_selected, p0=ini,ftol=1e-15,xtol=1e-15)
k_gpa=popt[1]*conv/1e-21
kp=popt[2]
v0=popt[0]
perr=np.sqrt(np.diag(pcov))
ke=perr[1]*conv/1e-21
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f (%4.2f) GPa" % (k_gpa, ke))
print("Kp: %5.2f (%4.2f)" % (kp, perr[2]))
print("V0: %5.4f (%4.2f) A^3" % (v0, perr[0]))
print("E0: %5.8e (%4.2e) hartree" % (popt[3], perr[3]))
if vol_flag:
print("\nStatic EoS computed in a restricted volume range:")
print(vol_selected)
print("\n")
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
info.popt=popt
info.popt_orig=popt
vd.set_delta(v0)
vol_min=np.min(volume)
vol_max=np.max(volume)
nvol=50
vol_range=np.linspace(vol_min,vol_max,nvol)
if plot:
plt.figure(0)
plt.title("E(V) static BM3 curve")
plt.plot(volume,energy,"*")
plt.plot(vol_range, v_bm3(vol_range, *popt), 'b-')
plt.ylabel("Static energy (a.u.)")
plt.xlabel("V (A^3)")
plt.show()
def p_static(nvol=50, v_add=[], e_add=[]):
"""
Computes a static BM3-EoS from a P/V set of data. Data (cell volumes in A^3 and
pressures in GPa) must be contained in a file whose name must be specified
in the input file (together with the energy, in hartree, at the equilibrium
static volume).
Args:
nvol: number of volume points for the graphical output (default 50)
v_add / e_add: lists of volume/energy data to be plotted together
with the E/V curve from the V-EoS fit. Such added
points are not used in the fit (no points added as default)
Note:
This function provides static data for the calculation of the static
contribution to the Helmholtz free energy. It is an alternative to
the fit of the static E/V data performed by the 'static' function.
"""
add_flag=False
if v_add != []:
add_flag=True
p_data=np.loadtxt(data_p_file)
pres_gpa=p_data[:,1]
vs=p_data[:,0]
pres=pres_gpa*1e-21/conv
pstat, cstat = curve_fit(bm3, vs, pres, p0=ini[0:3],ftol=1e-15,xtol=1e-15)
info.popt=pstat
info.popt=np.append(info.popt,static_e0)
k_gpa=info.popt[1]*conv/1e-21
kp=info.popt[2]
v0=info.popt[0]
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f GPa" % k_gpa)
print("Kp: %5.2f " % kp )
print("V0: %5.4f A^3" % v0)
print("E0: %5.8e hartree" % info.popt[3])
vol_min=np.min(vs)
vol_max=np.max(vs)
ps=info.popt[0:3]
vol_range=np.linspace(vol_min,vol_max,nvol)
p_GPa=bm3(vol_range, *ps)*conv/1e-21
plt.figure(0)
plt.title("P(V) static BM3 curve")
plt.plot(vs,pres_gpa,"*")
plt.plot(vol_range, p_GPa, 'b-')
plt.ylabel("Pressure (GPa)")
plt.xlabel("V (A^3)")
plt.show()
p_stat.flag=True
p_stat.vmin=np.min(vs)
p_stat.vmax=np.max(vs)
p_stat.pmin=np.min(pres_gpa)
p_stat.pmax=np.max(pres_gpa)
p_stat.npoints=vs.size
p_stat.k0=k_gpa
p_stat.kp=kp
p_stat.v0=v0
p_stat.e0=static_e0
energy_static=v_bm3(vol_range, *info.popt_orig)
energy_pstatic=v_bm3(vol_range, *info.popt)
delta=energy_pstatic-energy_static
select=(volume >= vol_min) & (volume <= vol_max)
vv=volume[select]
ee=energy[select]
plt.figure()
plt.plot(vol_range, energy_static, "k-", label="STATIC case")
plt.plot(vol_range, energy_pstatic, "k--", label="PSTATIC case")
plt.plot(vv,ee,"k*", label="Original E(V) data")
if add_flag:
plt.plot(v_add, e_add, "r*", label="Not V-BM3 fitted data")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("E(V) curves")
plt.show()
plt.figure()
plt.plot(vol_range,delta,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("Pstatic and static energy difference")
plt.show()
delta=abs(delta)
mean=delta.mean()
mean_j=mean*conv*avo/zu
std=delta.std()
imx=np.argmax(delta)
mx=delta[imx]
vx=vol_range[imx]
print("Mean discrepancy: %6.3e hartree (%5.1f J/mole)" % (mean, mean_j))
print("Standard deviation: %4.1e hartree" % std)
print("Maximum discrepancy %6.3e hartree for a volume of %6.2f A^3" % (mx, vx))
def static_pressure_bm3(vv):
"""
Outputs the static pressure (in GPa) at the volume (vv)
Args:
vv: volume
"""
static(plot=False)
k0=info.popt[1]
kp=info.popt[2]
v0=info.popt[0]
p_static_bm3=bm3(vv,v0, k0,kp)
ps=p_static_bm3*conv/1e-21
print("Static pressure at the volume: %4.2f" % ps)
def start_bm4():
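"""
Fits a static BM4 EoS to the E(V) data, stores the optimized parameters in the
bm4 object and prints and plots the result.
"""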
bm4.on()
bm4.estimates(volume,energy)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bm4p, bm4c = curve_fit(bm4.energy, volume, energy, \
method='dogbox', p0=bm4.en_ini,ftol=1e-15,xtol=1e-15,gtol=1e-15)
bm4.store(bm4p)
bm4.upgrade()
bm4.upload(bm4p)
bm4_k=bm4p[1]*conv/1e-21
kp=bm4p[2]
kpp=bm4p[3]
v0=bm4p[0]
print("\nStatic BM4-EoS")
print("\nBulk Modulus: %5.2f GPa" % bm4_k)
print("Kp: %5.2f " % kp)
print("Kpp: %5.2f " % kpp)
print("V0: %8.4f A^3" % v0)
print("\n")
plt.figure()
# bm4e=np.array([])
vbm4=np.linspace(min(volume),max(volume),50)
bm4e=bm4.energy(vbm4,*bm4.bm4_static_eos)
plt.plot(vbm4,bm4e,"k-")
plt.plot(volume,energy,"k*")
plt.title("Static Energy: BM4 fit")
plt.xlabel("Static energy (a.u.)")
plt.ylabel("V (A^3)")
plt.show()
def free(temperature):
"""
Computes the Helmholtz free energy (hartree) at a given temperature
Args:
temperature: temperature (in K) at which the computation is done
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This is a direct calculation that avoids the fit of a polynomial
to the frequencies. No FITVOL in input.txt
Note:
If kieffer.flag is True, the contribution from acoustic branches
is taken into account, by following the Kieffer model.
"""
energy_tot=[]
for ivol in int_set:
vol_i=data_vol_freq_orig[ivol]
if bm4.flag:
ei=bm4.energy(vol_i,*bm4.bm4_static_eos)
else:
ei=v_bm3(vol_i, *info.popt)
enz_i=0.
fth_i=0.
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
freq_i=lo.data_freq[ifreq,ivol+1]
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit(temperature):
"""
Computes the Helmholtz free energy (in hartree) at a given temperature
Args:
temperature: temperature (in K)
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This computation makes use of polynomials fitted
to the frequencies of each vibrational mode, as
functions of volume. It is activated by the keyword
FITVOL in the input.txt file
Note:
Possible contributions from anharmonicity (keyword ANH in the input
file) or from a modified Kieffer model (keyword KIEFFER in the input file)
are included. NO contribution from DISP modes is considered (phonon dispersion
from a supercell calculation).
Note: the volumes at which the free energy refers are defined in the fit_vol
list
"""
energy_tot=[]
eianh=0.
if flag_spline.flag:
fit_vol=flag_spline.fit_vol
elif flag_poly.flag:
fit_vol=flag_poly.fit_vol
for ivol in fit_vol:
if bm4.flag:
ei=bm4.energy(ivol,*bm4.bm4_static_eos)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
else:
ei=v_bm3(ivol,*info.popt)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
enz_i=0.
fth_i=0.
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,ivol)
else:
freq_i=freq_spline_v(ifreq,ivol)
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit_vt(tt,vv):
"""
Computes the Helmholtz free energy at a given temperature and volume.
Free energy is computed by addition of several contributions:
(1) static contribution from a volume-integrated BM3 EoS
(2) vibrational contribution from optical vibrational modes
(3) vibrational contribution from phonon dispersion (supercell calculations)
(4) vibrational contribution from acoustic modes (modified Kieffer model)
(5) vibrational contribution from anharmonic mode(s)
Contributions (1) and (2) are always included; contributions (3) and (4)
are mutually exclusive and are respectively activated by the keywords
DISP and KIEFFER in the input file; anharmonic contributions (5) are activated
by the keyword ANH in the input file.
Args:
tt: temperature (K)
vv: volume (A^3)
"""
e_static=v_bm3(vv,*info.popt)
enz=0
fth=0
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,vv,tt)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,vv)
else:
freq_i=freq_spline_v(ifreq,vv)
if freq_i >= 0.:
fth=fth+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/tt))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz=enz+deg[ifreq]*freq_i*ez_fact
tot_no_static=enz+fth*kb*tt/conv+eianh
tot=e_static+tot_no_static
if kieffer.flag:
free_k=kieffer.get_value(tt)
free_k=free_k/(avo*conv)
tot=tot+free_k
if disp.flag and (disp.eos_flag or disp.thermo_vt_flag):
if not disp.fit_vt_flag:
disp.free_fit_vt()
print("\n**** INFORMATION ****")
print("The V,T-fit of the phonon dispersion surface was not prepared")
print("it has been perfomed with default values of the relevant parameters")
print("Use the disp.free_fit_vt function to redo with new parameters\n")
disp_l=disp.free_vt(tt,vv)
free_f=(tot_no_static+disp_l)/(disp.molt+1)
tot=e_static+free_f
return tot
def eos_temp_range(vmin_list, vmax_list, npp, temp):
"""
EoS computed for different volumes ranges
Args:
vmin_list: list of minimum volumes
vmax_list: list of maximum volumes
npp: number of points in each V-range
temp: temperature
Note:
vmin_list and vmax_list must be lists of same length
"""
final=np.array([])
size=len(vmin_list)
for vmin, vmax in zip(vmin_list,vmax_list):
v_list=np.linspace(vmin,vmax,npp)
free_list=np.array([])
for iv in v_list:
ifree=free_fit_vt(temp, iv)
free_list=np.append(free_list,ifree)
pterm, pcov_term = curve_fit(v_bm3, v_list, free_list, \
p0=ini, ftol=1e-15, xtol=1e-15)
k_gpa=pterm[1]*conv/1e-21
k_gpa_err=pcov_term[1]*conv/1e-21
pmax=pressure(temp,vmin)
pmin=pressure(temp,vmax)
final=np.append(final, [vmin, vmax, round(pmax,1), round(pmin,1), round(pterm[0],4), round(k_gpa,2), \
round(pterm[2],2)])
final=final.reshape(size,7)
final=final.T
pd.set_option('colheader_justify', 'center')
df=
|
pd.DataFrame(final, index=['Vmin','Vmax','Pmax','Pmin','V0','K0','Kp'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures # TODO: implement what I need from this package
class Design:
"""
Generator of experimental designs: random starting designs, model matrices, and coordinate-exchange optimization histories.
"""
def __init__(self, experiments=None, levels=None):
"""
:param int experiments: Number of experiments to design
:param dict levels: Levels of the factors, keyed by factor name
Constructor: stores the design size and the factor levels.
"""
self.experiments = experiments
self.levels = levels
self.features = len(levels.keys())
self.order = None
self.interactions_only = None
self.bias = None
self.epochs = None
self.engine = None
# ---------------DUNDER, GETTERS AND SETTERS FUNCTION---------------------------------------------------------------
def __repr__(self):
return f"Design(experiments={self.experiments}, levels={self.levels})"
def set_model(self, order, interactions_only=False, bias=True):
"""
:param int order: Order of the polynomial (1-main effects, 2-quadratic effects, ...)
:param bool interactions_only: Include terms as x1^2 or not
:param bool bias: Include a beta_0 on the design matrix or not
Setter for model parameters
"""
self.order = order
self.interactions_only = interactions_only
self.bias = bias
def set_algorithm(self, epochs, engine):
"""
:param int epochs: Number of random start to check
:param str engine: What engine to use for maximization. Includes ("A", "C", "D", "E", "S", "T", "G", "I", "V")
Setter for algorithm parameters
"""
self.epochs = epochs
self.engine = engine
# ------------------------------------------------------------------------------------------------------------------
def gen_random_design(self) -> pd.DataFrame:
"""
Generate a random starting design matrix.
"""
df = pd.DataFrame(np.random.random((self.experiments, self.features)))
df.columns = ['x' + str(x) for x in list(range(self.features))]
return df
def gen_model_matrix(self, data=None) -> pd.DataFrame:
"""
:param pd.DataFrame data: Design matrix
Generate the model matrix of a design matrix (argument)
"""
if any(var is None for var in [self.order, self.interactions_only, self.bias]):
raise Exception('Parameters: \'order\', \'interactions_only\' and \'bias\' cannot be None')
poly = PolynomialFeatures(degree=self.order,
interaction_only=self.interactions_only,
include_bias=self.bias)
df = pd.DataFrame(poly.fit_transform(data))
df.columns = poly.get_feature_names(data.columns)
return df
@staticmethod
def clear_histories(optimalities, designs, design_mat):
"""
:param list designs: History of the design matrices explored, one row per epoch
:param list optimalities: History of the optimality-criterion values, one entry per epoch
:param pd.DataFrame design_mat: A design matrix, used here to label the history columns
Collect the histories of explored designs and of the optimality criterion into DataFrames.
"""
hstry_designs = pd.DataFrame(designs, columns=['epoch', *list(design_mat.columns)])
hstry_opt_cr =
|
pd.DataFrame(optimalities)
|
pandas.DataFrame
|
'''
This program will calculate a timeseries of active users across the lifetime of a project (or a workflow id/version for a project).
The inputs needed are:
the classification export file (request & download from the Project Builder)
plus some optional inputs that are listed below.
The program takes snapshots of the classification timeline in units of hours or days, and over each time period it computes the number of classifications submitted, the number of classifiers (registered and unregistered) who submitted the classifications, and the time spent classifying by those users. It outputs these timeseries to a CSV file.
<NAME>, 30th March 2017
updated 21st May 2017
'''
import sys, os
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except:
print("\nUsage: %s classifications_infile" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
print(" Optional extra inputs (no spaces):")
print(" workflow_id=N")
print(" specify the program should only consider classifications from workflow id N")
print(" workflow_version=M")
print(" specify the program should only consider classifications from workflow version M")
print(" (note the program will only consider the major version, i.e. the integer part)")
print(" outfile=filename.csv")
print(" specify the name of the output file. If not specified, it will")
print(" be based on the input filename, e.g. if your input file is")
print(" my-project-classifications.csv, the output file name will be")
print(" my-project-classifications_active_users_timeseries.csv.")
print(" project=project_name")
print(" For use labelling plots with the project name")
print(" --time_spent")
print(" if specified, the program will try to compute time actively spent")
print(" classifying on the project using started_at and finished_at metadata")
print(" --days")
print(" compute stats by day rather than by hour")
print(" --plots_only")
print(" if specified, the program won't re-calculate the time series")
print(" and will instead just read in the outfile and re-make plots.")
sys.exit(0)
import numpy as np
import pandas as pd
import datetime
import dateutil.parser
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import ujson
import gc
from gini import gini
plt.rc('figure', facecolor='none', edgecolor='none', autolayout=True)
plt.rc('path', simplify=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('axes', labelsize='large', facecolor='none', linewidth=0.7, color_cycle = ['k', 'r', 'g', 'b', 'c', 'm', 'y'])
plt.rc('xtick', labelsize='medium')
plt.rc('ytick', labelsize='medium')
plt.rc('lines', markersize=4, linewidth=1, markeredgewidth=0.2)
plt.rc('legend', numpoints=1, frameon=False, handletextpad=0.3, scatterpoints=1, handlelength=2, handleheight=0.1)
plt.rc('savefig', facecolor='none', edgecolor='none', frameon='False')
params = {'font.size' : 11,
'xtick.major.size': 8,
'ytick.major.size': 8,
'xtick.minor.size': 3,
'ytick.minor.size': 3,
}
plt.rcParams.update(params)
# default value is not to care about workflow ID or version
workflow_id = -1
workflow_version = -1
# assume no project name is specified
project_name = ""
# default mode is to calculate the timeseries afresh
plot_only = False
# default mode is to not worry about time spent classifying
time_spent = False
# default mode is to compute stats by the hour
dt_unit = 'h'
outfile = classfile_in.replace(".csv", "_active_users_timeseries.csv")
# if the input filename doesn't have ".csv" in it you might end up overwriting
# the input file with the output file and that would be bad; don't do that.
if outfile == classfile_in:
outfile += "_active_users_timeseries.csv"
# Print out the input parameters just as a sanity check
print("File to be read: %s" % classfile_in)
print(len(sys.argv))
# check for other command-line arguments
if len(sys.argv) > 2:
# if there are additional arguments, loop through them
for i_arg, argstr in enumerate(sys.argv[2:]):
arg = argstr.split('=')
if arg[0] == "workflow_id":
workflow_id = int(arg[1])
print("Restricting classifications to workflow id: %d" % workflow_id)
elif arg[0] == "workflow_version":
workflow_version = arg[1]
print("Restricting classifications to workflow version: %d" % int(workflow_version))
elif arg[0] == "outfile":
outfile = arg[1]
elif arg[0] == "project":
project_name = arg[1]
elif arg[0] == "--time_spent":
time_spent = True
elif arg[0] == "--days":
dt_unit = 'D'
elif arg[0] == "--plots_only":
plot_only = True
print("File to be written: %s" % outfile)
if not plot_only:
print("Reading classifications...")
#classifications = pd.read_csv(classfile_in)
# the above will work but uses a LOT of memory for projects with > 1 million
# classifications. Nothing here uses the actual classification data so don't read it
if time_spent:
cols_keep = ["user_name", "user_id", "user_ip", "workflow_id", "workflow_version", "created_at", "metadata"]
else:
cols_keep = ["user_name", "user_id", "user_ip", "workflow_id", "workflow_version", "created_at"]
classifications = pd.read_csv(classfile_in, usecols=cols_keep)
# now restrict classifications to a particular workflow id/version if requested
if (workflow_id > 0) | (float(workflow_version) > 0):
# only keep the stuff that matches these workflow properties
if (workflow_id > 0):
#print("Considering only workflow id %d" % workflow_id)
in_workflow = classifications.workflow_id == workflow_id
else:
# the workflow id wasn't specified, so just make an array of true
in_workflow = np.array([True for q in classifications.workflow_id])
if (float(workflow_version) > 0):
classifications['version_int'] = [int(q) for q in classifications.workflow_version]
#print("Considering only major workflow version %d" % int(workflow_version))
# we only care about the major workflow version, not the minor version
in_version = classifications.version_int == int(float(workflow_version))
else:
in_version = np.array([True for q in classifications.workflow_version])
if (sum(in_workflow & in_version) == 0):
print("ERROR: your combination of workflow_id and workflow_version does not exist!\nIgnoring workflow id/version request and computing stats for ALL classifications instead.")
#classifications = classifications_all
else:
# select the subset of classifications
classifications = classifications[in_workflow & in_version]
else:
# just use everything
#classifications = classifications_all
workflow_ids = classifications.workflow_id.unique()
# this takes too much CPU time just for a print statement. Just use float versions
#classifications['version_int'] = [int(q) for q in classifications.workflow_version]
version_ints = classifications.workflow_version.unique()
print("Considering all classifications in workflow ids:")
print(workflow_ids)
print(" and workflow_versions:")
print(version_ints)
if time_spent:
classifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]
sa_temp = classifications['started_at_str']
fa_temp = classifications['finished_at_str']
classifications['count'] = np.ones_like(classifications.user_name)
print("Creating timeseries...")#,datetime.datetime.now().strftime('%H:%M:%S.%f')
ca_temp = classifications['created_at'].copy()
# Do these separately so you can track errors to a specific line
# Try the format-specified ones first (because it's faster, if it works)
try:
classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
#print "Oops:\n", the_error
try:
classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S')
except Exception as the_error:
#print "Oops:\n", the_error
classifications['created_at_ts'] = pd.to_datetime(ca_temp)
# no except for this because if it fails the program really should exit anyway
if time_spent:
try:
classifications['started_at_ts'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
#print "Oops:\n", the_error
try:
classifications['started_at_ts'] =
|
pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S')
|
pandas.to_datetime
|
import sys
import os.path
import argparse
import time
import configparser
from configparser import ExtendedInterpolation
import pandas as pd
import json
import glob
###################################
parser = argparse.ArgumentParser(description='Remove duplicate features.')
parser.add_argument('-eb','--experiment_base_dir', type=str, default='./experiments', help='Path to the experiments directory.', required=False)
parser.add_argument('-en','--experiment_name', type=str, help='Name of the experiment.', required=True)
parser.add_argument('-rn','--run_name', type=str, help='Name of the run.', required=True)
parser.add_argument('-pdm','--precursor_definition_method', type=str, choices=['pasef','3did','mq'], help='The method used to define the precursor cuboids.', required=True)
parser.add_argument('-ini','--ini_file', type=str, default='./tfde/pipeline/pasef-process-short-gradient.ini', help='Path to the config file.', required=False)
parser.add_argument('-v','--verbose_mode', action='store_true', help='Verbose mode.')
args = parser.parse_args()
# Print the arguments for the log
info = []
for arg in vars(args):
info.append((arg, getattr(args, arg)))
print(info)
start_run = time.time()
# check the experiment directory exists
EXPERIMENT_DIR = "{}/{}".format(args.experiment_base_dir, args.experiment_name)
if not os.path.exists(EXPERIMENT_DIR):
print("The experiment directory is required but doesn't exist: {}".format(EXPERIMENT_DIR))
sys.exit(1)
# check the INI file exists
if not os.path.isfile(args.ini_file):
print("The configuration file doesn't exist: {}".format(args.ini_file))
sys.exit(1)
# load the INI file
cfg = configparser.ConfigParser(interpolation=ExtendedInterpolation())
cfg.read(args.ini_file)
# input features directory
FEATURES_DIR = "{}/features-{}".format(EXPERIMENT_DIR, args.precursor_definition_method)
# set up constants
if args.precursor_definition_method == '3did':
DUP_MZ_TOLERANCE_PPM = cfg.getint('3did', 'DUP_MZ_TOLERANCE_PPM')
DUP_SCAN_TOLERANCE = cfg.getint('3did', 'DUP_SCAN_TOLERANCE')
DUP_RT_TOLERANCE = cfg.getint('3did', 'DUP_RT_TOLERANCE')
else:
DUP_MZ_TOLERANCE_PPM = cfg.getint('ms1', 'DUP_MZ_TOLERANCE_PPM')
DUP_SCAN_TOLERANCE = cfg.getint('ms1', 'DUP_SCAN_TOLERANCE')
DUP_RT_TOLERANCE = cfg.getint('ms1', 'DUP_RT_TOLERANCE')
print('removing duplicate features that are within +/- {} ppm m/z, {} scans, {} seconds'.format(DUP_MZ_TOLERANCE_PPM, DUP_SCAN_TOLERANCE, DUP_RT_TOLERANCE))
# output features
FEATURES_DEDUP_FILE = '{}/exp-{}-run-{}-features-{}-dedup.feather'.format(FEATURES_DIR, args.experiment_name, args.run_name, args.precursor_definition_method)
# check the features directory
if not os.path.exists(FEATURES_DIR):
print("The features directory is required but doesn't exist: {}".format(FEATURES_DIR))
sys.exit(1)
if args.precursor_definition_method == 'pasef':
# check we have some to process
FEATURE_CHUNKS_DIR = '{}/chunks'.format(FEATURES_DIR)
feature_files = glob.glob('{}/exp-{}-run-{}-features-pasef-*.feather'.format(FEATURE_CHUNKS_DIR, args.experiment_name, args.run_name))
if len(feature_files) == 0:
print("The features files are required but doesn't exist: {}".format(FEATURE_CHUNKS_DIR))
sys.exit(1)
# load the detected features
features_l = []
for f in feature_files:
features_l.append(
|
pd.read_feather(f)
|
pandas.read_feather
|
"""
Two types of solvers/optimizers:
1. The first type take in an augmented data set returned by
data_augment, and try to minimize classification error over the
following hypothesis class: { h(X) = 1[ f(x) >= x['theta']] : f in F}
over some real-valued class F.
Input: augmented data set, (X, Y, W)
Output: a model that can predict label Y
These solvers are used with exp_grad
2. The second type simply solves the regression problem
on a data set (x, a, y)
These solvers serve as our unconstrained benchmark methods.
"""
import functools
import numpy as np
import pandas as pd
import random
import data_parser as parser
import data_augment as augment
from gurobipy import *
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, mean_absolute_error, log_loss
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingRegressor, GradientBoostingClassifier
import xgboost as xgb
import time
_LOGISTIC_C = 5 # Constant for rescaled logistic loss; might have to
# change for data_augment
# from sklearn.model_selection import train_test_split
"""
Oracles for fair regression algorithm
"""
class SVM_LP_Learner:
"""
Gurobi based cost-sensitive classification oracle
Assume there is a 'theta' field in the X data frame
Oracle=CS; Class=linear
"""
def __init__(self, off_set=0, norm_bdd=1):
self.weights = None
self.norm_bdd = norm_bdd # bound on the ell_infty norm of the coefficients (default 1)
self.off_set = off_set
self.name = 'SVM_LP'
def fit(self, X, Y, W):
w = SVM_Gurobi(X, Y, W, self.norm_bdd, self.off_set)
self.weights = pd.Series(w, index=list(X.drop(['theta'], 1)))
def predict(self, X):
y_values = (X.drop(['theta'],
axis=1)).dot(np.array(self.weights))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class LeastSquaresLearner:
"""
Basic least-squares regression based oracle
Oracle=LS; class=linear
"""
def __init__(self, Theta):
self.weights = None
self.Theta = Theta
self.name = "OLS"
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=None)
self.weights = pd.Series(self.lsqinfo[0], index=list(matX))
def predict(self, X):
y_values = (X.drop(['theta'],
axis=1)).dot(np.array(self.weights))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class LogisticRegressionLearner:
"""
Basic Logistic regression based oracle
Oracle=LR; Class=linear
"""
def __init__(self, Theta, C=10000, regr=None):
self.Theta = Theta
self.name = "LR"
if regr is None:
self.regr = LogisticRegression(random_state=0, C=C,
max_iter=1200,
fit_intercept=False,
solver='lbfgs')
else:
self.regr = regr
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.regr.fit(matX, vecY, sample_weight=vecW)
pred_prob = self.regr.predict_proba(matX)
def predict(self, X):
pred_prob = self.regr.predict_proba(X.drop(['theta'], axis=1))
prob_values = pd.DataFrame(pred_prob)[1]
y_values = (np.log(1 / prob_values - 1) / (- _LOGISTIC_C) + 1) / 2
# y_values = pd.DataFrame(pred_prob)[1]
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class RF_Classifier_Learner:
"""
Basic RF classifier based CSC
Oracle=LR; Class=Tree ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "RF Classifier"
self.clf = RandomForestClassifier(max_depth=4,
random_state=0,
n_estimators=20)
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.clf.fit(matX, vecY, sample_weight=vecW)
def predict(self, X):
pred_prob = self.clf.predict_proba(X.drop(['theta'],
axis=1))
y_values = pd.DataFrame(pred_prob)[1]
pred = 1*(y_values - X['theta'] >= 0)
return pred
class XGB_Classifier_Learner:
"""
Basic GB classifier based oracle
Oracle=LR; Class=Tree ensemble
"""
def __init__(self, Theta, clf=None):
self.Theta = Theta
self.name = "XGB Classifier"
param = {'max_depth' : 3, 'silent' : 1, 'objective' :
'binary:logistic', 'n_estimators' : 150, 'gamma' : 2}
if clf is None:
self.clf = xgb.XGBClassifier(**param)
else:
self.clf = clf
def fit(self, X, Y, W):
matX, vecY, vecW = approx_data_logistic(X, Y, W, self.Theta)
self.clf.fit(matX, vecY, sample_weight=vecW)
def predict(self, X):
pred_prob = self.clf.predict_proba(X.drop(['theta'],
axis=1))
prob_values = pd.DataFrame(pred_prob)[1]
y_values = (np.log(1 / prob_values - 1) / (- _LOGISTIC_C) + 1) / 2
pred = 1*(y_values - X['theta'] >= 0)
return pred
class RF_Regression_Learner:
"""
Basic random forest based oracle
Oracle=LS; Class=Tree ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "RF Regression"
self.regr = RandomForestRegressor(max_depth=4, random_state=0,
n_estimators=200)
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.regr.fit(matX, vecY)
def predict(self, X):
y_values = self.regr.predict(X.drop(['theta'], axis=1))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
class XGB_Regression_Learner:
"""
Gradient boosting based oracle
Oracle=LS; Class=Tree Ensemble
"""
def __init__(self, Theta):
self.Theta = Theta
self.name = "XGB Regression"
params = {'max_depth': 4, 'silent': 1, 'objective':
'reg:linear', 'n_estimators': 200, 'reg_lambda' : 1,
'gamma':1}
self.regr = xgb.XGBRegressor(**params)
def fit(self, X, Y, W):
matX, vecY = approximate_data(X, Y, W, self.Theta)
self.regr.fit(matX, vecY)
def predict(self, X):
y_values = self.regr.predict(X.drop(['theta'], axis=1))
pred = 1*(y_values - X['theta'] >= 0) # w * x - theta
return pred
# HELPER FUNCTIONS HERE FOR BestH Oracles
def SVM_Gurobi(X, Y, W, norm_bdd, off_set):
"""
Solving SVM using Gurobi solver
X: design matrix with the last column being 'theta'
impose an ell_infty constraint over the coefficients
"""
d = len(X.columns) - 1 # number of predictive features (excluding theta)
N = X.shape[0] # number of augmented examples
m = Model()
m.setParam('OutputFlag', 0)
Y_aug = Y.map({1: 1, 0: -1})
# Add a coefficient variable per feature
w = {}
for j in range(d):
w[j] = m.addVar(lb=-norm_bdd, ub=norm_bdd,
vtype=GRB.CONTINUOUS, name="w%d" % j)
w = pd.Series(w)
# Add a threshold value per augmented example
t = {} # threshold values
for i in range(N):
t[i] = m.addVar(lb=0, vtype=GRB.CONTINUOUS, name="t%d" % i)
t = pd.Series(t)
m.update()
for i in range(N):
xi = np.array(X.drop(['theta'], 1).iloc[i])
yi = Y_aug.iloc[i]
theta_i = X['theta'][i]
# Hinge Loss Constraint
m.addConstr(t[i] >= off_set - (w.dot(xi) - theta_i) * yi)
m.setObjective(quicksum(t[i] * W.iloc[i] for i in range(N)))
m.optimize()
weights = np.array([w[i].X for i in range(d)])
return np.array(weights)
def approximate_data(X, Y, W, Theta):
"""
Given the augmented data (X, Y, W), recover for each example the
prediction in Theta + alpha/2 that minimizes the cost;
Thus we reduce the size back to the original size
"""
n = int(len(X) / len(Theta)) # size of the dataset
alpha = (Theta[1] - Theta[0])/2
x = X.iloc[:n, :].drop(['theta'], 1)
pred_vec = Theta + alpha # the vector of possible preds
minimizer = {}
pred_vec = {} # mapping theta to pred vector
for pred in (Theta + alpha):
pred_vec[pred] = (1 * (pred >= pd.Series(Theta)))
for i in range(n):
index_set = [i + j * n for j in range(len(Theta))] # the set of rows for i-th example
W_i = W.iloc[index_set]
Y_i = Y.iloc[index_set]
Y_i.index = range(len(Y_i))
W_i.index = range(len(Y_i))
cost_i = {}
for pred in (Theta + alpha):
cost_i[pred] = abs(Y_i - pred_vec[pred]).dot(W_i)
minimizer[i] = min(cost_i, key=cost_i.get)
return x, pd.Series(minimizer)
def approx_data_logistic(X, Y, W, Theta):
"""
Given the augmented data (X, Y, W), recover for each example the
prediction in Theta + alpha/2 that minimizes the cost;
Then create a pair of weighted examples so that the probabilistic prediction
will minimize the log loss.
"""
n = int(len(X) / len(Theta)) # size of the dataset
alpha = (Theta[1] - Theta[0])/2
x = X.iloc[:n, :].drop(['theta'], 1)
pred_vec = {} # mapping theta to pred vector
Theta_mid = [0] + list(Theta + alpha) + [1]
Theta_mid = list(filter(lambda x: x >= 0, Theta_mid))
Theta_mid = list(filter(lambda x: x <= 1, Theta_mid))
for pred in Theta_mid:
pred_vec[pred] = (1 * (pred >= pd.Series(Theta)))
minimizer = {}
for i in range(n):
index_set = [i + j * n for j in range(len(Theta))] # the set of rows for i-th example
W_i = W.iloc[index_set]
Y_i = Y.iloc[index_set]
Y_i.index = range(len(Y_i))
W_i.index = range(len(Y_i))
cost_i = {}
for pred in Theta_mid: # enumerate different possible
# predictions
cost_i[pred] = abs(Y_i - pred_vec[pred]).dot(W_i)
minimizer[i] = min(cost_i, key=cost_i.get)
matX = pd.concat([x]*2, ignore_index=True)
y_1 = pd.Series(1, np.arange(len(x)))
y_0 = pd.Series(0, np.arange(len(x)))
vecY = pd.concat([y_1, y_0], ignore_index=True)
w_1 = pd.Series(minimizer)
w_0 = 1 - pd.Series(minimizer)
vecW =
|
pd.concat([w_1, w_0], ignore_index=True)
|
pandas.concat
|
import streamlit as st
import datetime
import pandas as pd
import numpy as np
import altair as alt
import plotly.express as px
from typing import Sequence, List, Optional, Dict
from sklearn.cluster import KMeans
from collections import defaultdict
from enum import Enum
st.set_page_config(page_title='Covid data analysis',
layout='wide', initial_sidebar_state='expanded')
DATA_URL = "https://opendata.ecdc.europa.eu/covid19/casedistribution/csv"
COUNTRY = "countriesAndTerritories"
POPULATION = 'popData2019'
class ChartType(Enum):
ALTAIR = "Altair"
PLOTLY = "Plotly"
@st.cache(ttl=60*60)
def get_original_data() -> pd.DataFrame:
"""Get the raw data, adding a date field, and removing the _ in country names
"""
data = pd.read_csv(DATA_URL)
# Adjust column names now that data are agreggated by week
data.rename(columns={"cases_weekly": "cases", "deaths_weekly": "deaths"}, inplace=True)
data['date'] = pd.to_datetime(data['dateRep'], dayfirst=True)
data[COUNTRY] = data[COUNTRY].str.replace('_', ' ')
data.sort_values(by='date', inplace=True)
return data
@st.cache(ttl=60*60)
def get_countries_names():
countries = get_original_data()[COUNTRY].unique()
countries.sort()
return countries
@st.cache(ttl=60*60)
def get_population_by_country() -> Dict[str, int]:
"""Get a map of country/population
"""
countries = get_countries_names()
data = get_original_data()
population_for = {}
for c in countries:
try:
population_for[c] = int(data[data[COUNTRY]==c][POPULATION].iloc[0])
except ValueError:
population_for[c] = -1
return population_for
@st.cache
def get_country_data(data:pd.DataFrame, country:str, series:str, is_relative:bool):
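"""Extract the time-indexed series of one country; if is_relative is True,
the values are rescaled to counts per million inhabitants."""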
df = data[data[COUNTRY] == country]
df.set_index('date', inplace=True)
if is_relative:
# compute nb of case per million
df[series] = df[series] / df[POPULATION] * 1_000_000
df_country = df[[series]]
return df_country
#
# Try to print a human readable number
def human_format_number(n):
formats = [(1e9, 999_500_000, 'B', 1), (1e6, 999_500, 'M', None), (1e3, 1e3, 'K', None)]
for (divid, limit, letter, ndigits) in formats:
if n >= limit:
rounded = round(float(n) / divid, ndigits)
return str(rounded) + letter
return str(n)
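# e.g. human_format_number(67_000_000) returns '67M'; human_format_number(850) returns '850'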
# Prepare dataframe for plotting: extract countries, average, ...
def prepare_data(data:pd.DataFrame, countries:Sequence[str], series:str, ma:int, is_relative:bool, is_cumulative:bool,
pivoting:bool=False) -> pd.DataFrame:
df_all: pd.DataFrame = None
for country in countries:
df_country = get_country_data(data, country, series, is_relative).copy(deep=True)
serie_data = df_country[[series]]
if is_cumulative:
serie_data = serie_data.cumsum()
averaged_data = serie_data.rolling(ma).mean()
if not pivoting:
averaged_data['Country'] = country
if df_all is None:
df_all = averaged_data
else:
if pivoting:
df_all[country] = averaged_data
else:
df_all = pd.concat([df_all, averaged_data])
#if not pivoting:
df_all.reset_index(inplace=True)
return df_all
def page_country_analysis():
# Header and load data
col1, col2 = st.beta_columns([1, 4])
with col1:
st.image("images/covid19x100.jpeg")
with col2:
st.title("Analysis of COVID 19 data")
today = datetime.datetime.now().strftime("%B %d, %Y at %H:%M")
st.markdown(f"Done on {today}.")
text_loading = st.markdown("Loading data...")
data = get_original_data()
text_loading.markdown(f"Data loaded: {data.size:,.0f} records, "
"from European Centre for Disease Prevention and Control.\n\n" +
"Number of cases and deaths are cumulated by week.")
if show_sample:
st.subheader("Sample Data:")
st.write(data.sample(5))
# Select Countries
st.header("Country Analysis")
countries = get_countries_names()
population_for = get_population_by_country()
# st.write(population_for)
selected_countries = st.multiselect("Select Countries:",
countries.tolist(),
default=['France', 'Spain', 'Italy'])
selected_countries_pop = ', '.join(['%s (%s)' % (c, human_format_number(population_for[c])) for c in selected_countries])
st.write(selected_countries_pop)
df_all = prepare_data(data, selected_countries, series, ma, is_relative, is_cumulative, pivoting=False)
if show_sample:
st.write("Sample Data")
st.write(df_all)
# Configure graph
cumul = (" (Cumulated)" if is_cumulative else "")
chart_title = (("Number of %s in Selected countries%s" % (series, cumul)) if not is_relative
else ("Number of %s for 1M %s" % (series, cumul)))
if chart_type == ChartType.ALTAIR:
c = alt.Chart(df_all, title=chart_title).mark_line().encode(
x='date:T',
y=(alt.Y(series,
scale=alt.Scale(type=('symlog' if log_scale else 'linear')),
axis=alt.Axis(format=',.0f',
title=(('Nb of %s per 1M habitant' % (series)) if is_relative else ('Nb. of %s'% (series)))),
)),
# Specify domain so that colors are fixed
color=alt.Color('Country', scale=alt.Scale(domain=selected_countries))
)
x = st.altair_chart(c, use_container_width=True)
else:
country_cols = list(df_all.columns)
country_cols.remove("date")
fig = px.line(df_all, title=chart_title, color='Country',
x="date", y=series,
template='none')
fig.update_traces(hovertemplate="<b>%{x|%a %B %d}</b><br>"
+ "%{y}")
fig.update_layout(hovermode="closest")
if log_scale:
fig.update_yaxes(type="log")
st.plotly_chart(fig, use_container_width=True)
# prepare data for clustering
@st.cache
def get_clustering_data(data, countries, series, ma, is_relative, is_cumulative):
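"""Build a countries x dates matrix of the selected series (smoothed over the
moving-average window) to be fed to the clustering step."""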
df_all = prepare_data(data, countries, series, ma, is_relative, is_cumulative)
df_all.set_index('date', inplace=True)
data_all : Optional[pd.DataFrame] = None
for country in countries:
cnt_data = df_all[df_all['Country'] == country][[series]]
cnt_data.rename(columns={series:country}, inplace=True)
if data_all is None:
data_all = cnt_data
else:
data_all[country] = cnt_data[country]
data_all = data_all.transpose()
data_all.fillna(0, inplace=True)
return data_all
@st.cache
def run_clustering(clus_data, nb_clusters):
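"""Run k-means with random initialization on the prepared per-country series."""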
kmeans = KMeans(init='random', n_clusters=nb_clusters, n_init=10, max_iter=600)
kmeans.fit(clus_data)
return kmeans
def page_clustering_countries():
col1, col2 = st.beta_columns([1, 4])
with col1:
st.image("images/covid19x100.jpeg")
with col2:
st.title("Clustering Countries Data")
countries = get_countries_names()
population_for = get_population_by_country()
# Excluded countries - by default smaller than 100K
default_excluded_countries = [k for (k,v) in population_for.items() if v < 100_000]
default_excluded_countries.sort()
excluded_countries = st.multiselect("Exclude Countries from clustering:",
countries.tolist(),
default=default_excluded_countries)
excluded_countries_pop = ', '.join(['%s (%s)' % (c, human_format_number(population_for[c])) for c in excluded_countries])
st.write(excluded_countries_pop)
countries = countries.tolist()
for x in excluded_countries:
countries.remove(x)
nb_clusters = st.slider('Number of clusters: ', min_value=1, max_value=10, value=7)
clus_data = get_clustering_data(get_original_data(), countries, series, ma, is_relative, is_cumulative)
run_msg = st.markdown("Running clustering...")
kmeans = run_clustering(clus_data, nb_clusters)
run_msg.markdown("Nb of iterations: " + str(kmeans.n_iter_))
centroids = kmeans.cluster_centers_
df_centro = pd.DataFrame()
for i in range(0, nb_clusters):
cluster_df =
|
pd.DataFrame(data=centroids[i], columns=[series])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
# Data merging
# Read data from the "Group3" sheet of studentsInfo.xlsx and save the serial number, gender and age columns to the data1 object
stu =
|
pd.read_excel('studentsInfo.xlsx', 'Group3')
|
pandas.read_excel
|
from selenium import webdriver as wd
from selenium.webdriver.chrome.options import Options
import time
import csv
import os
import random
import json
import shutil
import pandas as pd
from modules.checker import Checker
from modules.basic_scraping_module import get_response #, get_soup
from modules.supplier_utils.uniform_category_transformer import query_uniform_category
def read_scrapy_setting():
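"""Read the break point and the average wait time from the scraping history file img_hist.txt."""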
img_hist = "./res3/img_html_source/img_hist.txt"
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
break_point = int(data[1].split(":")[-1].strip())
avg_wait_time = int(data[2].split(":")[-1].strip())
return break_point, avg_wait_time
class Webdriver():
def get_webdriver(self):
chrome_options = Options()
chrome_options.headless = True
wd_path = "D:/geckodriver/chromedriver.exe"
driver = wd.Chrome(wd_path, options=chrome_options)
driver.implicitly_wait(10)
return driver
class Clothes_crawler():
def imgID_padding(self):
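"""Append a sequential img_id column to tier_2.csv and save the result as tier_2_modified.csv."""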
csv_path = "./res3/tier_2.csv"
df = pd.read_csv(csv_path)
#print(data.head())
new_col_data = [i for i in range(1, len(df)+1)]
new_col_name = "img_id"
df[new_col_name] = new_col_data
#print(data.tail())
out_csv_path = "./res3/tier_2_modified.csv"
df.to_csv(out_csv_path, encoding="utf-8-sig", index=False)
###########################################################
def copy_single_prod_img(self, img_id, existing_img_id):
img_dir = "./res3/img_html_source/"
shutil.copy(f"{img_dir}{existing_img_id}.jpg", f"{img_dir}{img_id}.jpg")
def download_single_prod_img(self, prod_img_link, img_id, wait_time):
img_path = f"./res3/img_html_source/{img_id}.jpg"
if os.path.exists(img_path):
print(f"[img {img_id}] Image is already exists.")
return 0
# [***] send requests to image link
# put all correct image links to the new csv file
# path: ./res3/img_html_source
if "grey.gif" not in prod_img_link:
try:
r = get_response(prod_img_link)
with open(img_path, "wb") as fp:
fp.write(r.content)
print(f"[img {img_id}] Successfully downloaded.")
# wait a random amount of time (centered on the wait_time argument)
self.wait_some_seconds(wait_time + random.randint(-53,41)/10)
return 1
except:
print(f"[img {img_id}] ERR-2: Fail to access image link when scrapying image")
return -1
else:
print("跳過")
def wait_some_seconds(self, wait_time):
#print(f"(隨機)等待 {wait_time} 秒")
print(f"等待 {wait_time} 秒")
time.sleep(wait_time)
def download_multiple_prod_imgs(self, break_point=-1, wait_time=10):
# reset crawler
self.set_driver()
# read image history if exists
img_hist = "./res3/img_html_source/img_hist.txt"
if os.path.exists(img_hist):
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
img_id_start = int(data[0].split(":")[-1].strip()) # starts from next image of last image in the directory
else:
img_id_start = 5001 # 1
# read image mapping if exists
img_mapping_json = "./res3/img_html_source/img_record.json"
if os.path.exists(img_mapping_json):
with open(img_mapping_json, "r", encoding="utf-8-sig") as fp:
img_mapping = json.load(fp)
else:
img_mapping = dict() # k: prod_link, v: img_id
# create env
env_path = r"./res3/img_html_source"
if not os.path.exists(env_path):
os.mkdir(env_path)
# read product urls from existing tier-2 csv
csv_path = "./res3/tier_2_modified.csv"
prod_data = pd.read_csv(csv_path)
#print(prod_data.tail())
'''
prodIDs, prod_SKU_IDs, prod_links = prod_data["productID"], prod_data["product_SKU_ID"], prod_data["product_link"]
'''
prodIDs, prod_SKU_IDs, prod_img_links = prod_data["productID"], prod_data["product_SKU_ID"], prod_data["product_img_link"]
# test
#print(prodIDs.head())
#print(prod_SKU_IDs.head())
#print(prod_links.head())
for i in range(img_id_start-1, len(prodIDs)): # i starts from 0
prod_img_link = prod_img_links[i]
img_id = i+1 # integer
if i == break_point: # break_point starts from 1
break
print("\n", f"No: {img_id}", sep="")
print(f"prodID: {prodIDs[i]}")
print(f"prod_SKU_ID: {prod_SKU_IDs[i]}")
print(f"prod_img_link: {prod_img_link}")
#if prod_link not in img_mapping.keys():
if not os.path.exists(f"{env_path}/{img_id}.jpg"):
img_mapping[prod_img_link] = img_id
''' Fetch the image from the server image repository '''
print(f"[img {img_id}] Image not stored locally, downloading it")
return_val = self.download_single_prod_img(prod_img_link, img_id, wait_time)
if return_val == -1:
break
else:
''' Copy the already-saved image '''
print(f"[img {img_id}] The same image already exists locally, copying it")
existing_img_id = img_mapping[prod_img_link]
self.copy_single_prod_img(img_id, existing_img_id)
#print("img_mapping:", img_mapping, sep="\n")
# Record img_id
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
msg = ""
msg += data[0].split(":")[0] + ": " + str(img_id) + "\n" # 更新開始索引
msg += data[1].split(":")[0] + ": " + "\n" # 清空結束索引
msg += data[2]
'''
with open(img_hist, "w", encoding="utf-8-sig") as fp:
fp.write(str(img_id))
'''
with open(img_hist, "w", encoding="utf-8-sig") as fp:
fp.write(msg)
# Record img_mapping
with open(img_mapping_json, "w", encoding="utf-8-sig") as fp:
json.dump(img_mapping, fp, ensure_ascii=False)
def set_driver(self):
webdriver = Webdriver()
self.driver = webdriver.get_webdriver()
def get_genres(self):
return ["WOMEN","MEN","KIDS","BABY","SPORTS"]
def scroll(self):
# Scroll down to the bottom of the page to fetch all items
for i in range(4):
self.driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
time.sleep(1)
def save_to_csv(self, list_obj, csv_path, col_names):
if not os.path.exists("./res3"):
os.mkdir("./res3")
record_amount = 0 # in case: csv file isn't exists
if os.path.exists(csv_path):
with open(csv_path, mode='r', encoding="utf-8-sig") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
# Total number of records, including the header row:
record_amount = len([record for record in csv_reader])
with open(csv_path, mode='a', newline="", encoding="utf-8-sig") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=col_names)
if record_amount == 0: # the csv file has no header yet
writer.writeheader()
for dict_obj in list_obj:
writer.writerow(dict_obj)
print("csv檔案儲存完畢!")
""" Clothes Website: Lativ, Tier-2 Scrapying """
def detailPage_links_crawling(self, page_n): # sales label page: page n of 190
try:
self.set_driver()
# First read the saved tier_1.csv data
path = "./res3/tier_1.csv"
#print(os.path.exists(path))
self.lativ_labels = pd.read_csv(path, header=0)
sales_category_link = self.lativ_labels["link"] # in first, scrapying the label_page
#print(sales_category_link)
data_amount = len(sales_category_link)
print(f"共有 {data_amount} 個銷售分頁")
#####################
''' The most important info '''
prod_info_list = list() # [{product ID, product link, product price, product image, ...}, {.......}]
child_category_list = list() # unique style (child category) names
#####################
xpaths = dict()
xpaths.setdefault("child_categories", "//div[@class='child-category-name is-style']")
xpaths.setdefault("productPage_links", "//td/div[contains(text(),'@@@')]/following-sibling::ul[1]/li[contains(@style,'margin-right')]/a")
xpaths.setdefault("SKU_ID", "//li/a[contains(@href,'!!!')]/following-sibling::div[contains(@class,'product-color-list')]/a")
#####################
print("開始爬蟲...") # 開始爬各個標籤分頁
# 爬取第n頁
sales_categoryID = page_n
link = list(sales_category_link)[page_n-1]
print(f"開始搜尋第 {sales_categoryID} 個銷售分頁 ...")
print(f"網址: {link}")
self.driver.implicitly_wait(10)
self.driver.get(link) # 某一個銷售分頁
self.scroll()
# First pull out all child-category names (e.g. the text of styles such as crew-neck tops or hooded tops)
tags = self.driver.find_elements_by_xpath(xpaths["child_categories"])
child_category_names = [tag.text.strip() for tag in tags]
print(f"共有 {len(child_category_names)} 種服飾款式")
path = "./res3/child_categories.csv"
# Iterate over all child-category names and grab the product info under each one
for i, child_category in enumerate(list(child_category_names)):
#for i, child_category in enumerate(list(child_category_names)[:3]):
print(f"正在抓第 {i+1} 種服飾款式:{child_category}")
''' 求算 child_categoryID '''
need_to_append = False
if not os.path.exists(path): # first run
if child_category not in child_category_list:
need_to_append = True
else:
child_categories = pd.read_csv(path, header=0)
if not any(child_categories["child_category"]==child_category):
[child_category_list.append(-1) for _ in range(len(child_categories["child_categoryID"]))]
need_to_append = True
if need_to_append:
child_category_list.append(child_category)
''' Pull out: the product links of all clothes in this style '''
xpath_link = xpaths["productPage_links"].replace("@@@", child_category)
tags = self.driver.find_elements_by_xpath(xpath_link)
product_links = [tag.get_attribute("href") for tag in tags]
''' Pull out: the product IDs of all clothes in this style '''
productIDs = [url.split("/")[-1] for url in product_links]
''' Pull out: the product SKU_IDs of all clothes in this style '''
product_SKU_IDs = dict()
for productID in productIDs:
xpath = xpaths["SKU_ID"].replace("!!!", productID)
tags = self.driver.find_elements_by_xpath(xpath)
prod_SKU_links = [tag.get_attribute("href").split("/")[-1] for tag in tags]
product_SKU_IDs.setdefault(productID, prod_SKU_links)
''' Pull out: the product prices of all clothes in this style '''
xpath2 = xpath_link + "/following-sibling::span"
tags = self.driver.find_elements_by_xpath(xpath2)
product_prices = [tag.text.strip() for tag in tags]
''' Pull out: the product image URLs of all clothes in this style '''
xpath3 = xpath_link + "/img"
tags = self.driver.find_elements_by_xpath(xpath3)
product_img_links = [tag.get_attribute("src") for tag in tags]
''' Pull out: the product names of all clothes in this style '''
xpath4 = xpath_link + "/following-sibling::div[@class='productname']"
tags = self.driver.find_elements_by_xpath(xpath4)
product_names = [tag.text.strip() for tag in tags]
''' Temporarily store the product info '''
for i in range(len(productIDs)):
productID = productIDs[i]
# Find all SKU_IDs of this product
product_SKU_ID_list = product_SKU_IDs[productID]
for j in range(len(product_SKU_ID_list)):
product_SKU_ID = product_SKU_ID_list[j]
prod_info_list.append({"productID": productID,
"product_SKU_ID": product_SKU_ID,
"product_name": product_names[i],
"product_price": product_prices[i],
"product_img_link": product_img_links[i],
"product_link": product_links[i],
"child_category": child_category,
"sales_categoryID": sales_categoryID
})
self.save_to_csv(prod_info_list,
"./res3/tier_2.csv",
["productID",
"product_SKU_ID",
"product_name",
"product_price",
"product_img_link",
"product_link",
"child_category",
"sales_categoryID"
])
print(f"第 {sales_categoryID} 個銷售分頁爬蟲成功!")
except:
#print(f"第 {sales_categoryID} 個銷售分頁爬蟲失敗")
print("為保持原子性(Atomicity),不儲存此銷售分頁目前爬到的所有記錄")
finally:
self.driver.close()
""" Clothes Website: Lativ, Tier-1 Scrapying """
def labelPage_links_crawling(self):
print("開始爬蟲...")
self.driver.implicitly_wait(10)
# genre_label_category => category => sales_category
# E.g., {"WOMEN":{"上衣類":{"聯名印花長T","厚棉系列"},"襯衫類":{...},...}}
genre_label_recorder = dict()
#csv_saving_type = 1
for genre in self.get_genres():
print(f"正在爬 {genre} 類商品")
url = "https://www.lativ.com.tw/{}".format(genre)
self.driver.get(url)
# 1. Grab the text of each category
# 2. Use that text to find all sales-categories under it
label_recorder = dict()
categories_text = list()
categories = self.driver.find_elements_by_xpath("//li/h2")
for category in categories:
categories_text.append(category.text)
label_recorder.setdefault(category.text, dict())
for category_text in categories_text:
print(f" 正在爬 {category_text} 標籤下的銷售類別")
xpath = f"//h2[contains(text(),'{category_text}')]" + "/../ul/li/a"
sales_categories = self.driver.find_elements_by_xpath(xpath)
for tag in sales_categories:
label_recorder[category_text].setdefault(tag.text, tag.get_attribute("href"))
genre_label_recorder[genre] = label_recorder
print("爬蟲結束!")
self.driver.close()
# Return all crawled labels
return genre_label_recorder #, csv_saving_type
def save_duplicated_SKUID_as_json():
checker = Checker()
path = "./res3/duplicated_SKU_IDs.json"
duplicated_SKU_IDs = checker.check_duplicate_SKU_IDs()
checker.save_to_json(duplicated_SKU_IDs, path)
""" Clothes Website: Lativ, Tier-4 Scrapying
(P.S. Tier-3 is for image crawling,
and Tier-4 over there is for color info recrawling)
"""
def product_scrapying(self, csv_tier_2_path, output_csv_path):
# data-structures for providing input info
df = pd.read_csv(csv_tier_2_path)
SPUs, prod_SKU_links = df["product_SPU_ID"], df["product_link"]
# data-structures for the verification use
prod_names = df["product_name"]
spu_value_counts = SPUs.value_counts()
# data-structures for recording output info
output_info = dict()
output_info.setdefault("product_SPU_ID", list())
output_info.setdefault("new_prod_ID", list())
output_info.setdefault("SKU_color_name", list())
xpaths = dict()
xpaths.setdefault("SKU_link", "//div[@class='color']/a")
xpaths.setdefault("SKU_img", "//div[@class='color']/a/img")
recorded_SPUs = dict()
recorded_SPUs.setdefault("valid", list())
recorded_SPUs.setdefault("invalid", list())
#n = 2
for i, v in enumerate(zip(SPUs, prod_SKU_links)):
#for i, v in enumerate(zip(SPUs[:n], prod_SKU_links[:n])):
SPU, prod_link = v[0], v[1]
if SPU not in recorded_SPUs["valid"]+recorded_SPUs["invalid"]:
try:
#recorded_SPUs.append(SPU)
''' Visit `prod_link` '''
self.set_driver()
self.driver.get(prod_link)
wait_time = 6 + random.randint(-26, 26)/10
self.wait_some_seconds(wait_time)
''' Append the current `prod_link` into one type of list in `recorded_SPUs` '''
# Verify the prod_link is REAL or not
# by extracting the product name and comparing
curr_prod_name = self.driver.find_element_by_xpath("//span[@class='title1']")
curr_prod_name = curr_prod_name.text
if prod_names[i] not in curr_prod_name:
recorded_SPUs["invalid"].append(SPU)
print("[WARNING] 因商品名稱與記錄不符,推測應為先前抓取時重導向到無效連結,故此輪爬蟲提早結束(進入下一輪)")
print(f"prod_names[{i}]: {prod_names[i]}")
print(f"curr_prod_name: {curr_prod_name}")
continue
''' Crawl info '''
SKU_links = self.driver.find_elements_by_xpath(xpaths["SKU_link"])
new_prod_IDs = [link.get_attribute("href").split('/')[-1] for link in SKU_links]
# Double-verify the prod_link is REAL or not
# by getting the amount of all SKU products
# and comparing with recorded # of all SKU prods under the same SPU prods
if len(new_prod_IDs) != spu_value_counts[SPU]:
recorded_SPUs["invalid"].append(SPU)
print("[WARNING] 因同一SPU商品下的SKU商品數量與記錄不符,無法正確將欲爬取資訊與先前資料做連結(需手工檢查),故此輪爬蟲提早結束(進入下一輪)")
continue
else:
recorded_SPUs["valid"].append(SPU)
print(f"[INFO] 正在記錄尚未記錄的正確 SPU_ID: {SPU}")
print("[INFO] 正在爬取 商品SKU顏色 資訊...")
imgs = self.driver.find_elements_by_xpath(xpaths["SKU_img"])
SKU_color_names = [img.get_attribute("alt") for img in imgs]
#tmp_SPUs = [str(SPU)] * len(imgs)
for new_prod_ID, SKU_color_name in zip(new_prod_IDs, SKU_color_names):
output_info["product_SPU_ID"].append(SPU)
output_info["new_prod_ID"].append(new_prod_ID)
output_info["SKU_color_name"].append(SKU_color_name)
'''
output_info["product_SPU_ID"].append(", ".join(tmp_SPUs))
output_info["new_prod_ID"].append(", ".join(new_prod_IDs))
output_info["SKU_color_name"].append(", ".join(SKU_color_names))
'''
#wait_time = 3
#print(f"[INFO] 爬蟲結束,等待 {wait_time} 秒")
#time.sleep(wait_time)
except:
print("[WARNING] 此輪發生未知錯誤")
output_df = pd.DataFrame.from_dict(output_info)
output_df.to_csv(output_csv_path,
index=False,
encoding="utf-8-sig")
###########################################################
def make_dir(self, dir_path):
if not os.path.exists(dir_path):
print(f"[INFO] 正在建立資料夾: \"{dir_path}\"",
end='\n'*2)
os.mkdir(dir_path)
else:
print(f"[INFO] 資料夾: \"{dir_path}\" 已存在")
def make_dirs(self, dir_paths):
for path in dir_paths:
self.make_dir(path)
def generate_download_link(self, server_id, spu_id, sku_id):
return f"https://s{server_id}.lativ.com.tw/i/"+\
f"{spu_id}/{spu_id}{sku_id}1/{spu_id}{sku_id}_500.jpg"
def prepare_empty_dirs_and_record_crawling_info(self, tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path):
''' Create the upper-level folders of the target path (media/products/) '''
paths_to_create = list()
tmp = output_dir.split('/')
#MIN_IDX = 1
#MAX_IDX = len(tmp)+1
MIN_IDX = 2
MAX_IDX = len(tmp)
for i in range(MIN_IDX, MAX_IDX):
#print(f"({i})", end=' ')
#print('/'.join(tmp[:i]))
paths_to_create.append('/'.join(tmp[:i]))
#print(paths_to_create)
self.make_dirs(paths_to_create)
df1 = pd.read_csv(tier_1_csv_path)
sales_cat_table = dict()
genre_category_combs = set()
for _, record in df1.iterrows():
sales_cat_id = record["sales-category ID"]
sales_cat_table.setdefault(sales_cat_id, dict())
genre = record["genre"]
uniform_category = record["uniform_category"]
sales_cat_table[sales_cat_id]["genre"] = genre
sales_cat_table[sales_cat_id]["uniform_category"] = uniform_category
genre_category_combs.add(f"{output_dir}{genre}/{uniform_category}")
# =============================================================================
# example: query `genre`, `category` for `sales-category ID`: 67
# =============================================================================
'''
test_sales_cat_id = 67
print(sales_cat_table[test_sales_cat_id]["genre"])
print(sales_cat_table[test_sales_cat_id]["uniform_category"])
'''
# =============================================================================
# example: list all unrepeated directory
# =============================================================================
'''print(genre_category_combs)'''
''' Create the middle-level folders of the target path (genre/category/) '''
genre_dirs = ['/'.join(e.split('/')[:-1]) for e in genre_category_combs]
self.make_dirs(genre_dirs)
self.make_dirs(genre_category_combs)
''' Using:
(1) a round-robin server_id (the target static file server)
(2) spu_id
(3) sku_id
Note: each spu_id + sku_id pair identifies one unique SKU product with its own product image
=> The code below uses this information to generate:
(1) product_ID (spu_id + sku_id)
(2) server_id (the target static file server)
(3) dl_link (image download link)
(4) img_path (local image path)
(5) is_dl (downloaded or not) | choices: ('Y','N')
and writes a csv file, `tier_3.csv`,
so that every product record in tier_2_v??.csv can be looked up through that csv
to find its local image path and online download URL.
'''
df2 = pd.read_csv(tier_2_csv_path)
product_IDs = df2["product_ID"]
sales_category_IDs = df2["sales_categoryID"]
#print(sales_category_IDs[:1000])
# =============================================================================
# example: get `sales_categoryID` for given `product_ID`
# =============================================================================
#test_product_ID = "52552___03" # expect for "80"
#test_product_ID = "53005___01" # expect for "81"
#print(sales_category_IDs[list(product_IDs).index(test_product_ID)])
product_dirs = list()
#download_links = list()
df3_info = {"product_ID": list(),
"server_id": list(),
"dl_link": list(),
"sales_cat_id": list(),
"img_path": list()}
server_id = 0
SERVER_NUM = 4
for product_ID in set(product_IDs):
spu_id, sku_id = product_ID.split("___")
server_id += 1
if server_id > SERVER_NUM:
server_id = 1
dl_link = self.generate_download_link(server_id, spu_id, sku_id)
#download_links.append(dl_link)
sales_cat_id = sales_category_IDs[list(product_IDs).index(product_ID)]
uniform_category = sales_cat_table[sales_cat_id]["uniform_category"]
product_dir_path = f"{output_dir}"+\
f"{sales_cat_table[sales_cat_id]['genre']}"+\
f"/{uniform_category}/{spu_id}"
img_path = f"{product_dir_path}/{product_ID}.jpg"
'''
print(f"product_ID: {product_ID}")
print(f"server_id: s{server_id}")
print(f"dl_link: {dl_link}")
print(f"sales_cat_id: {sales_cat_id}")
print(f"img_path: {img_path}\n")
'''
df3_info["product_ID"].append(product_ID)
df3_info["server_id"].append(f"s{server_id}")
df3_info["dl_link"].append(dl_link)
df3_info["sales_cat_id"].append(sales_cat_id)
df3_info["img_path"].append(img_path)
product_dirs.append(product_dir_path)
df3 = pd.DataFrame(df3_info)
df3.to_csv(tier_3_csv_path,
index=False,
encoding="utf-8-sig")
'''
#print(len(list(set(df2["product_ID"]))))
#print(len(list(set(df2["product_SPU_ID"]))))
#print(len(list(set(df2["product_SKU_ID"]))))
#print(len(list(set(df2["product_link"]))))
#print(len(download_links))
unrepeated spu+sku spu sku prod_link dl
"tier_2_v2": 4267, 1560, 3296, 1560, 4267
"tier_2_v3": 3296, 1235, 20, 1339, 3296
'''
''' Create the bottom-level folders of the target path (genre/category/) '''
self.make_dirs(product_dirs)
#print(product_dirs)
def download_single_image(self, link, img_path, wait_time):
if not os.path.exists(img_path):
try:
print("[INFO] 正在下載圖片")
r = get_response(link)
with open(img_path, "wb") as fp:
fp.write(r.content)
print("[INFO] 圖片獲取成功!\n"+\
f"圖片路徑:\n{img_path}")
# Wait a random amount of time (centered on the wait_time argument)
self.wait_some_seconds(wait_time + random.randint(-21,21)/10)
except:
print("[WARNING] 無法獲取圖片")
else:
#print(f"[INFO] 圖片已存在 (路徑: {img_path})")
print("[INFO] 圖片已存在")
def crawl_images(self, tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path):
''' Image-crawling preparation:
1. Prepare the hierarchical image folders (environment)
2. Record the image URLs, local paths, etc. into `tier_3_csv_path`
'''
self.prepare_empty_dirs_and_record_crawling_info(tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path)
'''
Crawl the images:
1. Read the info recorded in the csv file during the preparation step
2. Fetch each image and save it
'''
df3 = pd.read_csv(tier_3_csv_path)
dl_links = df3["dl_link"]
img_paths = df3["img_path"]
wait_time = 5
# =============================================================================
# example: download one image to `test_img_path` from `test_dl_link`
# =============================================================================
'''
test_dl_link = "https://www.apowersoft.tw/wp-content/uploads/2017/07/add-ass-subtitles-to-video-logo.jpg"
test_img_path = "D:/MyPrograms/Clothes2U/functions/台灣服飾商 ETL/Lativ_Crawler/res3/media_example/products/WOMEN/內衣類/46431___03/52202___01.jpg"
self.download_single_image(test_dl_link, test_img_path, wait_time)
'''
for dl_link, img_path in zip(dl_links, img_paths):
self.download_single_image(dl_link, img_path, wait_time)
#print(f"{dl_link}\n{img_path}\n")
class Content_Analyzer():
def deduplicate(self, input_csv_path, output_csv_path):
if not os.path.exists(output_csv_path):
''' 1. Get unrepeated data '''
df = pd.read_csv(input_csv_path)
#print(df.shape)
'''x = df[df.duplicated()]
print(x)
print(type(x))
print(len(x))'''
#spu_sku_list = list()
unique_prods = dict()
for index, row in list(df.iterrows()):
#print(row)
spu_id = str(row['productID'])[:5]
sku_id = str(row['product_SKU_ID'])[-3:-1]
uni_product_id = f"{spu_id}___{sku_id}"
if uni_product_id not in unique_prods:
# tier2_v2
'''
unique_prods.setdefault(uni_product_id,
{"product_ID": uni_product_id,
"product_SPU_ID": spu_id,
"product_SKU_ID": sku_id,
"product_name": row["product_name"],
"product_price": row["product_price"],
"product_link": row["product_link"],
"child_category": row["child_category"],
"sales_categoryID": row["sales_categoryID"]})
'''
# tier2_v3
unique_prods.setdefault(uni_product_id,
{"product_ID": uni_product_id,
"product_SPU_ID": spu_id,
"product_SKU_ID": sku_id,
"product_name": row["product_name"],
"product_price": row["product_price"],
"product_link": row["product_link"],
"child_category": row["child_category"],
"sales_categoryID": row["sales_categoryID"]})
else:
curr_child_category = row['child_category']
if not any([curr_child_category == existing_child_cat
for existing_child_cat
in unique_prods[uni_product_id]["child_category"].split("___")
if curr_child_category == existing_child_cat]):
unique_prods[uni_product_id]["child_category"] += f"___{curr_child_category}"
#spu_sku_list.append(f"{row['productID']}___{row['product_SKU_ID']}")
#print(f"{row['productID']}___{row['product_SKU_ID']}")
#print(len(unique_prods))
#print(unique_prods["52010011___52010021"])
#print(len(spu_sku_list))
#print(len(set(spu_sku_list)))
#spu_sku_list = list(set(spu_sku_list))
#print(len(spu_sku_list))
df = pd.DataFrame.from_dict(unique_prods,
orient='index',
columns=["product_ID","product_SPU_ID","product_SKU_ID",
"product_name","product_price","product_link",
"child_category","sales_categoryID"])
#print(df.iloc[0])
''' 2. Save unrepeated data to the new csv file '''
product_SPU_IDs, product_links = df["product_SPU_ID"], df["product_link"]
if len(product_SPU_IDs)==len(product_links):
df.to_csv(output_csv_path,
index=False,
encoding="utf-8-sig")
print(f"[INFO] Writing csv file: {output_csv_path}")
else:
print("[WARNING] The number of `product_SPU_ID` does not equal the number of `product_link`")
def modify_tier_1(self, tier_1_csv_path, output_tier_1_csv_path):
df = pd.read_csv(tier_1_csv_path)
categories = df["category"]
uniform_categories = [query_uniform_category(category) for category in categories]
df["uniform_category"] = pd.Series(uniform_categories, index=df.index)
df.to_csv(output_tier_1_csv_path,
index=False,
encoding="utf-8-sig")
def reordering_csv_records(self, tier_2_csv_path, output_csv_path):
df = pd.read_csv(tier_2_csv_path)
''' 1. Find all `SPU_ID`s and sort them in ascending order '''
# =============================================================================
# Even without set() this would work, because `tier_2_v3.csv` has already been cleaned and contains no duplicates
# =============================================================================
product_IDs = df["product_ID"]
SPU_IDs = df["product_SPU_ID"]
SPU_IDs = sorted(list(set(SPU_IDs)))
# P.S. The info above implies it needs 1,235 queries for pages
# in order to obtain all "color names" for SKU products.
#print(SPU_IDs)
''' 2. Build a list of dicts from the sorted `SPU_ID`s '''
ordered_info = list()
product_ID_indices =
|
pd.Index(product_IDs)
|
pandas.Index
|
import numpy as np
import scipy.misc
import scipy.io
import os
import pandas as pd
from utils.parse_result import parse_result
# def sample_bbs_test(crops_df, liver_masks_path ):
# """Samples bounding boxes around liver region for a test image.
# Args:
# crops_df: DataFrame, each row with filename, boolean indicating if there is liver, x1, x2, y1, y2, zoom.
# liver_masks_path: path to gt liver masks
# Output:
# test_df: DataFrame with rows [x1, y1, 0]. (Then, each bb is of 80x80, and the 0 is related
# to the data augmentation applied, which is none for test images)
# """
# # output
# test_df_rows = []
# for _, row in crops_df.iterrows():
# if row["is_liver"]:
# # constants
# file = row["liver_seg"].split('images_volumes/')[-1]
# mask_filename = file.split('.')[0]
# # binarize liver mask
# print("Mask filename", mask_filename)
# print("liver_masks_path", liver_masks_path)
# mask_liver = scipy.misc.imread(os.path.join(liver_masks_path, mask_filename + '.png'))/255.0
# mask_liver[mask_liver > 0.5] = 1.0
# mask_liver[mask_liver < 0.5] = 0.0
# # add padding to the bounding box
# padding = 25.0
# if row["total_mina"] > padding:
# row["total_mina"] = row["total_mina"] - padding
# if row["total_minb"] > padding:
# row["total_minb"] = row["total_minb"] - padding
# if row["total_maxb"] + padding < 512.0:
# row["total_maxb"] = row["total_maxb"] + 25.0
# if row["total_maxa"] + padding < 512.0:
# row["total_maxa"] = row["total_maxa"] + padding
# mult = 50.0
# max_bbs_a = int((row["total_maxa"]-row["total_mina"])/mult)
# max_bbs_b = int((row["total_maxb"]-row["total_minb"])/mult)
# for x in range (0, max_bbs_a):
# for y in range (0, max_bbs_b):
# mask_liver_aux = mask_liver[int(row["total_mina"] + mult*x):int(row["total_mina"] + (x+1)*mult), int(row["total_minb"] + y*mult):int(row["total_minb"] + (y+1)*mult)]
# pos_liver = np.sum(mask_liver_aux)
# if pos_liver > (25.0*25.0):
# if (row["total_mina"] + mult*x) > 15.0 and ((row["total_mina"] + (x+1)*mult) < 512.0) and (row["total_minb"] + y*mult) > 15.0 and ((row["total_minb"] + (y+1)*mult) < 512.0):
# a1 = row["total_mina"] + mult*x - 15.0
# b1 = row["total_minb"] + y*mult - 15.0
# test_df_rows.append(['images_volumes/{}'.format(file), a1, b1])
# test_df = pd.DataFrame(test_df_rows, columns=["file_name", "a1", "b1"])
# return test_df
def sample_bbs(crops_df, data_aug_options, liver_masks_path, lesion_masks_path):
"""
Samples bounding boxes around the liver region for a training image. In this case, we build two sets, one with the positive bounding boxes
and another with the negative bounding boxes.
Args:
crops_df: DataFrame, each row with filename, boolean indicating if there is liver, x1, x2, y1, y2, zoom.
data_aug_options: How many data augmentation options you want to generate for the training images. The maximum is 8.
liver_masks_path: path to gt liver masks
Output:
dict containing 4 dfs under the keys [test_pos, test_neg, train_pos, train_neg].
Each df has rows [file name, x1, y1, data_aug_option] (Then, each bb is of 80x80)
"""
train_positive_df_rows = []
train_negative_df_rows = []
test_positive_df_rows = []
test_negative_df_rows = []
#print(crops_df)
# read in bbs from crops df
for _, row in crops_df.iterrows():
# constants
mask_filename = os.path.splitext(row["liver_seg"])[0]
liver_seg_file = row["liver_seg"].split('liver_seg/')[-1]
if row["is_liver"] and int(mask_filename.split(os.path.sep)[0])!= 59:
# binarize masks
# liver
mask_liver = scipy.misc.imread(os.path.join(liver_masks_path, mask_filename + '.png'))/255.0
mask_liver[mask_liver > 0.5] = 1.0
mask_liver[mask_liver < 0.5] = 0.0
# lesion
mask_lesion = scipy.misc.imread(os.path.join(lesion_masks_path, mask_filename + '.png'))/255.0
mask_lesion[mask_lesion > 0.5] = 1.0
mask_lesion[mask_lesion < 0.5] = 0.0
# add padding
padding = 25.0
if row["total_mina"] > padding:
row["total_mina"] = row["total_mina"] - padding
if row["total_minb"] > padding:
row["total_minb"] = row["total_minb"] - padding
if row["total_maxb"] + padding < 512.0:
row["total_maxb"] = row["total_maxb"] + padding
if row["total_maxa"] + padding < 512.0:
row["total_maxa"] = row["total_maxa"] + padding
mult = 50.0
max_bbs_a = int((row["total_maxa"]-row["total_mina"])/mult)
max_bbs_b = int((row["total_maxb"]-row["total_minb"])/mult)
for x in range (0, max_bbs_a):
for y in range (0, max_bbs_b):
bb = np.array([int(row["total_mina"] + x*mult), int(row["total_mina"] + (x+1)*mult), int(row["total_minb"] + y*mult), int(row["total_minb"] + (y+1)*mult)])
mask_liver_aux = mask_liver[int(row["total_mina"] + mult*x):int(row["total_mina"] + (x+1)*mult), int(row["total_minb"] + y*mult):int(row["total_minb"] + (y+1)*mult)]
pos_liver = np.sum(mask_liver_aux)
if pos_liver > (25.0*25.0):
mask_lesion_aux = mask_lesion[int(row["total_mina"] + mult*x):int(row["total_mina"] + (x+1)*mult), int(row["total_minb"] + y*mult):int(row["total_minb"] + (y+1)*mult)]
pos_lesion = np.sum(mask_lesion_aux)
if (row["total_mina"] + mult*x) > 15.0 and ((row["total_mina"] + (x+1)*mult) < 490.0) and (row["total_minb"] + y*mult) > 15.0 and ((row["total_minb"] + (y+1)*mult) < 490.0):
a1 = row["total_mina"] + mult*x - 15.0
b1 = row["total_minb"] + y*mult - 15.0
if pos_lesion > mult:
if int(liver_seg_file.split(os.path.sep)[-2]) < 105:
for j in range(data_aug_options):
train_positive_df_rows.append(['images_volumes/{}'.format(liver_seg_file), a1, b1, j+1])
else:
test_positive_df_rows.append(['images_volumes/{}'.format(liver_seg_file), a1, b1, 1])
else:
if int(liver_seg_file.split(os.path.sep)[-2]) < 105:
for j in range(data_aug_options):
train_negative_df_rows.append(['images_volumes/{}'.format(liver_seg_file), a1, b1, j+1])
else:
test_negative_df_rows.append(['images_volumes/{}'.format(liver_seg_file), a1, b1, 1])
# make dfs
cols = ["file_name", "a1", "b1", "data_aug_option"]
return {
"test_pos":
|
pd.DataFrame(test_positive_df_rows, columns=cols)
|
pandas.DataFrame
|
import os
import pandas as pd
import pickle
import numpy as np
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVR
from sklearn.model_selection import KFold
from openpyxl import load_workbook
from config import parse_args
from src.log import Logger
from src.utils import Jaccard,Cosine,Peason
def save_args(args, logger):
save_args_file = os.path.join(args.root_path, "args.txt")
line = str(args)
with open(save_args_file, mode="w", encoding="utf-8") as wfp:
wfp.write(line + "\n")
logger.info("Args saved in file%s" % save_args_file)
def check_path(args):
assert os.path.exists("./data")
if not os.path.exists(args.log_path):
os.mkdir(args.log_path)
if not os.path.exists(args.processed_path):
os.mkdir(args.processed_path)
def xgb_train(args):
root_path = os.path.join(args.log_path,"third")
# dataset preparing
# determine inputs dtype
value_mol_file = os.path.join(args.raw_path, "Molecular_Descriptor.xlsx")
admet_file = os.path.join(args.raw_path, "ADMET.xlsx")
admet_mat_train = pd.read_excel(admet_file, sheet_name="training")
admet_mat_test = pd.read_excel(admet_file, sheet_name="test")
admet_mat_test_ext = admet_mat_test.copy()
x_values =
|
pd.read_excel(value_mol_file, sheet_name="training")
|
pandas.read_excel
|
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (w/o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing aggregated EPA IPM regions (keys) and lists of
the EPA IPM region abbreviations they comprise (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the PUDL glue tables that link FERC Form 1 and EIA
entities (plants and utilities) to one another.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
each partition type (sub-key) to the partitions (sub-value), such as tuples
of years or states, that are able to be ingested into PUDL for that data
source.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of column names
(values) for integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
'backup', # WHERE Is this used? because removed from DG table b/c not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
import pandas as pd, datetime as dt, numpy as np
import smtplib, re, os, ssl
import credentials, glob
import base64, shutil
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email import encoders
import trackATM as tracker
from templateReport import * # template html of all content of the email
from scanner import *
import yfinance as yahoo
file = []
for filename in glob.iglob('/home/lorenzo/Quanvas/DATABASE/*'):
file.append(filename)
csv =
|
pd.DataFrame(file,columns=['Path'])
|
pandas.DataFrame
|
import argparse
import openpyxl # NOQA: import the module to build up single binary
import pandas
import texttable
from src.data import get_data
def get_formatted_table():
table = texttable.Texttable()
table.set_deco(texttable.Texttable.HEADER)
return table
def count_include_in(products, ingreds):
for ingred in ingreds.values():
p_ids = [p.id for p in products.values() if ingred.id in p.ingreds]
ingred.extend_products(p_ids)
ingred.make_product_names(products)
ingred_list = list(ingreds.values())
return sorted(
ingred_list, key=lambda ingred: len(ingred.products), reverse=True)
def count(products, ingreds, args):
sorted_ingreds = count_include_in(products, ingreds)
table = get_formatted_table()
table.set_cols_dtype(['i', 't', 'i', 't'])
table.set_cols_align(['r', 'l', 'r', 'l'])
table.add_row(['ID', 'Name', 'Num', 'Included'])
for i, ingred in enumerate(sorted_ingreds):
if args.num >= 0 and i >= args.num:
break
p_list = ','.join(ingred.product_names)
if len(p_list) >= 30:
p_list = '{}...'.format(p_list[:27])
table.add_row([ingred.id, ingred.name, len(ingred.products), p_list])
print(table.draw())
def show(products, ingreds, args):
if args.type == 'ingredient' or args.type == 'i':
if args.id is not None:
ingred = ingreds.get(args.id, None)
if ingred is None:
print('id {:d} is not found'.format(args.id))
return
p_names = [
p.name for p in products.values() if ingred.id in p.ingreds]
p_names_str = ','.join(p_names)
print('ID: {:d}'.format(ingred.id))
print('name: {}'.format(ingred.name))
print('included in: {}'.format(p_names_str))
return
table = get_formatted_table()
table.set_cols_dtype(['i', 't'])
table.set_cols_align(['r', 'l'])
table.add_row(['ID', 'Name'])
for i, ingred in enumerate(ingreds.values()):
if args.num >= 0 and i >= args.num:
break
table.add_row([ingred.id, ingred.name])
print(table.draw())
return
if args.type == 'product' or args.type == 'p':
if args.id is not None:
product = products.get(args.id, None)
if product is None:
print('id {:d} is not found'.format(args.id))
return
print('ID: {:d}'.format(product.id))
print('name: {}'.format(product.name))
print('ingredient: {}'.format(','.join(product.ingred_names)))
return
table = get_formatted_table()
table.set_cols_dtype(['i', 't', 't'])
table.set_cols_align(['r', 'l', 'l'])
table.add_row(['ID', 'Name', 'Ingredients'])
for i, p in enumerate(products.values()):
if args.num >= 0 and i >= args.num:
break
ingred_list = ','.join(p.ingred_names)
if len(ingred_list) >= 30:
ingred_list = '{}...'.format(ingred_list[:27])
table.add_row([p.id, p.name, ingred_list])
print(table.draw())
def save_excel(products, ingreds, args):
sorted_ingreds = count_include_in(products, ingreds)
df_count =
|
pandas.DataFrame(columns=['Name', 'Num', 'Included'])
|
pandas.DataFrame
|
from fundamentus import utils
import fundamentus
import pandas as pd
import pytest
###
def test_dt_iso8601_10_10():
assert utils.dt_iso8601('10/10/2020') == '2020-10-10'
def test_dt_iso8601_10_01():
assert utils.dt_iso8601('01/10/2020') == '2020-10-01'
def test_dt_iso8601_01_10():
assert utils.dt_iso8601('10/01/2020') == '2020-01-10'
###
def test_from_pt_br_01():
more_data = { 'col1': [ 11,21,31,41,51],
'col2': [ 12,22,32,42,52],
'col3': [ 13,23,33,43,53]}
b = { 'data': [ '?tst','tst()','tst$./','tst tst','tst__' ]}
b.update(more_data)
a = { 'data': [ 'tst' ,'tst' ,'tst' ,'tst_tst','tst_' ]}
a.update(more_data)
_before = pd.DataFrame( b )
_after = pd.DataFrame( a )
_before['data'] = utils.from_pt_br(_before['data'])
pd.testing.assert_frame_equal( _before, _after)
def test_from_pt_br_02():
_before = pd.DataFrame( { 'data': [ 'mês','Únicoúnico','imóvel','média adíção','tst b' ]} )
_after = pd.DataFrame( { 'data': [ 'mes','Unicounico','imovel','media_adicao','tst_b' ]} )
_before['data'] = utils.from_pt_br(_before['data'])
pd.testing.assert_frame_equal(_before, _after)
###
def test_fmt_dec():
more_data = { 'col1': [ 11,21],
'col2': [ 12,22],
'col3': [ 13,23]}
b = { 'data': [ '45,56%','1.045,56%' ]}
b.update(more_data)
a = { 'data': [ '45.56%','1045.56%' ]}
a.update(more_data)
_before = pd.DataFrame(b)
_after = pd.DataFrame(a)
_before['data'] = utils.fmt_dec(_before['data'])
|
pd.testing.assert_frame_equal(_before, _after)
|
pandas.testing.assert_frame_equal
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
symbol = 'AMD'
market = 'SPY'
num_of_years = 1
start = dt.date.today() - dt.timedelta(days=365*num_of_years)
end = dt.date.today()
dataset = yf.download(symbol,start,end)
benchmark = yf.download(market,start,end)
dataset['Returns'] = dataset['Adj Close'].pct_change().dropna()
PP = pd.Series((dataset['High'] + dataset['Low'] + dataset['Close']) / 3)
R1 = pd.Series(2 * PP - dataset['Low'])
S1 = pd.Series(2 * PP - dataset['High'])
R2 = pd.Series(PP + dataset['High'] - dataset['Low'])
S2 = pd.Series(PP - dataset['High'] + dataset['Low'])
R3 = pd.Series(dataset['High'] + 2 * (PP - dataset['Low']))
S3 = pd.Series(dataset['Low'] - 2 * (dataset['High'] - PP))
R4 = pd.Series(dataset['High'] + 3 * (PP - dataset['Low']))
S4 = pd.Series(dataset['Low'] - 3 * (dataset['High'] - PP))
R5 = pd.Series(dataset['High'] + 4 * (PP - dataset['Low']))
S5 = pd.Series(dataset['Low'] - 4 * (dataset['High'] - PP))
P = pd.Series((dataset['Open'] + (dataset['High'] + dataset['Low'] + dataset['Close'])) / 4) # Opening Price Formula
psr = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3,'R4':R4, 'S4':S4,'R5':R5, 'S5':S5}
PSR = pd.DataFrame(psr)
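# Worked example of the classic pivot levels: for a bar with High=110,
# Low=100, Close=105 the pivot is PP = (110 + 100 + 105) / 3 = 105, so
# R1 = 2*105 - 100 = 110, S1 = 2*105 - 110 = 100,
# R2 = 105 + (110 - 100) = 115 and S2 = 105 - (110 - 100) = 95.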
dataset = dataset.join(PSR)
print(dataset.head())
pivot_point = pd.concat([dataset['Adj Close'],P,R1,S1,R2,S2,R3,S3],axis=1).plot(figsize=(18,12),grid=True)
plt.title('Stock Pivot Point')
plt.legend(['Price','P','R1','S1','R2','S2','R3','S3'], loc=0)
plt.show()
dataset['Adj Close']['2018-05-01':'2018-06-01']
date_range = dataset[['Adj Close','P','R1','S1','R2','S2','R3','S3']]['2018-05-01':'2018-06-01']# Pick Date Ranges
P = pd.Series((dataset['High'] + dataset['Low'] + 2*dataset['Close']) / 4)
R1 = pd.Series(2 * P - dataset['Low'])
S1 = pd.Series(2 * P - dataset['High'])
R2 = pd.Series(P + dataset['High'] - dataset['Low'])
S2 = pd.Series(P - dataset['High'] + dataset['Low'])
wpp = {'P':P, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2}
WPP =
|
pd.DataFrame(wpp)
|
pandas.DataFrame
|
# coding: utf-8
# In[1]:
get_ipython().magic('matplotlib inline')
from chatto_transform.sessions.mimic import mimic_common
from chatto_transform.schema.mimic import mimic_schema
from chatto_transform.lib.chunks import left_join
from chatto_transform.transforms.mimic import care_value
import pandas as pd
import numpy as np
# In[ ]:
# Load the Transfers table
# NB: This requires a 'phitransfers' table, which
# differs from the publicly accessible version of MIMIC-III
# in that it contains protected health information (PHI)
transfers = mimic_common.load_table(mimic_schema.phitransfers_schema)
mimic_schema.transfers_schema.add_prefix(transfers)
# Load the Services table
# NB: This requires a 'phiservices' table, which
# differs from the publicly accessible version of MIMIC-III
# in that it contains protected health information (PHI)
services = mimic_common.load_table(mimic_schema.phiservices_schema)
mimic_schema.services_schema.add_prefix(services)
# In[ ]:
# Date restrict the Services table simply to reduce its size slightly by
# eliminating entries far outside the dates of interest
services = services[services['services.transfertime'] > pd.Timestamp('20010101')]
# In[ ]:
# Create a 'med_service_only' dataframe: essentially a copy of the Services table that only contains entries
# related to those patients who were taken care of exclusively by the MED service during their hospital admission.
# i.e. curr_service = 'MED' and size(hadm_id) = 1
row_ids = services.groupby('services.hadm_id').size()
row_ids = row_ids[row_ids < 2]
one_service_only = services[services['services.hadm_id'].isin(row_ids.index)]
med_service_only = one_service_only[one_service_only['services.curr_service'] == 'MED']
# In[ ]:
# Left join transfers to med_service_only.
# This creates a dataframe 'df' where every transfer in the database is represented, but only those patients
# taken care of exclusively by the MED service during their stay have data from the Services table.
df = left_join(transfers, med_service_only, left_on='transfers.hadm_id', right_on='services.hadm_id')
# In[ ]:
# Remove transfers that are not related to an ICU stay
df2 = df[df['transfers.icustay_id'].notnull()]
# Filter to specified dates
# MICU == CC6D & CC7D after April 10th, 2006 (until end of dataset)
df3 = df2[(df2['transfers.intime'] > pd.Timestamp('20060410'))]
# Select out those patients who were under the care of either 'West Campus' MICU team
# MSICU is a MICU but it is on the 'East Campus' and not of interest in this study.
df4 = df3[(df3['services.curr_service'] == 'MED') & (df3['transfers.curr_careunit'] != 'MSICU')]
# In[ ]:
# Trim down the dataframe that we will check each MICU patient against to
# determine the presence of inboarders (non-MICU patients boarding in the MICU)
inboarders = df3[(df3['services.curr_service'] != 'MED') &
((df3['curr_ward'] == 'CC6D') | (df3['curr_ward'] == 'CC7D'))]
inboarders = inboarders[['transfers.intime', 'transfers.outtime', 'curr_ward']]
# In[ ]:
# For each patient under the care of a West Campus MICU team, calculate the number of
# non-MICU patients (i.e. cared for by other ICU teams) physically occupying MICU beds
# Start with a copy of the dataframe containing all the MICU patients
df5 = df4
# Create a column that defines 1 = patient being cared for by a MICU team in a location other
# than a MICU (e.g. in the SICU). We default to 0 here, then change the value if appropriate during for loop below.
df5['boarder_status'] = 0
# Create a column that distinguishes whether the patient is on the MICU Orange or Green service
# 0 = Orange, 1 = Green
df5['micu_team'] = 0
# Create columns that specify how many non-MICU patients were occupying MICU beds at the time
# each patient was admitted/transferred to the care of a MICU team
df5['cc6d_boarder_count'] = np.nan
df5['cc7d_boarder_count'] = np.nan
df5['total_boarder_count'] = np.nan
for row_index, row in df5.iterrows():
# Determine which patients in the inboarders dataframe (non-MICU patients in MICU beds) were in
# MICU-Orange (CC6D) and MICU-Green (CC7D) beds at the time of each MICU patient's ICU stay intime
cc6d_boarders = inboarders[((inboarders['transfers.intime'] < row['transfers.intime']) &
(inboarders['transfers.outtime'] > row['transfers.intime'])) &
(inboarders['curr_ward'] == 'CC6D')]
cc7d_boarders = inboarders[((inboarders['transfers.intime'] < row['transfers.intime']) &
(inboarders['transfers.outtime'] > row['transfers.intime'])) &
(inboarders['curr_ward'] == 'CC7D')]
# Create a new dataframe by concatenating the CC6D and CC7D boarder dataframes
combined_boarders = pd.concat([cc6d_boarders, cc7d_boarders])
# Store the inboarder counts in their respective columns
    df5.loc[row_index, 'cc6d_boarder_count'] = len(cc6d_boarders.index)
    df5.loc[row_index, 'cc7d_boarder_count'] = len(cc7d_boarders.index)
    df5.loc[row_index, 'total_boarder_count'] = len(combined_boarders.index)
# If this row represents a MICU patient boarding in a non-MICU ICU bed, change 'boarder_status' to 1 (default = 0)
if ((row['curr_ward'] != 'CC6D') & (row['curr_ward'] != 'CC7D')):
        df5.loc[row_index, 'boarder_status'] = 1
# If this is a MICU patient boarding in the CVICU, it is most likely a patient cared for by the MICU Green team
if (row['transfers.curr_careunit'] == 'CVICU'):
        df5.loc[row_index, 'micu_team'] = 1
# If this row represents a MICU patient in CC7D, it is almost certainly a patient cared for by the MICU Green team
if (row['curr_ward'] == 'CC7D'):
        df5.loc[row_index, 'micu_team'] = 1
# In[2]:
# Store df5
# mimic_common.df_to_csv('df5.csv', df5)
# Load df5 from stored CSV file (if we don't want to have to re-generate it)
# df5 = pd.read_csv('~/dev/data/mimic3_local_storage/df5.csv', parse_dates=[8, 15, 20])
# In[3]:
# Add the OASIS severity of illness scores to each row
oasis = pd.read_csv('~/chatto-transform/oasis.csv')
df5 = left_join(df5, oasis[['ICUSTAY_ID', 'OASIS']], left_on='transfers.icustay_id', right_on='ICUSTAY_ID')
df5 = df5.drop('ICUSTAY_ID', 1)
# Add the Elixhauser comorbidity scores to each row
elixhauser = pd.read_csv('~/chatto-transform/elixhauser.csv')
df5 = left_join(df5, elixhauser, left_on='transfers.hadm_id', right_on='hadm_id')
# In[6]:
# Team census and outboarder count for the MICU team taking care of a given patient
df5['team_census'] = np.nan
df5['team_outboarders'] = np.nan
df5['team_census_same_room'] = np.nan
# Average severity of illness measures for the ICU as a whole at a given time
df5['team_census_oasis_mean_combined'] = np.nan
df5['team_census_oasis_median_combined'] = np.nan
df5['team_census_oasis_mean_boarders'] = np.nan
df5['team_census_oasis_median_boarders'] = np.nan
df5['team_census_oasis_mean_nonboarders'] = np.nan
df5['team_census_oasis_median_nonboarders'] = np.nan
df5['team_census_oasis_mean_same_room'] = np.nan
df5['team_census_oasis_median_same_room'] = np.nan
df5['team_census_elixhauser_28day_mean_combined'] = np.nan
df5['team_census_elixhauser_28day_median_combined'] = np.nan
df5['team_census_elixhauser_28day_mean_boarders'] = np.nan
df5['team_census_elixhauser_28day_median_boarders'] = np.nan
df5['team_census_elixhauser_28day_mean_nonboarders'] = np.nan
df5['team_census_elixhauser_28day_median_nonboarders'] = np.nan
df5['team_census_elixhauser_28day_mean_same_room'] = np.nan
df5['team_census_elixhauser_28day_median_same_room'] = np.nan
df5['team_census_elixhauser_hospital_mean_combined'] = np.nan
df5['team_census_elixhauser_hospital_median_combined'] = np.nan
df5['team_census_elixhauser_hospital_mean_boarders'] = np.nan
df5['team_census_elixhauser_hospital_median_boarders'] = np.nan
df5['team_census_elixhauser_hospital_mean_nonboarders'] = np.nan
df5['team_census_elixhauser_hospital_median_nonboarders'] = np.nan
df5['team_census_elixhauser_hospital_mean_same_room'] = np.nan
df5['team_census_elixhauser_hospital_median_same_room'] = np.nan
# For each MICU patient...
for row_index, row in df5.iterrows():
# ... being taken care of by the MICU-Orange team ...
if (row['micu_team'] == 0):
# Determine how many patients (boarders + non-boarders) were assigned to the MICU Orange team at that time
# NOT INCLUSIVE OF THIS PATIENT
census = df5[(df5['transfers.intime'] < row['transfers.intime']) &
(df5['transfers.outtime'] > row['transfers.intime']) &
(df5['micu_team'] == 0)]
# Determine how many NON-boarders the MICU-Orange service was taking care of at that time.
# NOT INCLUSIVE OF THIS PATIENT
nonboarders = census[census['transfers.curr_ward'] == 'CC6D']
# Determine how many boarders the MICU-Orange service was taking care of at that time.
# NOT INCLUSIVE OF THIS PATIENT
outboarders = census[census['transfers.curr_ward'] != 'CC6D']
# outboarders = df5[(df5['transfers.intime'] < row['transfers.intime']) &
# (df5['transfers.outtime'] > row['transfers.intime']) &
# (df5['micu_team'] == 0) &
# (df5['curr_ward'] != 'CC6D')]
# Determine how many patients the MICU-Orange service was taking care of at that time...
# ...IN THE SAME ROOM AS THIS PATIENT
# ...NOT INCLUSIVE OF THIS PATIENT
census_same_room = census[census['transfers.curr_ward'] == row['transfers.curr_ward']]
# ... being taken care of by the MICU-Green team ...
else:
# Determine how many patients (boarders + non-boarders) were assigned to the MICU Green team at that time
# NOT INCLUSIVE OF THIS PATIENT
census = df5[(df5['transfers.intime'] < row['transfers.intime']) &
(df5['transfers.outtime'] > row['transfers.intime']) &
(df5['micu_team'] == 1)]
# Determine how many NON-boarders the MICU-Green service was taking care of at that time.
# NOT INCLUSIVE OF THIS PATIENT
nonboarders = census[census['transfers.curr_ward'] == 'CC7D']
# Determine how many boarders the MICU-Green service was taking care of at that time.
# NOT INCLUSIVE OF THIS PATIENT
outboarders = census[census['transfers.curr_ward'] != 'CC7D']
# outboarders = df5[(df5['transfers.intime'] < row['transfers.intime']) &
# (df5['transfers.outtime'] > row['transfers.intime']) &
# (df5['micu_team'] == 1) &
# (df5['curr_ward'] != 'CC7D')]
        # Determine how many patients the MICU-Green service was taking care of at that time...
# ...IN THE SAME ROOM AS THIS PATIENT
# ...NOT INCLUSIVE OF THIS PATIENT
census_same_room = census[census['transfers.curr_ward'] == row['transfers.curr_ward']]
    df5.loc[row_index, 'team_census'] = len(census.index)
    df5.loc[row_index, 'team_outboarders'] = len(outboarders)
    df5.loc[row_index, 'team_census_same_room'] = len(census_same_room)
    df5.loc[row_index, 'team_census_oasis_mean_combined'] = census['OASIS'].mean()
    df5.loc[row_index, 'team_census_oasis_median_combined'] = census['OASIS'].median()
    df5.loc[row_index, 'team_census_oasis_mean_boarders'] = outboarders['OASIS'].mean()
    df5.loc[row_index, 'team_census_oasis_median_boarders'] = outboarders['OASIS'].median()
    df5.loc[row_index, 'team_census_oasis_mean_nonboarders'] = nonboarders['OASIS'].mean()
    df5.loc[row_index, 'team_census_oasis_median_nonboarders'] = nonboarders['OASIS'].median()
    df5.loc[row_index, 'team_census_oasis_mean_same_room'] = census_same_room['OASIS'].mean()
    df5.loc[row_index, 'team_census_oasis_median_same_room'] = census_same_room['OASIS'].median()
    df5.loc[row_index, 'team_census_elixhauser_28day_mean_combined'] = census['elixhauser_28day'].mean()
    df5.loc[row_index, 'team_census_elixhauser_28day_median_combined'] = census['elixhauser_28day'].median()
    df5.loc[row_index, 'team_census_elixhauser_28day_mean_boarders'] = outboarders['elixhauser_28day'].mean()
    df5.loc[row_index, 'team_census_elixhauser_28day_median_boarders'] = outboarders['elixhauser_28day'].median()
    df5.loc[row_index, 'team_census_elixhauser_28day_mean_nonboarders'] = nonboarders['elixhauser_28day'].mean()
    df5.loc[row_index, 'team_census_elixhauser_28day_median_nonboarders'] = nonboarders['elixhauser_28day'].median()
    df5.loc[row_index, 'team_census_elixhauser_28day_mean_same_room'] = census_same_room['elixhauser_28day'].mean()
    df5.loc[row_index, 'team_census_elixhauser_28day_median_same_room'] = census_same_room['elixhauser_28day'].median()
    df5.loc[row_index, 'team_census_elixhauser_hospital_mean_combined'] = census['elixhauser_hospital'].mean()
    df5.loc[row_index, 'team_census_elixhauser_hospital_median_combined'] = census['elixhauser_hospital'].median()
    df5.loc[row_index, 'team_census_elixhauser_hospital_mean_boarders'] = outboarders['elixhauser_hospital'].mean()
    df5.loc[row_index, 'team_census_elixhauser_hospital_median_boarders'] = outboarders['elixhauser_hospital'].median()
    df5.loc[row_index, 'team_census_elixhauser_hospital_mean_nonboarders'] = nonboarders['elixhauser_hospital'].mean()
    df5.loc[row_index, 'team_census_elixhauser_hospital_median_nonboarders'] = nonboarders['elixhauser_hospital'].median()
    df5.loc[row_index, 'team_census_elixhauser_hospital_mean_same_room'] = census_same_room['elixhauser_hospital'].mean()
    df5.loc[row_index, 'team_census_elixhauser_hospital_median_same_room'] = census_same_room['elixhauser_hospital'].median()
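# Illustrative toy check of the overlap logic above (synthetic timestamps, added for clarity; not MIMIC data):
# a stay contributes to the census at a patient's admission time t whenever intime < t < outtime.
_toy_stays = pd.DataFrame({'intime': pd.to_datetime(['2101-01-01 00:00', '2101-01-01 06:00']),
                           'outtime': pd.to_datetime(['2101-01-02 00:00', '2101-01-01 08:00'])})
_toy_t = pd.Timestamp('2101-01-01 07:00')
_toy_concurrent = _toy_stays[(_toy_stays['intime'] < _toy_t) & (_toy_stays['outtime'] > _toy_t)]
# len(_toy_concurrent) == 2: both toy stays overlap _toy_t, mirroring how team_census is counted above.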
# In[7]:
# Store df5v2
# mimic_common.df_to_csv('df5v2.csv', df5)
# Load df5v2 from stored CSV file (if we don't want to have to re-generate it)
# df5 = pd.read_csv('~/dev/data/mimic3_local_storage/df5v2.csv', parse_dates=[8, 15, 20])
# In[8]:
# Team census and outboarder count for the OTHER MICU team (the one NOT caring for a given patient)
df5['other_team_census'] = np.nan
df5['other_team_outboarders'] = np.nan
# Average severity of illness measures for the ICU as a whole at a given time
df5['other_team_census_oasis_mean'] = np.nan
df5['other_team_census_oasis_median'] = np.nan
df5['other_team_census_elixhauser_28day_mean'] = np.nan
df5['other_team_census_elixhauser_28day_median'] = np.nan
df5['other_team_census_elixhauser_hospital_mean'] = np.nan
df5['other_team_census_elixhauser_hospital_median'] = np.nan
# For each MICU patient...
for row_index, row in df5.iterrows():
# ... being taken care of by the MICU-Orange team ...
if (row['micu_team'] == 0):
# Determine how many patients (boarders + non-boarders) were assigned to the MICU Green team at that time
census = df5[(df5['transfers.intime'] < row['transfers.intime']) &
(df5['transfers.outtime'] > row['transfers.intime']) &
(df5['micu_team'] == 1)]
# Determine how many boarders the MICU-Green service was taking care of at that time.
outboarders = census[census['transfers.curr_ward'] != 'CC7D']
# outboarders = df5[(df5['transfers.intime'] < row['transfers.intime']) &
# (df5['transfers.outtime'] > row['transfers.intime']) &
# (df5['micu_team'] == 1) &
# (df5['curr_ward'] != 'CC7D')]
# ... being taken care of by the MICU-Green team ...
else:
# Determine how many patients (boarders + non-boarders) were assigned to the MICU Orange team at that time
census = df5[(df5['transfers.intime'] < row['transfers.intime']) &
(df5['transfers.outtime'] > row['transfers.intime']) &
(df5['micu_team'] == 0)]
# Determine how many boarders the MICU-Orange service was taking care of at that time.
outboarders = census[census['transfers.curr_ward'] != 'CC6D']
# outboarders = df5[(df5['transfers.intime'] < row['transfers.intime']) &
# (df5['transfers.outtime'] > row['transfers.intime']) &
# (df5['micu_team'] == 0) &
# (df5['curr_ward'] != 'CC6D')]
    df5.loc[row_index, 'other_team_census'] = len(census.index)
    df5.loc[row_index, 'other_team_outboarders'] = len(outboarders)
    df5.loc[row_index, 'other_team_census_oasis_mean'] = census['OASIS'].mean()
    df5.loc[row_index, 'other_team_census_oasis_median'] = census['OASIS'].median()
    df5.loc[row_index, 'other_team_census_elixhauser_28day_mean'] = census['elixhauser_28day'].mean()
    df5.loc[row_index, 'other_team_census_elixhauser_28day_median'] = census['elixhauser_28day'].median()
    df5.loc[row_index, 'other_team_census_elixhauser_hospital_mean'] = census['elixhauser_hospital'].mean()
    df5.loc[row_index, 'other_team_census_elixhauser_hospital_median'] = census['elixhauser_hospital'].median()
# In[9]:
# Store df5v2b
# mimic_common.df_to_csv('df5v2b.csv', df5)
# Load df5v2 from stored CSV file (if we don't want to have to re-generate it)
# df5 = pd.read_csv('~/dev/data/mimic3_local_storage/df5v2b.csv', parse_dates=[8, 15, 20])
# In[10]:
# Load the Transfers table
msicu_transfers = mimic_common.load_table(mimic_schema.phitransfers_schema)
mimic_schema.transfers_schema.add_prefix(msicu_transfers)
# Time restrict
msicu_transfers = msicu_transfers[(msicu_transfers['transfers.intime'] > pd.Timestamp('20060401'))]
# Location restrict to the MSICU
msicu_transfers = msicu_transfers[(msicu_transfers['transfers.curr_careunit'] == 'MSICU')]
# In[11]:
# Add the OASIS severity of illness scores to each row
oasis = pd.read_csv('~/chatto-transform/oasis.csv')
msicu_transfers = left_join(msicu_transfers, oasis[['ICUSTAY_ID', 'OASIS']], left_on='transfers.icustay_id', right_on='ICUSTAY_ID')
msicu_transfers = msicu_transfers.drop(columns='ICUSTAY_ID')
# Add the Elixhauser comorbidity scores to each row
elixhauser = pd.read_csv('~/chatto-transform/elixhauser.csv')
msicu_transfers = left_join(msicu_transfers, elixhauser, left_on='transfers.hadm_id', right_on='hadm_id')
# In[12]:
# Team census and outboarder count for the Med/Surg ICU (an ICU on the hospital's other campus)
df5['msicu_team_census'] = np.nan
# df5['msicu_team_outboarders'] = np.nan
# Average severity of illness measures for the ICU as a whole at a given time
df5['msicu_team_census_oasis_mean'] = np.nan
df5['msicu_team_census_oasis_median'] = np.nan
df5['msicu_team_census_elixhauser_28day_mean'] = np.nan
df5['msicu_team_census_elixhauser_28day_median'] = np.nan
df5['msicu_team_census_elixhauser_hospital_mean'] = np.nan
df5['msicu_team_census_elixhauser_hospital_median'] = np.nan
# For each MICU patient...
for row_index, row in df5.iterrows():
    # Determine how many patients (boarders + non-boarders) were in the MSICU at that time
census = msicu_transfers[(msicu_transfers['transfers.intime'] < row['transfers.intime']) &
(msicu_transfers['transfers.outtime'] > row['transfers.intime'])]
    df5.loc[row_index, 'msicu_team_census'] = len(census.index)
    df5.loc[row_index, 'msicu_team_census_oasis_mean'] = census['OASIS'].mean()
    df5.loc[row_index, 'msicu_team_census_oasis_median'] = census['OASIS'].median()
    df5.loc[row_index, 'msicu_team_census_elixhauser_28day_mean'] = census['elixhauser_28day'].mean()
    df5.loc[row_index, 'msicu_team_census_elixhauser_28day_median'] = census['elixhauser_28day'].median()
    df5.loc[row_index, 'msicu_team_census_elixhauser_hospital_mean'] = census['elixhauser_hospital'].mean()
    df5.loc[row_index, 'msicu_team_census_elixhauser_hospital_median'] = census['elixhauser_hospital'].median()
# In[13]:
# Store df5v2c
# mimic_common.df_to_csv('df5v2c.csv', df5)
# Load df5v2c from stored CSV file (if we don't want to have to re-generate it)
# df5 = pd.read_csv('~/dev/data/mimic3_local_storage/df5v2c.csv', parse_dates=[8, 15, 20])
# In[14]:
# Add a column that estimates the EXPECTED number of outboarders
df5['expected_team_outboarders'] = np.nan
df5.expected_team_outboarders[(df5['micu_team'] == 0)] = (df5['team_census'] - (8 - df5['cc6d_boarder_count']))
df5.expected_team_outboarders[(df5['micu_team'] == 1)] = (df5['team_census'] - (8 - df5['cc7d_boarder_count']))
# Add a column that estimates the EXPECTED number of remaining beds in the nominal ICU of the team caring for the patient
df5['remaining_beds'] = np.nan
df5.remaining_beds[(df5['micu_team'] == 0)] = (8 - (df5['team_census'] - df5['team_outboarders']) - df5['cc6d_boarder_count'])
df5.remaining_beds[(df5['micu_team'] == 1)] = (8 - (df5['team_census'] - df5['team_outboarders']) - df5['cc7d_boarder_count'])
# In[15]:
# Add a column that estimates the EXPECTED number of outboarders for the OTHER MICU team
# (the one NOT taking care of the patient)
df5['other_expected_team_outboarders'] = np.nan
df5.other_expected_team_outboarders[(df5['micu_team'] == 0)] = (df5['other_team_census'] - (8 - df5['cc7d_boarder_count']))
df5.other_expected_team_outboarders[(df5['micu_team'] == 1)] = (df5['other_team_census'] - (8 - df5['cc6d_boarder_count']))
# Add a column that estimates the EXPECTED number of remaining beds in the OTHER MICU
# (the one NOT taking care of the patient)
df5['other_remaining_beds'] = np.nan
df5.other_remaining_beds[(df5['micu_team'] == 0)] = (8 - (df5['other_team_census'] - df5['other_team_outboarders']) - df5['cc7d_boarder_count'])
df5.other_remaining_beds[(df5['micu_team'] == 1)] = (8 - (df5['other_team_census'] - df5['other_team_outboarders']) - df5['cc6d_boarder_count'])
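# Hypothetical worked example of the bed arithmetic above (toy numbers; assumes the 8-bed units used in the code,
# with cc6d_boarder_count meaning CC6D beds occupied by patients of other services):
# with team_census = 10, team_outboarders = 5 and cc6d_boarder_count = 3, only 8 - 3 = 5 nominal beds are
# available to the team, so expected outboarders = 10 - 5 = 5 and remaining beds = 8 - (10 - 5) - 3 = 0.
_toy_expected_outboarders = 10 - (8 - 3)   # -> 5
_toy_remaining_beds = 8 - (10 - 5) - 3     # -> 0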
# In[54]:
# Store df5v3
# mimic_common.df_to_csv('df5v3.csv', df5)
# Load df5v3 from stored CSV file (if we don't want to have to re-generate it)
df5 = pd.read_csv('~/dev/data/mimic3_local_storage/df5v3.csv', parse_dates=[8, 15, 20])
# In[55]:
# Join admissions, patients and icustays tables into 'mortality' dataframe
icustays = mimic_common.load_table(mimic_schema.icustays_schema)
mimic_schema.icustays_schema.add_prefix(icustays)
patients = mimic_common.load_table(mimic_schema.patients_schema)
mimic_schema.patients_schema.add_prefix(patients)
admissions = mimic_common.load_table(mimic_schema.admissions_schema)
mimic_schema.admissions_schema.add_prefix(admissions)
mortality = left_join(icustays[['icustays.subject_id', 'icustays.hadm_id', 'icustays.icustay_id', 'icustays.intime', 'icustays.outtime']],
patients[['patients.subject_id', 'patients.gender', 'patients.dob', 'patients.dod', 'patients.dod_hosp', 'patients.dod_ssn']],
left_on='icustays.subject_id', right_on='patients.subject_id')
mortality = left_join(mortality,
admissions[['admissions.hadm_id', 'admissions.admittime', 'admissions.dischtime', 'admissions.deathtime',
'admissions.admission_type', 'admissions.admission_location', 'admissions.edregtime', 'admissions.edouttime', 'admissions.hospital_expire_flag',
'admissions.discharge_location', 'admissions.ethnicity']],
left_on='icustays.hadm_id', right_on='admissions.hadm_id')
# Join the mortality dataframe to the rest of the data
df6 = left_join(df5, mortality, left_on='transfers.icustay_id', right_on='icustays.icustay_id')
# In[56]:
# Create a hospital_expire_flag and icustay_expire_flag.
# It is important to use 'intime' and 'outtime' from icustays table and NOT from transfers table because the
# former state the in/out times for the entire ICU stay (which may include multiple transfer events for patients
# that move from one ICU to another), whereas the latter state the in/out times per patient bed transfer only.
# NB: The icustay/hospital_expire_flag_MOD variables add 24 hours to the end of the time interval during which
# the ICU and hospital, respectively, will have a death attributed to them. This serves several purposes:
# 1. Some deaths in the ICU may have DOD recorded as occurring several hours after ICU outtime
# 2. Some deaths in the hospital may have DOD recorded as occurring several hours after hospital discharge time
# 3. It is likely reasonable to attribute deaths occurring within 24 hours of a patient leaving an ICU or hospital
# as being related to the management or transitions of care practices of the ICU or hospital respectively.
df6['hospital_expire_flag'] = 0
df6['hospital_expire_flag_mod'] = 0
df6['icustay_expire_flag'] = 0
df6['icustay_expire_flag_mod'] = 0
df6.hospital_expire_flag[(df6['patients.dod'] > df6['admissions.admittime']) &
(df6['patients.dod'] <= df6['admissions.dischtime'])] = 1
df6.icustay_expire_flag[(df6['patients.dod'] > df6['icustays.intime']) &
(df6['patients.dod'] <= df6['icustays.outtime'])] = 1
df6.hospital_expire_flag_mod[(df6['patients.dod'] > df6['admissions.admittime']) &
(df6['patients.dod'] <= (df6['admissions.dischtime'] + pd.Timedelta(hours=24)))] = 1
df6.icustay_expire_flag_mod[(df6['patients.dod'] > df6['icustays.intime']) &
(df6['patients.dod'] <= (df6['icustays.outtime'] + pd.Timedelta(hours=24)))] = 1
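# Illustrative toy check of the 24-hour modified flag above (synthetic dates, added for clarity; not MIMIC data):
# a death 6 hours after hospital discharge is missed by hospital_expire_flag but caught by the _mod variant.
_toy_admit, _toy_disch = pd.Timestamp('2101-01-01 00:00'), pd.Timestamp('2101-01-03 00:00')
_toy_dod = pd.Timestamp('2101-01-03 06:00')
_toy_flag = int((_toy_dod > _toy_admit) & (_toy_dod <= _toy_disch))                               # -> 0
_toy_flag_mod = int((_toy_dod > _toy_admit) & (_toy_dod <= _toy_disch + pd.Timedelta(hours=24)))  # -> 1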
# In[57]:
# Calculate the MINIMUM number of days survived:
# NB: 20130601 is a ***PLACEHOLDER*** for the exact date that the Social Security Death Index was last queried
df6['days_survived'] = np.nan
df6.days_survived[(df6['patients.dod'].notnull())] = ((df6['patients.dod'] - df6['icustays.intime']).astype(int)/(1000000000*60*60*24))
df6.days_survived[(df6['patients.dod'].isnull())] = ((
|
pd.Timestamp('20130601')
|
pandas.Timestamp
|
'''
Pandas has two main data structures: Series and DataFrame.
A Series is an array-like object made up of a set of data (of any NumPy dtype) and an
associated set of data labels, called its index. The simplest Series can be created from
a single sequence of data:
'''
import pandas as pd
from pandas import Series, DataFrame
obj = pd.Series([4, 7, -5, 3])
print(obj)
'''
output
0 4
1 7
2 -5
3 3
dtype: int64
'''
'''
The string representation of a Series shows the index on the left and the values on the right.
Because no index was specified for the data, an integer index from 0 to N-1 (where N is the
length of the data) is created automatically.
The values and index attributes of a Series return its array representation and its index object.
'''
print(obj.values)
# output: [ 4 7 -5 3]
print(obj.index)
# output: RangeIndex(start=0, stop=4, step=1)
# Usually we want the Series we create to have an index that labels each data point
obj2 =
|
pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
|
pandas.Series
|
'''IO functions for various formats used: trace, sinex etc '''
import glob as _glob
import logging as _logging
import re as _re
import zlib
from io import BytesIO as _BytesIO
import numpy as _np
import pandas as _pd
# from p_tqdm import p_map as _p_map
from ..gn_const import PT_CATEGORY, TYPE_CATEGORY
from ..gn_datetime import yydoysec2datetime as _yydoysec2datetime
from .common import path2bytes
_RE_BLK_HEAD = _re.compile(rb"\+S\w+\/\w+(\s[LU]|)\s*(CORR|COVA|INFO|)[ ]*\n(?:\*[ ].+\n|)(?:\*\w.+\n|)")
_RE_STATISTICS = _re.compile(r"^[ ]([A-Z (-]+[A-Z)])[ ]+([\d+\.]+)", _re.MULTILINE)
def _get_valid_stypes(stypes):
'''Returns only stypes in allowed list
Fastest if stypes size is small'''
allowed_stypes = ['EST','APR', 'NEQ']
stypes = set(stypes) if not isinstance(stypes,set) else stypes
ok_stypes = sorted(stypes.intersection(allowed_stypes),key=allowed_stypes.index) # need EST to always be first
if len(ok_stypes) != len(stypes):
not_ok_stypes = stypes.difference(allowed_stypes)
_logging.error(f'{not_ok_stypes} not supported')
return ok_stypes
def _snx_extract_blk(snx_bytes, blk_name, remove_header=False):
'''
    Extracts blk content from sinex databytes using the +blk_name and -blk_name bounds.
    Works for both vector and matrix blks.
    Returns blk content (with or without header), count of content lines (ignoring the header),
matrix form [L or U] and matrix content type [INFO, COVA, CORR].
The latter two are empty in case of vector blk'''
blk_begin = snx_bytes.find(f'+{blk_name}'.encode())
blk_end = snx_bytes.find(f'-{blk_name}'.encode(), blk_begin)
if blk_begin == -1:
_logging.info(f'{blk_name} blk missing')
return None #if there is no block begin bound -> None is returned
if blk_end == -1:
_logging.info(f'{blk_name} blk corrupted')
return None
head_search = _RE_BLK_HEAD.search(string=snx_bytes, pos=blk_begin)
ma_form, ma_content = head_search.groups()
blk_content = snx_bytes[head_search.end():blk_end]
# blk content without header (usual request)
lines_count = blk_content.count(b'\n')
if lines_count == 0:
_logging.error(f'{blk_name} blk is empty')
return None
#may be skipped for last/first block (TODO)
if not remove_header:
blk_content = snx_bytes[head_search.span(2)[1]:blk_end]
# if header requested (1st request only)
return blk_content, lines_count, ma_form.decode(), ma_content.decode()
# ma_form, ma_content only for matrix
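# Hypothetical illustration of the block extraction above (minimal synthetic SINEX-like bytes, added for clarity;
# not taken from a real file). The helper returns (block content, content line count, matrix form, matrix content
# type); the last two are empty strings for a vector-style block such as SITE/ID.
def _snx_extract_blk_example():
    toy_blk = b"+SITE/ID\n*CODE PT DOMES\n ALIC  A 50137M001\n-SITE/ID\n"
    return _snx_extract_blk(toy_blk, 'SITE/ID')  # content bytes, 1 content line, '', ''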
def _snx_extract(snx_bytes, stypes, obj_type, verbose=True):
# obj_type= matrix or vector
if obj_type == 'MATRIX':
stypes_dict = {
'EST': 'SOLUTION/MATRIX_ESTIMATE',
'APR': 'SOLUTION/MATRIX_APRIORI',
'NEQ': 'SOLUTION/NORMAL_EQUATION_MATRIX'
}
elif obj_type == 'VECTOR':
stypes_dict = {
'EST': 'SOLUTION/ESTIMATE',
'APR': 'SOLUTION/APRIORI',
'NEQ': 'SOLUTION/NORMAL_EQUATION_VECTOR',
'ID' : 'SITE/ID'
}
snx_buffer = b''
stypes_form, stypes_content, stypes_rows = {}, {}, {}
objects_in_buf = 0
for stype in stypes:
if stype in stypes_dict.keys():
remove_header = objects_in_buf != 0
if (objects_in_buf == 0) & (obj_type == 'MATRIX'): # override matrix header as comments may be present
snx_buffer+=b'*PARA1 PARA2 ____PARA2+0__________ ____PARA2+1__________ ____PARA2+2__________\n'
remove_header = True
stype_extr = _snx_extract_blk(snx_bytes=snx_bytes,
blk_name=stypes_dict[stype],
remove_header= remove_header)
if stype_extr is not None:
snx_buffer += stype_extr[0]
stypes_rows[stype] = stype_extr[1]
stypes_form[stype] = stype_extr[2] #dict of forms
stypes_content[stype] = stype_extr[3] #dict of content
objects_in_buf += 1
else:
_logging.error(f'{stype} ({stypes_dict[stype]}) blk not found')
# return None
objects_in_buf += 1
else:
if verbose:
_logging.error(f'{stype} blk not supported')
stypes = list(stypes_rows.keys())
n_stypes = len(stypes) #existing stypes only
if n_stypes == 0:
if verbose:
_logging.error('nothing found')
return None
return _BytesIO(snx_buffer), stypes_rows, stypes_form, stypes_content
def get_variance_factor(path_or_bytes):
snx_bytes = path2bytes(path_or_bytes)
stat_bytes = _snx_extract_blk(
snx_bytes=snx_bytes, blk_name="SOLUTION/STATISTICS", remove_header=True
)
if stat_bytes is not None:
stat_dict = dict(_RE_STATISTICS.findall(stat_bytes[0].decode()))
if "VARIANCE FACTOR" in stat_dict.keys():
return float(stat_dict["VARIANCE FACTOR"])
wsqsum = (
float(stat_dict["WEIGHTED SQUARE SUM OF O-C"])
if "WEIGHTED SQUARE SUM OF O-C" in stat_dict.keys()
else float(stat_dict["SQUARED SUM OF RESIDUALS"])
)
if "DEGREES OF FREEDOM" in stat_dict.keys():
return wsqsum / float(stat_dict["DEGREES OF FREEDOM"])
else:
return wsqsum / (
float(stat_dict["NUMBER OF OBSERVATIONS"])
- float(stat_dict["NUMBER OF UNKNOWNS"]))
def _get_snx_matrix(path_or_bytes,
stypes=('APR', 'EST'),
verbose=True):
'''
stypes = "APR","EST","NEQ"
    APRIORI, ESTIMATE, NORMAL_EQUATION
    We want to extract the apriori values in the very same run with only a single parser call.
    If you use the INFO type this block should contain the normal equation matrix of the
    constraints applied to your solution in SOLUTION/ESTIMATE.
    n_elements is useful for the igs sinex files when the matrix has missing end rows.
    Fetch it from the estimates vector.
'''
if isinstance(path_or_bytes, str):
snx_bytes = path2bytes(path_or_bytes)
else:
snx_bytes = path_or_bytes
n_elements = int(snx_bytes[60:65])
extracted = _snx_extract(snx_bytes=snx_bytes,
stypes=stypes,
obj_type='MATRIX',
verbose=verbose)
if extracted is not None:
snx_buffer, stypes_rows, stypes_form, stypes_content = extracted
else:
return None # not found
matrix_raw = _pd.read_csv(snx_buffer,
delim_whitespace=True,
dtype={0: _np.int16, 1: _np.int16})
#can be 4 and 5 columns; only 2 first int16
output = []
prev_idx = 0
for i in stypes_rows.keys():
idx = stypes_rows[i]
# Where to get the n-elements for the apriori matrix? Should be taken from estimates matrix
ma_sq = _matrix_raw2square(
matrix_raw=matrix_raw[prev_idx:prev_idx + idx],
stypes_form=stypes_form[i],
n_elements=n_elements)
output.append(ma_sq)
prev_idx += idx
return output,stypes_content
def snxdf2xyzdf(snxdf,unstack=True):
types_mask = snxdf.TYPE.isin(['STAX','STAY', 'STAZ', 'VELX', 'VELY', 'VELZ',]).values
snxdf.drop(index = snxdf.index.values[~types_mask],inplace=True)
snxdf['CODE_PT'] = snxdf.CODE.values + '_' + snxdf.PT.values.astype(object)
snx_df = snxdf.drop(columns=['CODE','PT','SOLN']).set_index(['CODE_PT', 'REF_EPOCH','TYPE'])
return snx_df.unstack(2) if unstack else snx_df
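# Hypothetical illustration of the reshaping above (synthetic rows, added for clarity; not real SINEX output):
# snxdf2xyzdf keeps the STA*/VEL* rows, builds a CODE_PT key and pivots TYPE into columns.
def _snxdf2xyzdf_example():
    toy = _pd.DataFrame({'TYPE': ['STAX', 'STAY', 'STAZ'],
                         'CODE': ['ALIC'] * 3,
                         'PT': ['A'] * 3,
                         'SOLN': ['1'] * 3,
                         'REF_EPOCH': [0, 0, 0],
                         'EST': [1.0, 2.0, 3.0]})
    return snxdf2xyzdf(toy, unstack=True)  # one row indexed by ('ALIC_A', 0) with EST columns STAX/STAY/STAZ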
def _get_snx_vector(path_or_bytes, stypes=('EST','APR'), snx_format=True,verbose=True):
'''stypes = "APR","EST","NEQ"
    APRIORI, ESTIMATE, NORMAL_EQUATION
'''
path = None
if isinstance(path_or_bytes, str):
path = path_or_bytes
snx_bytes = path2bytes(path)
elif isinstance(path_or_bytes, list):
path, stypes, snx_format,verbose = path_or_bytes
snx_bytes = path2bytes(path)
else:
snx_bytes = path_or_bytes
n_header = int(snx_bytes[60:65])
if stypes == ('NEQ'):
stypes = ('APR','NEQ')
#should always return NEQ vector with APR above it
if verbose:
_logging.info('Prepending APR')
stypes = _get_valid_stypes(stypes) # EST is always first as APR may have skips
extracted = _snx_extract(snx_bytes=snx_bytes, stypes=stypes, obj_type='VECTOR', verbose=verbose)
if extracted is None:
return None
snx_buffer, stypes_rows, stypes_form, stypes_content = extracted
try:
vector_raw = _pd.read_csv(
snx_buffer,
delim_whitespace=True,
comment=b'*',
header=None,
usecols=[0,1, 2, 3, 4, 5, 8, 9],
names=['INDEX','TYPE', 'CODE', 'PT', 'SOLN', 'REF_EPOCH', 'EST', 'STD'],
dtype={
0:int,
1: TYPE_CATEGORY,
2: object,
3: PT_CATEGORY,
4: 'category', #can not be int as may be '----'
5: object,
8: _np.float_,
9: _np.float_
},
index_col='INDEX'
)
except ValueError as _e:
if _e.args[0][:33] == 'could not convert string to float':
_logging.error(f'{path} data corrupted. Skipping')
return None
else:
raise _e
if path is not None:
del snx_buffer #need to test this better
vector_raw.index = vector_raw.index.values-1 #start with 0
output = []
prev_idx = 0
for i in range(len(stypes_rows)):
stype = stypes[i]
idx = stypes_rows[stype]
vec_df = (vector_raw[prev_idx:prev_idx + idx]).copy()
if i == 0:
vec_df.REF_EPOCH = _yydoysec2datetime(vec_df.REF_EPOCH,
recenter=True,
as_j2000=True)
else:
vec_df = vec_df.iloc[:, 5:]
if vec_df.shape[0]!=n_header:
vec_df = vec_df.reindex(_np.arange(start=0, stop=n_header),fill_value=0)
if stype in ['APR', 'NEQ']:
vec_df.rename(columns={'EST': stype}, inplace=True)
vec_df.drop(columns='STD', inplace=True)
output.append(vec_df)
prev_idx += idx
output =
|
_pd.concat(output, axis=1)
|
pandas.concat
|
# built-in
import os
import datetime
import pickle
# third-party
import numpy as np
import pandas as pd
# local
import utils
def filter_data(saving_dir, patients_info_dir, raw_data_dir, modalities=None, list_patients=None):
if list_patients is None:
list_patients = [patient_id for patient_id in os.listdir(raw_data_dir) if os.path.isdir(os.path.join(raw_data_dir, patient_id))]
for patient_id in list_patients:
pat = pickle.load(open(os.path.join(patients_info_dir, patient_id), 'rb'))
print(f'\n--- Checking patient {pat.id} ---')
if modalities is None:
modalities = list(pat.modalities.keys())
filter_pat_data(saving_dir, raw_data_dir, pat, modalities)
# ---------- AUXILIARY FUNCTIONS ---------- #
def filter_pat_data(saving_directory, start_directory, pat, modalities):
    # confirm whether the patient is in the new directory and create a new directory if not
if not os.path.isdir(os.path.join(saving_directory, pat.id)):
os.makedirs(os.path.join(saving_directory, pat.id))
for modality in modalities:
print(' Filtering --- ' + modality)
# get the file name of the modality, within the start directory
file_names = [file for file in os.listdir(os.path.join(start_directory, pat.id)) if modality in file]
for file_name in file_names:
if ('baseline' in file_name and f'filtered_b_data_{modality}.h5' in os.listdir(os.path.join(saving_directory, pat.id))):
print(f' modality was already filtered for baseline, this task will be ignored')
continue
if ('seizure' in file_name and f'filtered_s_data_{modality}.h5' in os.listdir(os.path.join(saving_directory, pat.id))):
print(f' modality was already filtered for seizure, this task will be ignored')
continue
# open the file as a dataframe
file = pd.read_pickle(os.path.join(start_directory, pat.id, file_name))
# create new dataframe
fs = pat.modalities[modality]
filtered_df = get_filtered_data(file, fs)
if 'baseline' in file_name:
letter = 'b'
elif 'seizure' in file_name:
letter = 's'
filtered_df['sz'] = file['sz']
# pickle.dump(filtered_df, open(os.path.join(saving_directory, pat.id, 'filtered_'+ letter +'_data_' + modality), 'wb'))
filtered_df.to_hdf(os.path.join(saving_directory, pat.id, 'filtered_'+ letter +'_data_' + modality + '.h5'), mode='w', key='df')
def get_filtered_data(df, fs, resolution='ms'):
# confirm if there are jumps in the timestamps
diff_time = np.diff(df.index).astype(f'timedelta64[{resolution}]')
diff_time = np.argwhere(diff_time != datetime.timedelta(milliseconds=np.floor((1/fs)*1000)))
start, end = 0, -1
filtered_df = pd.DataFrame(columns=df.columns)
if len(diff_time) != 0:
diff_time = np.append(diff_time, [len(df)-1])
for d,diff in enumerate(diff_time):
print(f' Filtering segment {d+1} of {len(diff_time)}')
end = diff+1
crop_df = df.iloc[start:end]
for m in [df.columns[0]]:
crop_signal = crop_df[m].values.reshape(-1)
signal = utils.filter_modality(crop_signal, m)
filtered_df = pd.concat((filtered_df,
|
pd.DataFrame(signal, index=df.index[start:end], columns=[m])
|
pandas.DataFrame
|
### <NAME> ###
### <EMAIL> ###
### v1 ###
#/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/: 1.tsv = result of BUSCO run using reference 1 (e.g. embryophyta)
#/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/: 2.tsv = result of BUSCO run using reference 2 (e.g. chlorophyta)
#/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/: 3.tsv = result of BUSCO run using reference 3 (e.g. brassicales)
#/PATH/TO/QUOD/OUTPUT/gene_dispensability_scores.csv
#imports
import plotly.graph_objs as go
from plotly.offline import plot
import scipy.stats, random
import numpy as np
import pandas as pd
import dabest
#read files
datei = open("/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/1.tsv", "r")
busco_results = datei.readlines()
datei.close()
datei = open("/PATH/TO/QUOD/OUTPUT/gene_dispensability_scores.csv", "r")
ds_results = datei.readlines()
datei.close()
#extract genes and scores (BUSCO vs. non-BUSCO)
all_genesandscores = {}
all_genes = []
all_scores = []
for line in ds_results:
elements = line.strip().split(",")
gene = elements[0].split(".")[1]
all_genes.append(gene)
all_scores.append(float(elements[1]))
all_genesandscores[gene] = float(elements[1])
busco_results = busco_results[5:]
busco_genes = []
busco_scores = []
for line in busco_results:
try:
line = line.strip().split("\t")
if line[1] == "Complete":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Duplicated":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Fragmented":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
except KeyError:
pass
all_genesandscores2 = {}
all_genes2 = []
all_scores2 = []
for index, gene in enumerate(all_genes):
if gene not in busco_genes: #non-BUSCO genes
all_genes2.append(gene)
all_scores2.append(float(all_scores[index]))
all_genesandscores2[gene] = float(all_scores[index])
max_number_1 = max([value for value in list(all_scores2) if value != np.inf])
max_number_2 = max([value for value in list(busco_scores) if value != np.inf])
max_number = max(max_number_1, max_number_2)
Enon_BUSCO_plot_data = []
for number in sorted(list(all_scores2)):
if number >= max_number:
value = max_number
Enon_BUSCO_plot_data.append(value)
else:
Enon_BUSCO_plot_data.append(number)
EBUSCO_plot_data = []
for number in sorted(list(busco_scores)):
if number >= max_number:
value = max_number
EBUSCO_plot_data.append(value)
else:
EBUSCO_plot_data.append(number)
datei = open("/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/2.tsv", "r")
busco_results = datei.readlines()
datei.close()
datei = open("/PATH/TO/QUOD/OUTPUT/gene_dispensability_scores.csv", "r")
ds_results = datei.readlines()
datei.close()
#extract genes and scores (BUSCO vs. non-BUSCO)
all_genesandscores = {}
all_genes = []
all_scores = []
for line in ds_results:
elements = line.strip().split(",")
gene = elements[0].split(".")[1]
all_genes.append(gene)
all_scores.append(float(elements[1]))
all_genesandscores[gene] = float(elements[1])
busco_results = busco_results[5:]
busco_genes = []
busco_scores = []
for line in busco_results:
try:
line = line.strip().split("\t")
if line[1] == "Complete":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Duplicated":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Fragmented":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
except KeyError:
pass
all_genesandscores2 = {}
all_genes2 = []
all_scores2 = []
for index, gene in enumerate(all_genes):
if gene not in busco_genes: #non-BUSCO genes
all_genes2.append(gene)
all_scores2.append(float(all_scores[index]))
all_genesandscores2[gene] = float(all_scores[index])
max_number_1 = max([value for value in list(all_scores2) if value != np.inf])
max_number_2 = max([value for value in list(busco_scores) if value != np.inf])
max_number = max(max_number_1, max_number_2)
Cnon_BUSCO_plot_data = []
for number in sorted(list(all_scores2)):
if number >= max_number:
value = max_number
Cnon_BUSCO_plot_data.append(value)
else:
Cnon_BUSCO_plot_data.append(number)
CBUSCO_plot_data = []
for number in sorted(list(busco_scores)):
if number >= max_number:
value = max_number
CBUSCO_plot_data.append(value)
else:
CBUSCO_plot_data.append(number)
datei = open("/PATH/TO/BUSCO/OUTPUT/FULL/TABLE/3.tsv", "r")
busco_results = datei.readlines()
datei.close()
datei = open("/PATH/TO/QUOD/OUTPUT/gene_dispensability_scores.csv", "r")
ds_results = datei.readlines()
datei.close()
#extract genes and scores (BUSCO vs. non-BUSCO)
all_genesandscores = {}
all_genes = []
all_scores = []
for line in ds_results:
elements = line.strip().split(",")
gene = elements[0].split(".")[1]
all_genes.append(gene)
all_scores.append(float(elements[1]))
all_genesandscores[gene] = float(elements[1])
busco_results = busco_results[5:]
busco_genes = []
busco_scores = []
for line in busco_results:
try:
line = line.strip().split("\t")
if line[1] == "Complete":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Duplicated":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
elif line[1] == "Fragmented":
busco_genes.append(line[2])
busco_scores.append(all_genesandscores[line[2]])
except KeyError:
pass
all_genesandscores2 = {}
all_genes2 = []
all_scores2 = []
for index, gene in enumerate(all_genes):
if gene not in busco_genes: #non-BUSCO genes
all_genes2.append(gene)
all_scores2.append(float(all_scores[index]))
all_genesandscores2[gene] = float(all_scores[index])
max_number_1 = max([value for value in list(all_scores2) if value != np.inf])
max_number_2 = max([value for value in list(busco_scores) if value != np.inf])
max_number = max(max_number_1, max_number_2)
Bnon_BUSCO_plot_data = []
for number in sorted(list(all_scores2)):
if number >= max_number:
value = max_number
Bnon_BUSCO_plot_data.append(value)
else:
Bnon_BUSCO_plot_data.append(number)
BBUSCO_plot_data = []
for number in sorted(list(busco_scores)):
if number >= max_number:
value = max_number
BBUSCO_plot_data.append(value)
else:
BBUSCO_plot_data.append(number)
#Levene's test for equal variances: Is the variance larger for one distribution (e.g. non-BUSCOs)?
#output: test statistic, p-value
import numpy as np
print("BUSCOs, non-BUSCOs (variance)")
print("1")
print(np.var(EBUSCO_plot_data))
print(np.var(Enon_BUSCO_plot_data))
print(scipy.stats.levene(EBUSCO_plot_data,Enon_BUSCO_plot_data))
print("2")
print(np.var(CBUSCO_plot_data))
print(np.var(Cnon_BUSCO_plot_data))
print(scipy.stats.levene(CBUSCO_plot_data,Cnon_BUSCO_plot_data))
print("3")
print(np.var(BBUSCO_plot_data))
print(np.var(Bnon_BUSCO_plot_data))
print(scipy.stats.levene(BBUSCO_plot_data,Bnon_BUSCO_plot_data))
#dabest
dict_data = {"BUSCO (2)":
|
pd.Series(CBUSCO_plot_data)
|
pandas.Series
|
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
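# Illustrative usage sketch (added for clarity; not part of the original module): build a tiny toy
# close-price DataFrame and compute the EMA and DEMA defined above, using the module's naming convention.
def _ema_dema_usage_example() -> DataFrame:
    toy = pd.DataFrame({'close': [10.0, 10.5, 10.2, 10.8, 11.0, 10.9]})
    return pd.concat([ema(toy, period=3), dema(toy, period=3)], axis=1)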
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility Here change and volatility are absolute
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
    for x, y in zip(x.fillna(0).items(), y.items()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
    assert period % 2 == 0, "FRAMA period must be even"
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: int = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
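# Illustrative usage sketch (added for clarity; not part of the original module): ichimoku() above only
# needs 'high', 'low' and 'close' columns, so a small synthetic OHLC frame is enough to exercise it.
def _ichimoku_usage_example() -> DataFrame:
    rng = np.random.default_rng(0)
    close = pd.Series(100 + rng.normal(0, 1, 120).cumsum())
    toy = pd.DataFrame({'high': close + 1.0, 'low': close - 1.0, 'close': close})
    return ichimoku(toy, tenkan_period=9, kijun_period=26)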
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
        sc.items(), sma.shift().items(), data[column].items()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD =
|
pd.Series(EMA_fast - EMA_slow,name='MACD')
|
pandas.Series
|
import pytest
from xarray import DataArray
import scipy.stats as st
from numpy import (
argmin,
array,
concatenate,
dot,
exp,
eye,
kron,
nan,
reshape,
sqrt,
zeros,
)
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame
from limix.qc import normalise_covariance
from limix.qtl import scan
from limix.stats import linear_kinship, multivariate_normal as mvn
def _test_qtl_scan_st(lik):
random = RandomState(0)
n = 30
ncovariates = 3
M = random.randn(n, ncovariates)
v0 = random.rand()
v1 = random.rand()
G = random.randn(n, 4)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = random.randn(ncovariates)
alpha = random.randn(G.shape[1])
m = M @ beta + G @ alpha
y = mvn(random, m, v0 * K + v1 * eye(n))
idx = [[0, 1], 2, [3]]
if lik == "poisson":
y = random.poisson(exp(y))
elif lik == "bernoulli":
y = random.binomial(1, 1 / (1 + exp(-y)))
elif lik == "probit":
y = random.binomial(1, st.norm.cdf(y))
elif lik == "binomial":
ntrials = random.randint(0, 30, len(y))
y = random.binomial(ntrials, 1 / (1 + exp(-y)))
lik = (lik, ntrials)
r = scan(G, y, lik=lik, idx=idx, K=K, M=M, verbose=False)
str(r)
str(r.stats.head())
str(r.effsizes["h2"].head())
str(r.h0.trait)
str(r.h0.likelihood)
str(r.h0.lml)
str(r.h0.effsizes)
str(r.h0.variances)
def test_qtl_scan_st():
_test_qtl_scan_st("normal")
_test_qtl_scan_st("poisson")
_test_qtl_scan_st("bernoulli")
_test_qtl_scan_st("probit")
_test_qtl_scan_st("binomial")
def test_qtl_scan_three_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A0=A0, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt_A0A1_none():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A1 = eye(ntraits)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A1.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A1, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
Y = DataArray(Y, dims=["sample", "trait"], coords={"trait": ["WA", "Cx"]})
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, verbose=False)
df = r.effsizes["h2"]
df = df[df["test"] == 0]
assert_array_equal(df["trait"], ["WA"] * 3 + ["Cx"] * 3 + [None] * 4)
assert_array_equal(
df["env"], [None] * 6 + ["env1_WA", "env1_WA", "env1_Cx", "env1_Cx"]
)
str(r)
def test_qtl_scan_lmm():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, lik="normal", K=K, M=M, verbose=False)
pv = result.stats["pv20"]
ix_best_snp = argmin(array(pv))
M = concatenate((M, X[:, [ix_best_snp]]), axis=1)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, atol=1e-6)
def test_qtl_scan_lmm_nokinship():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"].values
assert_allclose(pv[:2], [8.159539103135342e-05, 0.10807353641893498], atol=1e-5)
def test_qtl_scan_lmm_repeat_samples_by_index():
random = RandomState(0)
nsamples = 30
samples = ["sample{}".format(i) for i in range(nsamples)]
G = random.randn(nsamples, 100)
G =
|
DataFrame(data=G, index=samples)
|
pandas.DataFrame
|
import logging
from os.path import join
import numpy as np
import pandas as pd
import xgboost as xgb
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
def infer_missing(df, target_column, inference_type, figures_dir, verbose=False):
"""Imputed infered values for a columns with missing values
by gradient boosted trees regression or classification
Parameters
----------
df : pandas dataframe
target_column: string
The column to impute values for.
inference_type: string
The type of inference: 'reg' for regression, 'clf' for classification
figures_dir: filepath
File path to the directory where feature importance figures
will be stored.
verbose: bool
Returns
-------
df: pandas dataframe
        The input dataframe completed with inferred values.
"""
# TODO: Hyperopt the CV'ed version of this function
if inference_type not in ("reg", "clf"):
raise ValueError(inference_type)
    # Remove some variables having the same prefix as the target
# to prevent leaking data from added & related vars
target_prefix = target_column[:3]
input_columns = [c for c in df.columns if not c.startswith(target_prefix)]
# Make X, y
missing_mask = pd.isnull(df.loc[:, target_column])
y_full = df.loc[~missing_mask, target_column]
# One-hot encode string columns
X = pd.get_dummies(df.loc[:, input_columns], dummy_na=True)
X_missing = X.loc[missing_mask, :]
X_full = X.loc[~missing_mask, :]
    fig, ax = plt.subplots(1, 1, figsize=rect_figsize)
    y_full.hist(
        bins="auto", density=True, alpha=0.4, color="grey", label="Original values"
    )
# Make train/test split
if inference_type == "clf":
# Some classes are rare, here we artificially change the labels
        # to the nearest neighbours
labels, class_counts = np.unique(y_full, return_counts=True)
for i, (label, count) in enumerate(zip(labels, class_counts)):
if count < 2:
y_full[y_full == label] = labels[i - 1]
stratify = y_full
else:
try:
# Stratify by quantiles if possible
stratify, _ = pd.factorize(pd.qcut(y_full, 20, duplicates="drop"))
except ValueError:
stratify = None
try:
X_train, X_valid, y_train, y_valid = train_test_split(
X_full, y_full, test_size=0.5, random_state=seed, stratify=stratify
)
except ValueError:
logger.warning(
"[Imputation] Stratified split failed for {}".format(target_column)
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_full, y_full, test_size=0.5, random_state=seed, stratify=None
)
logger.info(
"[Imputation] Column {}, n_missing={}/{}, train/test={}/{}".format(
target_column, missing_mask.sum(), len(X), len(X_train), len(X_valid)
)
)
# Choose model
if inference_type == "clf":
booster = xgb.XGBClassifier(seed=seed)
else:
booster = xgb.XGBRegressor(seed=seed)
# booster = xgb.cv(param, dtrain, num_round, nfold=20, stratified=True,
# metrics=['error'], seed=seed,
# callbacks=[xgb.callback.print_evaluation(show_stdv=True),
# xgb.callback.early_stop(3)])
# Fit, predict
booster.fit(
X_train,
y_train,
early_stopping_rounds=1,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=False,
)
# Write back model prediction
preds = booster.predict(X_missing, ntree_limit=booster.best_iteration)
imputed_serie = df.loc[:, target_column].copy()
imputed_serie.loc[missing_mask] = preds
|
pd.Series(preds)
|
pandas.Series
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        # Note: DataFrame.apply returns a new frame; assign it back if numeric coercion is actually wanted,
        # e.g. analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')
        analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            # Series.dtypes is a dtype object, never a str instance; use the pandas type check
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterate columns as (name, Series) pairs
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean via a weighted average (multiplication) so that dividing a large sum cannot produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using the multiplication-based mean, since division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            # Accumulate the squared deviations starting from zero (not from the mean)
            sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            # dataAnalysisCleaned is still None here, so filter the input frame instead
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5":
|
pandas.StringDtype()
|
pandas.StringDtype
|
from pathlib import Path
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Series,
_testing as tm,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
from pandas.io.pytables import TableIterator
pytestmark = pytest.mark.single
def test_read_missing_key_close_store(setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(setup_path):
# GH 28699
with
|
ensure_clean_path(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_path
|
#Rule 27 - Email ID should be complete, it should not contain any space.
def email_id_validity(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
    regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'  # raw string so the backslashes are not treated as escapes
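    # Reading of the pattern (a standard email check for this rule):
    #   ^\w+([\.-]?\w+)*      local part: word characters, optionally separated by '.' or '-'
    #   @\w+([\.-]?\w+)*      domain labels with the same separators
    #   (\.\w{2,3})+$         one or more 2-3 character TLD segments; no spaces anywhere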
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Email_id_validity"
config=
|
pd.read_excel(configFile)
|
pandas.read_excel
|
##############################################################
# Author: <NAME>
##############################################################
'''
Module : create_kallisto_ec_count_matrix
Description : Create equivalence class matrix from kallisto.
Copyright : (c) <NAME>, Dec 2018
License : MIT
Maintainer : <EMAIL>
Portability : POSIX
Take equivalence class output from kallisto's batch mode
(matrix.ec) and create an EC matrix that can be used for DE/DTU
'''
import os
import argparse
import re
import pandas as pd
import numpy as np
import gc
parser = argparse.ArgumentParser()
parser.add_argument(dest='ec_file', help="Kallisto equivalence class file (matrix.ec).")
parser.add_argument(dest='counts_file', help="Kallisto counts file (matrix.tsv).")
parser.add_argument(dest='samples_file', help="Kallisto samples file (matrix.cells).")
parser.add_argument(dest='tx_ids_file',
help='''File containing one transcript ID per line,
in same order as the fasta reference used for kallisto.''')
parser.add_argument(dest='out_file', help="Output file.")
args = parser.parse_args()
ec_file = args.ec_file
counts_file = args.counts_file
samples_file = args.samples_file
tx_ids_file = args.tx_ids_file
out_file = args.out_file
ec_df = pd.read_csv(ec_file, header=None, sep='\t', names=['ec_names', 'tx_ids'])
counts = pd.read_csv(counts_file, header=None, sep='\t', names=['ec_names', 'sample_id', 'count'])
samples = pd.read_csv(samples_file, header=None, sep='\t')[0].values
tx_ids = pd.read_csv(tx_ids_file, header=None)[0].values
print('restructuring EC counts...')
counts = pd.merge(counts, ec_df, on='ec_names')
counts = counts.pivot_table(index=['ec_names', 'tx_ids'], columns=['sample_id'], fill_value=0)
counts = counts.reset_index()
counts.columns = counts.columns.droplevel()
counts.columns = np.concatenate([['ec_names', 'tx_ids'], samples])
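# After the pivot, each row is one equivalence class keyed by (ec_names, tx_ids) and each
# remaining column is a sample, holding that sample's EC count (0 where the EC was absent).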
print('separating transcript IDs...')
ec_tmp = ec_df[ec_df.ec_names.isin(counts.ec_names)]
tx_stack = ec_tmp['tx_ids'].str.split(',').apply(pd.Series,1).stack()
tx_stack =
|
pd.DataFrame(tx_stack, columns=['tx_id'])
|
pandas.DataFrame
|
import os
import zipfile
import logging
import urllib.request  # urlretrieve lives in urllib.request on Python 3
import time
import pandas as pd
def load_dataset(datasets_folder, dataset_url= "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip"):
"""
queries any of the movielens dataset and stores it into the desired folder
:param dataset_url: the url from where to fetch the dataset (from movielens.org)
:param dataset_path: the path on where to store the dataset
:return: the path to the dataset folder
"""
zipfile_name = os.path.basename(dataset_url)
dataset_path = os.path.join(datasets_folder, zipfile_name)
#if folder does not exists
if not os.path.exists(datasets_folder):
os.makedirs(datasets_folder)
# if the .zip file doesn't exist
if not os.path.isfile(dataset_path):
logging.info('downloading dataset %s', dataset_url)
zf = urllib.request.urlretrieve(dataset_url, dataset_path)
with zipfile.ZipFile(dataset_path, "r") as z: z.extractall(datasets_folder)
else:
logging.info('dataset was already downloaded')
# return the extracted folder that contains all the rating files
logging.info('dataset stored in: %s', os.path.splitext(dataset_path)[0])
return os.path.splitext(dataset_path)[0]
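# Hedged usage sketch (the path below is illustrative, not part of the module):
#     dataset_folder = load_dataset("./datasets")
#     # -> "./datasets/ml-latest-small", containing ratings.csv, movies.csv and links.csv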
def load_personal_ratings(datasets_folder, ratings_file, customer_number):
"""
:param datasets_folder: the folder where the original dataset was stored
:param ratings_file: the file created with personal ratings
    :param customer_number: the user id assigned to the personal ratings
:return: a structure with the personal ratings, incluiding a userId
"""
# load personal ratings and format into the right format
my_ratings_file = os.path.join(datasets_folder, ratings_file)
my_ratings = pd.read_csv(my_ratings_file)
my_ratings['userId'] = customer_number
my_ratings['timestamp'] = int(round(time.time() * 1000))
my_ratings = my_ratings[['userId', 'movieId', 'rating', 'timestamp']]
logging.info("loaded %d personal ratings", len(my_ratings.index))
return my_ratings
def merge_datasets(dataset_folder, my_ratings_file):
"""
:param dataset_folder: folder was previously the dataset is downloaded to
:param my_ratings_file: where is the personal recommendations file stored
:return: a dataframe with the ratings (merges original ratings with personal ratings)
"""
# load original dataset ratings file
ratings_file = os.path.join(dataset_folder, 'ratings.csv')
ratings = pd.read_csv(ratings_file)
# append personal ratings to ratings dataframe from original dataset
customer_number = ratings.userId.max() + 1
my_ratings = load_personal_ratings(dataset_folder, my_ratings_file, customer_number=customer_number)
ratings = ratings.append(my_ratings)
# load movie metadata
movies_file = os.path.join(dataset_folder, 'movies.csv')
movies = pd.read_csv(movies_file)
logging.info("loaded %d movies", len(movies.index))
# lets use movie titles instead of id's to make things more human readable
ratings = ratings.merge(movies, on='movieId').drop(['genres','timestamp','movieId'],1)
ratings = ratings[['userId', 'title', 'rating']]
ratings.columns = ['customer', 'movie', 'rating']
logging.info("loaded %d ratings in total", len(ratings.index))
return [ratings, customer_number]
def import_imdb_ratings(imdb_exported_ratings_file, links_file, ratings_file):
"""
:param imdb_exported_ratings_file:
:param links_file:
:param ratings_file:
:return:
"""
# You can download the ratings from your imdb profile by clicking export
my_imdb_ratings = pd.read_csv(imdb_exported_ratings_file, usecols=[1,5,8])
my_imdb_ratings['const'] = my_imdb_ratings['const'].map(lambda x: str(x)[2:]) # strip the tt at the start
my_imdb_ratings['You rated'] = my_imdb_ratings['You rated'].map(lambda x: x/2) # move to 0-5 rating
# the movielens dataset has a links.csv files that we can use to match
links =
|
pd.read_csv(links_file)
|
pandas.read_csv
|
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold  # note: requires scikit-learn < 0.20; newer releases provide KFold in sklearn.model_selection
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
start_time=time.time()
print("Starting job at time:",time.time())
debug = False
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
nrows = 10000 * 1
train_df = shuffle(train_df, random_state=1234);
train_df = train_df.iloc[:nrows]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=nrows, parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", nrows=nrows, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=nrows, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=nrows, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=nrows, parse_dates=["date_from", "date_to"])
print("loading data done!")
train_user_ids = train_df.user_id.values
train_item_ids = train_df.item_id.values
train_item_ids = train_item_ids.reshape(len(train_item_ids), 1)
train_user_ids = train_user_ids.reshape(len(train_user_ids), 1)  # reshape the user ids (was mistakenly reshaping the item ids)
# =============================================================================
# Add image quality: by steeve
# =============================================================================
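# The blocks below all follow the same pattern: unpickle a feature vector and its image ids,
# wrap them in a small DataFrame keyed by 'image', then left-join that frame onto
# train_df/test_df so each ad row picks up its image-level feature.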
import pickle
with open('../input/inception_v3_include_head_max_train.p','rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p','rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns = ['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns = [f'image_quality'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns = ['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns = [f'blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns = ['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns = [f'whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns = ['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns = [f'dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# =============================================================================
# new image data
# =============================================================================
print('adding average_pixel_width ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns = ['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns = [f'average_pixel_width'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_reds ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_reds = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_reds = x['average_reds']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_reds, columns = ['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns = [f'average_reds'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_blues ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_blues = x['average_blues']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_blues = x['average_blues']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_blues, columns = ['average_blues'])
incep_test_image_df = pd.DataFrame(test_average_blues, columns = [f'average_blues'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_greens ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_greens = x['average_greens']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_greens = x['average_greens']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_greens, columns = ['average_greens'])
incep_test_image_df = pd.DataFrame(test_average_greens, columns = [f'average_greens'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding widths ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_widths = x['widths']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_widths = x['widths']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_widths, columns = ['widths'])
incep_test_image_df = pd.DataFrame(test_widths, columns = [f'widths'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding heights ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_heights = x['heights']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_heights = x['heights']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_heights, columns = ['heights'])
incep_test_image_df = pd.DataFrame(test_heights, columns = [f'heights'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
#==============================================================================
# image features by Qifeng
#==============================================================================
print('adding image features ...')
with open('../input/train_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
#==============================================================================
# image features v2 by Qifeng
#==============================================================================
print('adding image features ...')
with open('../input/train_image_features_cspace_v2.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_LAB_Ls',\
'average_LAB_As',\
'average_LAB_Bs',\
'average_YCrCb_Ys',\
'average_YCrCb_Crs',\
'average_YCrCb_Cbs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace_v2.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_LAB_Ls',\
'average_LAB_As',\
'average_LAB_Bs',\
'average_YCrCb_Ys',\
'average_YCrCb_Crs',\
'average_YCrCb_Cbs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# =============================================================================
#tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
#train_df = train_df.merge(tmp, on=["city","region"], how="left")
#train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
#test_df = test_df.merge(tmp, on=["city","region"], how="left")
#test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
#del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
train_df = train_df.merge(tmp, on="region", how="left")
test_df = test_df.merge(tmp, on="region", how="left")
del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv")
train_df = train_df.merge(tmp, on="city", how="left")
test_df = test_df.merge(tmp, on="city", how="left")
del tmp; gc.collect()
# =============================================================================
# Here Based on https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm/code
# =============================================================================
all_samples = pd.concat([train_df,train_active,test_df,test_active]).reset_index(drop=True)
all_samples.drop_duplicates(["item_id"], inplace=True)
del train_active, test_active; gc.collect()
all_periods = pd.concat([train_periods,test_periods])
del train_periods, test_periods; gc.collect()
all_periods["days_up"] = (all_periods["date_to"] - all_periods["date_from"]).dt.days
gp = all_periods.groupby(["item_id"])[["days_up"]]
gp_df = pd.DataFrame()
gp_df["days_up_sum"] = gp.sum()["days_up"]
gp_df["times_put_up"] = gp.count()["days_up"]
gp_df.reset_index(inplace=True)
gp_df = gp_df.rename(index=str, columns={"index": "item_id"})  # rename returns a new frame; assign it back
all_periods.drop_duplicates(["item_id"], inplace=True)
all_periods = all_periods.merge(gp_df, on="item_id", how="left")
all_periods = all_periods.merge(all_samples, on="item_id", how="left")
gp = all_periods.groupby(["user_id"])[["days_up_sum", "times_put_up"]].mean().reset_index()\
.rename(index=str, columns={"days_up_sum": "avg_days_up_user",
"times_put_up": "avg_times_up_user"})
n_user_items = all_samples.groupby(["user_id"])[["item_id"]].count().reset_index() \
.rename(index=str, columns={"item_id": "n_user_items"})
gp = gp.merge(n_user_items, on="user_id", how="outer") #left
del all_samples, all_periods, n_user_items
gc.collect()
train_df = train_df.merge(gp, on="user_id", how="left")
test_df = test_df.merge(gp, on="user_id", how="left")
agg_cols = list(gp.columns)[1:]
del gp; gc.collect()
for col in agg_cols:
train_df[col].fillna(-1, inplace=True)
test_df[col].fillna(-1, inplace=True)
print("merging supplimentary data done!")
# =============================================================================
# done! go to the normal steps
# =============================================================================
def rmse(predictions, targets):
print("calculating RMSE ...")
return np.sqrt(((predictions - targets) ** 2).mean())
def text_preprocessing(text):
# text = str(text)
# text = text.lower()
# text = re.sub(r"(\\u[0-9A-Fa-f]+)",r"", text)
# text = re.sub(r"===",r" ", text)
# # https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
# text = " ".join(map(str.strip, re.split('(\d+)',text)))
# regex = re.compile(u'[^[:alpha:]]')
# text = regex.sub(" ", text)
# text = " ".join(text.split())
return text
# Note: feature_engineering returns the dataframe directly and is not a generator, so the
# @contextmanager decorator has been dropped; call it as df = feature_engineering(df).
def feature_engineering(df):
# All the feature engineering here
def Do_Text_Hash(df):
print("feature engineering -> hash text ...")
df["text_feature"] = df.apply(lambda row: " ".join([str(row["param_1"]),
str(row["param_2"]), str(row["param_3"])]),axis=1)
df["text_feature_2"] = df.apply(lambda row: " ".join([str(row["param_2"]), str(row["param_3"])]),axis=1)
# df["title_description"] = df.apply(lambda row: " ".join([str(row["title"]), str(row["description"])]),axis=1)
print("feature engineering -> preprocess text ...")
df["text_feature"] = df["text_feature"].apply(lambda x: text_preprocessing(x))
df["text_feature_2"] = df["text_feature_2"].apply(lambda x: text_preprocessing(x))
df["description"] = df["description"].apply(lambda x: text_preprocessing(x))
df["title"] = df["title"].apply(lambda x: text_preprocessing(x))
# df["title_description"] = df["title_description"].apply(lambda x: text_preprocessing(x))
def Do_Datetime(df):
print("feature engineering -> date time ...")
df["wday"] = df["activation_date"].dt.weekday
df["wday"] =df["wday"].astype(np.uint8)
def Do_Label_Enc(df):
print("feature engineering -> lable encoding ...")
lbl = LabelEncoder()
cat_col = ["user_id", "region", "city", "parent_category_name",
"category_name", "user_type", "image_top_1",
"param_1", "param_2", "param_3","image",
]
for col in cat_col:
df[col] = lbl.fit_transform(df[col].astype(str))
gc.collect()
import string
count = lambda l1,l2: sum([1 for x in l1 if x in l2])
def Do_NA(df):
print("feature engineering -> fill na ...")
df["image_top_1"].fillna(-1,inplace=True)
df["image"].fillna("noinformation",inplace=True)
df["param_1"].fillna("nicapotato",inplace=True)
df["param_2"].fillna("nicapotato",inplace=True)
df["param_3"].fillna("nicapotato",inplace=True)
df["title"].fillna("nicapotato",inplace=True)
df["description"].fillna("nicapotato",inplace=True)
# price vs income
# df["price_vs_city_income"] = df["price"] / df["income"]
# df["price_vs_city_income"].fillna(-1, inplace=True)
df['average_HSV_Ss'].fillna(-1,inplace=True)
df['average_HSV_Vs'].fillna(-1,inplace=True)
df['average_LUV_Ls'].fillna(-1,inplace=True)
df['average_LUV_Us'].fillna(-1,inplace=True)
df['average_LUV_Vs'].fillna(-1,inplace=True)
df['average_HLS_Hs'].fillna(-1,inplace=True)
df['average_HLS_Ls'].fillna(-1,inplace=True)
df['average_HLS_Ss'].fillna(-1,inplace=True)
df['average_YUV_Ys'].fillna(-1,inplace=True)
df['average_YUV_Us'].fillna(-1,inplace=True)
df['average_YUV_Vs'].fillna(-1,inplace=True)
def Do_Count(df):
print("feature engineering -> do count ...")
# some count
df["num_desc_punct"] = df["description"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_desc_capE"] = df["description"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_desc_capP"] = df["description"].apply(lambda x: count(x, "[А-Я]")).astype(np.int16)
df["num_title_punct"] = df["title"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_title_capE"] = df["title"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_title_capP"] = df["title"].apply(lambda x: count(x, "[А-Я]")) .astype(np.int16)
# good, used, bad ... count
df["is_in_desc_хорошо"] = df["description"].str.contains("хорошо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_Плохо"] = df["description"].str.contains("Плохо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_новый"] = df["description"].str.contains("новый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_старый"] = df["description"].str.contains("старый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_используемый"] = df["description"].str.contains("используемый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатная_доставка"] = df["description"].str.contains("есплатная доставка").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатный_возврат"] = df["description"].str.contains("есплатный возврат").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_идеально"] = df["description"].str.contains("идеально").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_подержанный"] = df["description"].str.contains("подержанный").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_пСниженные_цены"] = df["description"].str.contains("Сниженные цены").map({True:1, False:0}).astype(np.uint8)
# new count 0604
# df["num_title_Exclamation"] = df["title"].apply(lambda x: count(x, "!")).astype(np.int16)
# df["num_title_Question"] = df["title"].apply(lambda x: count(x, "?")).astype(np.int16)
df["num_desc_Exclamation"] = df["description"].apply(lambda x: count(x, "!")).astype(np.int16)
df["num_desc_Question"] = df["description"].apply(lambda x: count(x, "?")).astype(np.int16)
def Do_Drop(df):
df.drop(["activation_date", "item_id"], axis=1, inplace=True)
def Do_Stat_Text(df):
print("feature engineering -> statistics in text ...")
textfeats = ["text_feature","text_feature_2","description", "title"]
for col in textfeats:
df[col + "_num_chars"] = df[col].apply(len).astype(np.int16)
df[col + "_num_words"] = df[col].apply(lambda comment: len(comment.split())).astype(np.int16)
df[col + "_num_unique_words"] = df[col].apply(lambda comment: len(set(w for w in comment.split()))).astype(np.int16)
df[col + "_words_vs_unique"] = (df[col+"_num_unique_words"] / df[col+"_num_words"] * 100).astype(np.float32)
gc.collect()
# choose which functions to run
Do_NA(df)
Do_Text_Hash(df)
Do_Label_Enc(df)
Do_Count(df)
Do_Datetime(df)
Do_Stat_Text(df)
Do_Drop(df)
gc.collect()
return df
def data_vectorize(df):
russian_stop = set(stopwords.words("russian"))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": "word",
"token_pattern": r"\w{1,}",
"sublinear_tf": True,
"dtype": np.float32,
"norm": "l2",
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
tfidf_para2 = {
"stop_words": russian_stop,
"analyzer": "char",
"token_pattern": r"\w{1,}",
"sublinear_tf": True,
"dtype": np.float32,
"norm": "l2",
# "min_df":5,
# "max_df":.9,
"smooth_idf": False
}
def get_col(col_name): return lambda x: x[col_name]
vectorizer = FeatureUnion([
("description", TfidfVectorizer(
ngram_range=(1, 1),
max_features=200000,#40000,18000
**tfidf_para,
preprocessor=get_col("description"))
),
# ("title_description", TfidfVectorizer(
# ngram_range=(1, 2),#(1,2)
# max_features=1800,#40000,18000
# **tfidf_para,
# preprocessor=get_col("title_description"))
# ),
("text_feature", CountVectorizer(
ngram_range=(1, 2),
preprocessor=get_col("text_feature"))
),
("title", TfidfVectorizer(
ngram_range=(1, 2),
**tfidf_para,
preprocessor=get_col("title"))
),
        # two additional text vectorizers: title2 (word-level tf-idf) and title_char (char-level tf-idf)
("title2", TfidfVectorizer(
ngram_range=(1, 1),
**tfidf_para,
preprocessor=get_col("title"))
),
("title_char", TfidfVectorizer(
ngram_range=(1, 1),#(1, 4),(1,6)
max_features=16000,#16000
**tfidf_para2,
preprocessor=get_col("title"))
),
])
vectorizer.fit(df.to_dict("records"))
ready_full_df = vectorizer.transform(df.to_dict("records"))
tfvocab = vectorizer.get_feature_names()
df.drop(["text_feature", "text_feature_2", "description","title"], axis=1, inplace=True)
df.fillna(-1, inplace=True)
return df, ready_full_df, tfvocab
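# data_vectorize returns three things: the dataframe with its raw text columns dropped and
# NaNs filled with -1, the sparse matrix stacking all TF-IDF / count blocks produced by the
# FeatureUnion, and the combined vocabulary (tfvocab) so feature names can be recovered later
# in the script (presumably for feature-importance reporting).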
# =============================================================================
# Ridge feature https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
# =============================================================================
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
NFOLDS = 5#5
SEED = 42
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((len_train,))
oof_test = np.zeros((len_test,))
oof_test_skf = np.empty((NFOLDS, len_test))
for i, (train_index, test_index) in enumerate(kf):
# print('Ridege oof Fold {}'.format(i))
x_tr = x_train[train_index]
y = np.array(y)
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
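# get_oof builds out-of-fold (OOF) Ridge features: for each of the NFOLDS splits the wrapper
# is fit on the other folds and predicts the held-out fold (filling oof_train), while its
# test-set predictions are averaged across folds into oof_test. The globals kf, len_train and
# len_test appear to be defined later in the original script, past this excerpt.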
full_df =
|
pd.concat([train_df, test_df])
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 01:04:21 2020
@author: shambhawi
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import preprocessing
from modAL.models import ActiveLearner, CommitteeRegressor
from modAL.disagreement import max_std_sampling
from joblib import Parallel, delayed
from copy import deepcopy
import math
from sklearn.feature_selection import RFECV
import time
from datetime import datetime
from statsmodels.tsa.arima_model import ARIMA
### dataset ###
#df=pd.read_csv("./dataset/train.csv")
df=
|
pd.read_csv("./dataset/LMTO_B_new.csv")
|
pandas.read_csv
|
import argparse
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
from tensorflow.python.ops.check_ops import assert_greater_equal_v2
import load_data
from tqdm import tqdm
import numpy as np
import pandas as pd
from math import e as e_VALUE
import tensorflow.keras.backend as Keras_backend
from sklearn.ensemble import RandomForestClassifier
from scipy.special import bdtrc
def func_CallBacks(Dir_Save=''):
mode = 'min'
monitor = 'val_loss'
# checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath= Dir_Save + '/best_model_weights.h5', monitor=monitor , verbose=1, save_best_only=True, mode=mode)
# Reduce_LR = tf.keras.callbacks.ReduceLROnPlateau(monitor=monitor, factor=0.1, min_delta=0.005 , patience=10, verbose=1, save_best_only=True, mode=mode , min_lr=0.9e-5 , )
# CSVLogger = tf.keras.callbacks.CSVLogger(Dir_Save + '/results.csv', separator=',', append=False)
EarlyStopping = tf.keras.callbacks.EarlyStopping( monitor = monitor,
min_delta = 0,
patience = 4,
verbose = 1,
mode = mode,
baseline = 0,
restore_best_weights = True)
return [EarlyStopping] # [checkpointer , EarlyStopping , CSVLogger]
def reading_terminal_inputs():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch" , help="number of epochs")
parser.add_argument("--bsize" , help="batch size")
parser.add_argument("--max_sample" , help="maximum number of training samples")
parser.add_argument("--naug" , help="number of augmentations")
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
parser.add_argument("--architecture_name", help='architecture name')
args = parser.parse_args()
epoch = int(args.epoch) if args.epoch else 3
number_augmentation = int(args.naug) if args.naug else 3
bsize = int(args.bsize) if args.bsize else 100
max_sample = int(args.max_sample) if args.max_sample else 1000
architecture_name = str(args.architecture_name) if args.architecture_name else 'DenseNet121'
return epoch, bsize, max_sample, architecture_name, number_augmentation
def mlflow_settings():
"""
RUN UI with postgres and HPC:
REMOTE postgres server:
# connecting to remote server through ssh tunneling
ssh -L 5000:localhost:5432 <EMAIL>
# using the mapped port and localhost to view the data
mlflow ui --backend-store-uri postgresql://artinmajdi:1234@localhost:5000/chest_db --port 6789
RUN directly from GitHub or show experiments/runs list:
export MLFLOW_TRACKING_URI=http://127.0.0.1:5000
mlflow runs list --experiment-id <id>
mlflow run --no-conda --experiment-id 5 -P epoch=2 https://github.com/artinmajdi/mlflow_workflow.git -v main
mlflow run mlflow_workflow --no-conda --experiment-id 5 -P epoch=2
PostgreSQL server style
server = f'{dialect_driver}://{username}:{password}@{ip}/{database_name}' """
postgres_connection_type = { 'direct': ('5432', 'data7-db1.cyverse.org'),
'ssh-tunnel': ('5000', 'localhost')
}
port, host = postgres_connection_type['ssh-tunnel'] # 'direct' , 'ssh-tunnel'
username = "artinmajdi"
password = '<PASSWORD>'
database_name = "chest_db_v2"
dialect_driver = 'postgresql'
server = f'{dialect_driver}://{username}:{password}@{host}:{port}/{database_name}'
Artifacts = { 'hpc': 'sftp://mohammadsmajdi@file<EMAIL>iz<EMAIL>.<EMAIL>:/home/u29/mohammadsmajdi/projects/mlflow/artifact_store',
'data7_db1': 'sftp://[email protected]:/home/artinmajdi/mlflow_data/artifact_store'} # :temp2_data7_b
return server, Artifacts['data7_db1']
def architecture(architecture_name: str='DenseNet121', input_shape: list=[224,224,3], num_classes: int=14):
input_tensor=tf.keras.layers.Input(input_shape)
if architecture_name == 'custom':
model = tf.keras.layers.Conv2D(4, kernel_size=(3,3), activation='relu')(input_tensor)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(8, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(16, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(32, activation='relu')(model)
model = tf.keras.layers.Dense(num_classes , activation='softmax')(model)
        # `model` is a tensor at this point; build the Model from the original input tensor
        return tf.keras.models.Model(inputs=input_tensor, outputs=[model])
else:
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
pooling='avg'
weights='imagenet'
include_top=False
if architecture_name == 'xception': model_architecture = tf.keras.applications.Xception
elif architecture_name == 'VGG16': model_architecture = tf.keras.applications.VGG16
elif architecture_name == 'VGG19': model_architecture = tf.keras.applications.VGG19
elif architecture_name == 'ResNet50': model_architecture = tf.keras.applications.ResNet50
elif architecture_name == 'ResNet50V2': model_architecture = tf.keras.applications.ResNet50V2
elif architecture_name == 'ResNet101': model_architecture = tf.keras.applications.ResNet101
elif architecture_name == 'ResNet101V2': model_architecture = tf.keras.applications.ResNet101V2
elif architecture_name == 'ResNet152': model_architecture = tf.keras.applications.ResNet152
elif architecture_name == 'ResNet152V2': model_architecture = tf.keras.applications.ResNet152V2
elif architecture_name == 'InceptionV3': model_architecture = tf.keras.applications.InceptionV3
elif architecture_name == 'InceptionResNetV2': model_architecture = tf.keras.applications.InceptionResNetV2
elif architecture_name == 'MobileNet': model_architecture = tf.keras.applications.MobileNet
elif architecture_name == 'MobileNetV2': model_architecture = tf.keras.applications.MobileNetV2
elif architecture_name == 'DenseNet121': model_architecture = tf.keras.applications.DenseNet121
elif architecture_name == 'DenseNet169': model_architecture = tf.keras.applications.DenseNet169
elif architecture_name == 'DenseNet201': model_architecture = tf.keras.applications.DenseNet201
elif int(list(tf.keras.__version__)[2]) >= 4:
if architecture_name == 'EfficientNetB0': model_architecture = tf.keras.applications.EfficientNetB0
elif architecture_name == 'EfficientNetB1': model_architecture = tf.keras.applications.EfficientNetB1
elif architecture_name == 'EfficientNetB2': model_architecture = tf.keras.applications.EfficientNetB2
elif architecture_name == 'EfficientNetB3': model_architecture = tf.keras.applications.EfficientNetB3
elif architecture_name == 'EfficientNetB4': model_architecture = tf.keras.applications.EfficientNetB4
elif architecture_name == 'EfficientNetB5': model_architecture = tf.keras.applications.EfficientNetB5
elif architecture_name == 'EfficientNetB6': model_architecture = tf.keras.applications.EfficientNetB6
elif architecture_name == 'EfficientNetB7': model_architecture = tf.keras.applications.EfficientNetB7
model = model_architecture( weights = weights,
include_top = include_top,
input_tensor = input_tensor,
input_shape = input_shape,
pooling = pooling) # ,classes=num_classes
KK = tf.keras.layers.Dense( num_classes, activation='sigmoid', name='predictions' )(model.output)
return tf.keras.models.Model(inputs=model.input,outputs=KK)
def weighted_bce_loss(W):
def func_loss(y_true,y_pred):
NUM_CLASSES = y_pred.shape[1]
loss = 0
for d in range(NUM_CLASSES):
y_true = tf.cast(y_true, tf.float32)
mask = tf.keras.backend.cast( tf.keras.backend.not_equal(y_true[:,d], -5),
tf.keras.backend.floatx() )
loss += W[d]*tf.keras.losses.binary_crossentropy( y_true[:,d] * mask,
y_pred[:,d] * mask )
return tf.divide( loss, tf.cast(NUM_CLASSES,tf.float32) )
return func_loss
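# weighted_bce_loss builds a per-class weighted binary cross-entropy in which a label equal
# to -5 acts as a "missing label" sentinel: the mask zeroes both truth and prediction for
# those entries so they contribute essentially nothing, while W[d] re-weights class d.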
def optimize(dir, train_dataset, valid_dataset, epochs, Info, architecture_name):
# architecture
model = architecture( architecture_name = architecture_name,
input_shape = list(Info.target_size) + [3] ,
num_classes = len(Info.pathologies) )
model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
loss = weighted_bce_loss(Info.class_weights), # tf.keras.losses.binary_crossentropy
metrics = [tf.keras.metrics.binary_accuracy] )
# optimization
history = model.fit( train_dataset,
validation_data = valid_dataset,
epochs = epochs,
steps_per_epoch = Info.steps_per_epoch,
validation_steps = Info.validation_steps,
verbose = 1,
use_multiprocessing = True) # ,callbacks=func_CallBacks(dir + '/model')
# saving the optimized model
model.save( dir + '/model/model.h5',
overwrite = True,
include_optimizer = False )
return model
def evaluate(dir: str, dataset: str='chexpert', batch_size: int=1000, model=tf.keras.Model()):
# Loading the data
Data, Info = load_data.load_chest_xray( dir = dir,
dataset = dataset,
batch_size = batch_size,
mode = 'test' )
score = measure_loss_acc_on_test_data( generator = Data.generator['test'],
model = model,
pathologies = Info.pathologies )
return score
def measure_loss_acc_on_test_data(generator, model, pathologies):
# Looping over all test samples
score_values = {}
NUM_CLASSES = len(pathologies)
generator.reset()
for j in tqdm(range(len(generator.filenames))):
x_test, y_test = next(generator)
full_path, x,y = generator.filenames[j] , x_test[0,...] , y_test[0,...]
x,y = x[np.newaxis,:] , y[np.newaxis,:]
# Estimating the loss & accuracy for instance
eval = model.evaluate(x=x, y=y,verbose=0,return_dict=True)
# predicting the labels for instance
pred = model.predict(x=x,verbose=0)
# Measuring the loss for each class
loss_per_class = [ tf.keras.losses.binary_crossentropy(y[...,d],pred[...,d]) for d in range(NUM_CLASSES)]
# saving all the infos
score_values[full_path] = {'full_path':full_path,'loss_avg':eval['loss'], 'acc_avg':eval['binary_accuracy'], 'pred':pred[0], 'pred_binary':pred[0] > 0.5, 'truth':y[0]>0.5, 'loss':np.array(loss_per_class), 'pathologies':pathologies}
# converting the outputs into panda dataframe
df = pd.DataFrame.from_dict(score_values).T
# resetting the index to integers
df.reset_index(inplace=True)
# # dropping the old index column
df = df.drop(['index'],axis=1)
return df
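# The returned dataframe has one row per test image with its average loss and accuracy, the
# raw and thresholded predictions, the ground truth, the per-class loss vector, and the
# pathology names; the Parent_Child logic below consumes exactly these fields.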
class Parent_Child():
def __init__(self, subj_info: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
"""
subject_info = {'pred':[], 'loss':[], 'pathologies':['Edema','Cardiomegaly',...]}
1. After creating a class:
SPC = Parent_Child(loss_dict, pred_dict, technique)
2. Update the parent child relationship:
SPC.set_parent_child_relationship(parent_name1, child_name_list1)
SPC.set_parent_child_relationship(parent_name2, child_name_list2)
3. Then update the loss and probabilities
SPC.update_loss_pred()
4. In order to see the updated loss and probabilities use below
loss_new_list = SPC.loss_dict_weighted or SPC.loss_list_weighted
pred_new_list = SPC.pred_dict_weighted or SPC.predlist_weighted
IMPORTANT NOTE:
        If there are more than 2 generations, it is absolutely important to enter the subjects in order of seniority
gen1: grandparent (gen1)
gen1_subjx_children: parent (gen2)
gen2_subjx_children: child (gen3)
SPC = Parent_Child(loss_dict, pred_dict, technique)
SPC.set_parent_child_relationship(gen1_subj1, gen1_subj1_children)
SPC.set_parent_child_relationship(gen1_subj2, gen1_subj2_children)
. . .
SPC.set_parent_child_relationship(gen2_subj1, gen2_subj1_children)
SPC.set_parent_child_relationship(gen2_subj2, gen2_subj2_children)
. . .
SPC.update_loss_pred()
"""
self.subj_info = subj_info
self.technique = technique
self.all_parents: dict = {}
self.tuning_variables = tuning_variables
self.loss = subj_info.loss
self.pred = subj_info.pred
self.truth = subj_info.truth
self._convert_inputs_list_to_dict()
def _convert_inputs_list_to_dict(self):
self.loss_dict = {disease:self.subj_info.loss[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.pred_dict = {disease:self.subj_info.pred[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.truth_dict = {disease:self.subj_info.truth[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.loss_dict_weighted = self.loss_dict
self.pred_dict_weighted = self.pred_dict
def set_parent_child_relationship(self, parent_name: str='parent_name', child_name_list: list=[]):
self.all_parents[parent_name] = child_name_list
def update_loss_pred(self):
"""
techniques:
            1: coefficient = weight * parent_loss + bias
            2: coefficient = weight * parent_truth_pred + bias
            3: coefficient = weight * parent_truth_pred + bias
            (weight and bias are read from tuning_variables[technique])
1: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
2: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
3. loss_new = loss_old * coefficient
"""
for parent_name in self.all_parents:
self._update_loss_for_children(parent_name)
self._convert_outputs_to_list()
def _convert_outputs_to_list(self):
self.loss_new = np.array([self.loss_dict_weighted[disease] for disease in self.subj_info.pathologies])
self.pred_new = np.array([self.pred_dict_weighted[disease] for disease in self.subj_info.pathologies])
def _update_loss_for_children(self, parent_name: str='parent_name'):
parent_loss = self.loss_dict_weighted[parent_name]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
TV = self.tuning_variables[ self.technique ]
if TV['mode'] == 'truth': parent_truth_pred = parent_truth
elif TV['mode'] == 'pred': parent_truth_pred = parent_pred
else: parent_truth_pred = 1.0
if self.technique == 1: coefficient = TV['weight'] * parent_loss + TV['bias']
elif self.technique == 2: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
elif self.technique == 3: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
for child_name in self.all_parents[parent_name]:
new_child_loss = self._measure_new_child_loss(coefficient, parent_name, child_name)
self.loss_dict_weighted[child_name] = new_child_loss
self.pred_dict_weighted[child_name] = 1 - np.power(e_VALUE , -new_child_loss)
self.pred_dict[child_name] = 1 - np.power(e_VALUE , -self.loss_dict[child_name])
def _measure_new_child_loss(self, coefficient: float=0.0, parent_name: str='parent_name', child_name: str='child_name'):
TV = self.tuning_variables[ self.technique ]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
if TV['mode'] == 'truth': loss_activated = (parent_truth < 0.5 )
elif TV['mode'] == 'pred': loss_activated = (parent_pred < TV['parent_pred_threshold'] )
else: loss_activated = True
old_child_loss = self.loss_dict_weighted[child_name]
if self.technique == 1: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 2: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 3: new_child_loss = old_child_loss * coefficient
return new_child_loss
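# ---------------------------------------------------------------------------
# A minimal usage sketch of Parent_Child following its docstring. The subject
# row, pathology names and tuning variables are toy values chosen for
# illustration only; the real pipeline feeds rows of the score dataframe.
# Assumes numpy/pandas and the e_VALUE constant defined earlier in this file.
_toy_subject = pd.Series({'loss': np.array([0.9, 0.4]),
                          'pred': np.array([0.3, 0.6]),
                          'truth': np.array([True, True]),
                          'pathologies': ['Lung Opacity', 'Edema']})
_toy_tuning = {2: {'mode': 'pred', 'weight': 1.0, 'bias': 0.0, 'parent_pred_threshold': 0.5}}
_spc = Parent_Child(subj_info=_toy_subject, technique=2, tuning_variables=_toy_tuning)
_spc.set_parent_child_relationship(parent_name='Lung Opacity', child_name_list=['Edema'])
_spc.update_loss_pred()
# _spc.loss_dict_weighted and _spc.pred_dict_weighted now hold the re-weighted child values
# ---------------------------------------------------------------------------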
class Measure_InterDependent_Loss_Aim1_1(Parent_Child):
def __init__(self,score: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
score['loss_new'] = score['loss']
score['pred_new'] = score['pred']
self.score = score
self.technique = technique
for subject_ix in tqdm(self.score.index):
Parent_Child.__init__(self, subj_info=self.score.loc[subject_ix], technique=technique, tuning_variables=tuning_variables)
self.set_parent_child_relationship(parent_name='Lung Opacity' , child_name_list=['Pneumonia', 'Atelectasis','Consolidation','Lung Lesion', 'Edema'])
self.set_parent_child_relationship(parent_name='Enlarged Cardiomediastinum', child_name_list=['Cardiomegaly'])
self.update_loss_pred()
self.score.loss_new.loc[subject_ix] = self.loss_new
self.score.pred_new.loc[subject_ix] = self.pred_new
def apply_new_loss_techniques_aim1_1(pathologies: list=[], score: pd.DataFrame.dtypes={}, tuning_variables: dict={}):
L = len(pathologies)
accuracies = np.zeros((4,L))
measured_auc = np.zeros((4,L))
FR = list(np.zeros(4))
for technique in range(4):
# extracting the output predictions
if technique == 0:
FR[technique] = score
output = score.pred
else:
FR[technique] = Measure_InterDependent_Loss_Aim1_1(score=score, technique=technique, tuning_variables=tuning_variables)
output = FR[technique].score.pred_new
# Measuring accuracy
func = lambda x1, x2: [ (x1[j] > 0.5) == (x2[j] > 0.5) for j in range(len(x1))]
pred_acc = score.truth.combine(output,func=func).to_list()
pred_acc = np.array(pred_acc).mean(axis=0)
prediction_table = np.stack(score.pred)
truth_table = np.stack(score.truth)
for d in range(prediction_table.shape[1]):
fpr, tpr, thresholds = roc_curve(truth_table[:,d], prediction_table[:,d], pos_label=1)
measured_auc[technique, d] = auc(fpr, tpr)
accuracies[technique,:] = np.floor( pred_acc*1000 ) / 10
class Outputs:
def __init__(self,accuracies, measured_auc, FR, pathologies):
self.accuracy = self._converting_to_dataframe(input_table=accuracies , columns=pathologies)
self.auc = self._converting_to_dataframe(input_table=measured_auc, columns=pathologies)
self.details = FR
self.pathologies = pathologies
def _converting_to_dataframe(self, input_table, columns):
df = pd.DataFrame(input_table, columns=columns)
df['technique'] = ['original','1','2','3']
df = df.set_index('technique').T
return df
return Outputs(accuracies=accuracies, measured_auc=measured_auc, FR=FR,pathologies=pathologies)
def apply_nan_back_to_truth(truth, how_to_treat_nans):
# changing the samples with an uncertain truth label to nan
truth[ truth == -10] = np.nan
# how to treat the nan labels in the original dataset before measuring the average accuracy
if how_to_treat_nans == 'ignore': truth[ truth == -5] = np.nan
elif how_to_treat_nans == 'pos': truth[ truth == -5] = 1
elif how_to_treat_nans == 'neg': truth[ truth == -5] = 0
return truth
def measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans):
""" prediction & truth: num_samples x num_classes """
pred_classes = prediction > 0.5
# truth_nan_applied = self._truth_with_nan_applied()
truth_nan_applied = apply_nan_back_to_truth(truth=truth, how_to_treat_nans=how_to_treat_nans)
# measuring the binary truth labels (the nan samples will be fixed below)
truth_binary = truth_nan_applied > 0.5
truth_pred_compare = (pred_classes == truth_binary).astype(float)
# replacing the nan samples back to their nan value
truth_pred_compare[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average accuracy over all samples after ignoring the nan samples
accuracy = np.nanmean(truth_pred_compare, axis=0)*100
# safety measure in case the overall accuracy of a class is also nan; without it, the integer formatting below would produce very long floats
accuracy[np.isnan(accuracy)] = 0
accuracy = (accuracy*10).astype(int)/10
return accuracy
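# ---------------------------------------------------------------------------
# A small worked sketch of how the CheXpert label sentinels are handled by the
# function above: -10 marks an uncertain label (always mapped to nan) and -5
# marks a missing label whose treatment depends on how_to_treat_nans. The toy
# arrays are illustration only.
_toy_truth = np.array([[1.0, -10.0], [0.0, -5.0]])   # 2 samples x 2 classes
_toy_pred  = np.array([[0.9,   0.2], [0.1,  0.8]])
_acc_ignore = measure_mean_accruacy_chexpert(truth=_toy_truth.copy(), prediction=_toy_pred.copy(), how_to_treat_nans='ignore')
_acc_pos    = measure_mean_accruacy_chexpert(truth=_toy_truth.copy(), prediction=_toy_pred.copy(), how_to_treat_nans='pos')
# with 'ignore' the second class has no valid samples left; with 'pos' its -5 entry is scored as a positive label
# ---------------------------------------------------------------------------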
def measure_mean_uncertainty_chexpert(truth=np.array([]), uncertainty=np.array([]), how_to_treat_nans='ignore'):
""" uncertainty & truth: num_samples x num_classes """
# adding the nan values back to arrays
truth_nan_applied = apply_nan_back_to_truth(truth, how_to_treat_nans)
# replacing the nan samples back to their nan value
uncertainty[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average uncertainty over all samples after ignoring the nan samples
uncertainty_mean = np.nanmean(uncertainty, axis=0)
# safety measure in case the overall uncertainty of a class is also nan; without it, the integer formatting below would produce very long floats
uncertainty_mean[np.isnan(uncertainty_mean)] = 0
uncertainty_mean = (uncertainty_mean*1000).astype(int)/1000
return uncertainty_mean
class Measure_Accuracy_Aim1_2():
def __init__(self, predict_accuracy_mode: bool=False , model: tf.keras.models.Model.dtype='' , generator=tf.keras.preprocessing.image.ImageDataGenerator() , how_to_treat_nans: str='ignore', uncertainty_type: str='std'):
"""
how_to_treat_nans:
ignore: ignore the nan samples when measuring the average accuracy
pos: treat the nan samples as positive
neg: treat the nan samples as negative """
self.predict_accuracy_mode = predict_accuracy_mode
self.how_to_treat_nans = how_to_treat_nans
self.generator = generator
self.model = model
self.uncertainty_type = uncertainty_type
self._setting_params()
def _setting_params(self):
self.full_data_length, self.num_classes = self.generator.labels.shape
self.batch_size = self.generator.batch_size
self.number_batches = int(np.ceil(self.full_data_length/self.batch_size))
self.truth = self.generator.labels.astype(float)
def loop_over_whole_dataset(self):
probs = np.zeros(self.generator.labels.shape)
# Looping over all batches
# Keras_backend.clear_session()
self.generator.reset()
np.random.seed(1)
for batch_index in tqdm(range(self.number_batches),disable=False):
# extracting the indexes for batch "batch_index"
self.generator.batch_index = batch_index
indexes = next(self.generator.index_generator)
# print(' extracting data -------')
self.generator.batch_index = batch_index
x, _ = next(self.generator)
# print(' predicting the labels -------')
probs[indexes,:] = self.model.predict(x,verbose=0)
# Measuring the accuracy over whole augmented dataset
if self.predict_accuracy_mode:
accuracy = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=probs.copy(), how_to_treat_nans=self.how_to_treat_nans)
return probs, accuracy
def loop_over_all_augmentations(self,number_augmentation: int=0):
self.number_augmentation = number_augmentation
self.probs_all_augs_3d = np.zeros((1 + number_augmentation , self.full_data_length , self.num_classes))
self.accuracy_all_augs_3d = np.zeros((1 + number_augmentation , self.num_classes))
# Looping over all augmentation scenarios
for ix_aug in range(number_augmentation):
print(f'augmentation {ix_aug}/{number_augmentation}')
probs, accuracy = self.loop_over_whole_dataset()
self.probs_all_augs_3d[ ix_aug,...] = probs
self.accuracy_all_augs_3d[ix_aug,...] = accuracy
# measuring the average probability over all augmented data
self.probs_avg_2d = np.mean( self.probs_all_augs_3d, axis=0)
if self.uncertainty_type == 'std':
self.probs_std_2d = np.std(self.probs_all_augs_3d, axis=0)
# Measuring the accuracy of the new estimated probability for each sample over all augmented data
# self.accuracy_final = self._measure_mean_accruacy(self.probs_avg_2d)
# self.uncertainty_final = self._measure_mean_std(self.probs_std_2d)
self.accuracy_final = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=self.probs_avg_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
self.uncertainty_final = measure_mean_uncertainty_chexpert(truth=self.truth.copy(), uncertainty=self.probs_std_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
def apply_technique_aim_1_2(how_to_treat_nans='ignore', data_generator='', data_generator_aug='', model='', number_augmentation=3, uncertainty_type='std'):
print('running the evaluation on original non-augmented data')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
probs_2d_orig, old_accuracy = MA.loop_over_whole_dataset()
print(' running the evaluation on augmented data including the uncertainty measurement')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator_aug,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
MA.loop_over_all_augmentations(number_augmentation=number_augmentation)
final_results = { 'old-accuracy': old_accuracy,
'new-accuracy': MA.accuracy_final,
'std' : MA.uncertainty_final}
return probs_2d_orig, final_results, MA
def estimate_maximum_and_change(all_accuracies=np.array([]), pathologies=[]):
columns = ['old-accuracy', 'new-accuracy', 'std']
# creating a dataframe from accuracies
df = pd.DataFrame(all_accuracies, index=pathologies, columns=columns)
# adding the 'maximum' & 'change' columns
df['maximum'] = df.columns[ df.values.argmax(axis=1) ]
df['change'] = df[columns[1:]].max(axis=1) - df[columns[0]]
# replacing "0" values to "--" for readability
df.maximum[df.change==0.0] = '--'
df.change[df.change==0.0] = '--'
return df
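# ---------------------------------------------------------------------------
# A minimal sketch of summarizing the dictionary returned by
# apply_technique_aim_1_2 with estimate_maximum_and_change, mirroring the
# commented-out wrapper below. The accuracy values and pathology names are toy
# numbers; in the real pipeline the returned final_results dict (keys
# 'old-accuracy', 'new-accuracy', 'std') is passed directly.
_toy_results = {'old-accuracy': np.array([80.0, 75.0]),
                'new-accuracy': np.array([82.5, 74.0]),
                'std':          np.array([0.03, 0.05])}
_summary = estimate_maximum_and_change(all_accuracies=_toy_results, pathologies=['Edema', 'Cardiomegaly'])
# ---------------------------------------------------------------------------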
# def apply_technique_aim_1_2_with_dataframe(how_to_treat_nans='ignore', pathologies=[], data_generator='', data_generator_aug='', model='', uncertainty_type='std'):
# outputs, MA = apply_technique_aim_1_2(how_to_treat_nans=how_to_treat_nans, data_generator=data_generator, data_generator_aug=data_generator_aug, model=model, uncertainty_type=uncertainty_type)
# df = estimate_maximum_and_change(all_accuracies=outputs, pathologies=pathologies)
# return df, outputs, MA
""" crowdsourcing technique aim 1_3 """
def apply_technique_aim_1_3(data={}, num_simulations=20, feature_columns=[], ARLS={}):
def assigning_worker_true_labels(seed_num=1, true=[], labelers_strength=0.5):
# setting the random seed
# np.random.seed(seed_num)
# number of samples and labelers/workers
num_samples = true.shape[0]
# finding a random number for each instance
true_label_assignment_prob = np.random.random(num_samples)
# samples that will have an inaccurate true label
false_samples = true_label_assignment_prob < 1 - labelers_strength
# measuring the new labels for each labeler/worker
worker_true = true > 0.5
worker_true[ false_samples ] = ~ worker_true[ false_samples ]
return worker_true
def assigning_random_labelers_strengths(num_labelers=10, low_dis=0.3, high_dis=0.9):
labeler_names = [f'labeler_{j}' for j in range(num_labelers)]
# if num_labelers > 1:
# ls1 = np.random.uniform( low = 0.1,
# high = 0.3,
# size = int(num_labelers/2))
# ls2 = np.random.uniform( low = 0.7,
# high = 0.9,
# size = num_labelers - int(num_labelers/2))
# labelers_strength = np.concatenate((ls1 , ls2),axis=0)
# else:
labelers_strength = np.random.uniform( low = low_dis,
high = high_dis,
size = num_labelers)
return pd.DataFrame( {'labelers_strength': labelers_strength}, index = labeler_names)
# TODO: repeat this for multiple seeds and average the results
np.random.seed(11)
# setting a random strength for each labeler/worker
labelers_strength = assigning_random_labelers_strengths( num_labelers = ARLS['num_labelers'],
low_dis = ARLS['low_dis'],
high_dis = ARLS['high_dis'])
predicted_labels_all_sims = {'train':{}, 'test':{}}
true_labels = {'train':pd.DataFrame(), 'test':pd.DataFrame()}
uncertainty = {'train':pd.DataFrame(), 'test':pd.DataFrame()}
for LB_index, LB in enumerate(tqdm(labelers_strength.index, desc='workers')):
# Initialization
for mode in ['train', 'test']:
predicted_labels_all_sims[mode][LB] = {}
true_labels[mode]['truth'] = data[mode].true.copy()
""" Looping over all simulations. this is to measure uncertainty """
# extracting the simulated true labels based on the worker strength
true_labels['train'][LB] = assigning_worker_true_labels( seed_num = 0, # LB_index,
true = data['train'].true.values,
labelers_strength = labelers_strength.T[LB].values )
true_labels['test'][LB] = assigning_worker_true_labels( seed_num = 0, # LB_index,
true = data['test'].true.values,
labelers_strength = labelers_strength.T[LB].values )
for i in range(num_simulations):
# training a random forest on the aforementioned labels
RF = RandomForestClassifier( n_estimators = 5,
max_depth = 10,
random_state = i)
RF.fit( X = data['train'][feature_columns],
y = true_labels['train'][LB] )
# predicting the labels using trained networks for both train and test data
for mode in ['train', 'test']:
predicted_labels_all_sims[mode][LB][f'simulation_{i}'] = RF.predict( data[mode][feature_columns] )
# measuring the prediction and uncertainty values after MV over all simulations
for mode in ['train', 'test']:
# converting to dataframe
predicted_labels_all_sims[mode][LB] = pd.DataFrame(predicted_labels_all_sims[mode][LB], index=data[mode].index)
# predicted probability of each class after MV over all simulations
predicted_labels_all_sims[mode][LB]['mv'] = (predicted_labels_all_sims[mode][LB].mean(axis=1) > 0.5)
# uncertainty for each labeler over all simulations
uncertainty[mode][LB] = predicted_labels_all_sims[mode][LB].std(axis=1)
predicted_labels = { 'train':{}, 'test' :{} }
for mode in ['train', 'test']:
# reversing the order of simulations and labelers. NOTE: for the final experiment simulation_0 should be used. If the mv is used instead, then because the augmented truths keep changing in each simulation, enough simulations will eventually yield perfect labelers.
for i in range(num_simulations + 1):
SM = f'simulation_{i}' if i < num_simulations else 'mv'
predicted_labels[mode][SM] = pd.DataFrame()
for LB in [f'labeler_{j}' for j in range(ARLS['num_labelers'])]:
predicted_labels[mode][SM][LB] = predicted_labels_all_sims[mode][LB][SM]
labelers_strength['accuracy-test'] = 0
acc = {}
for i in range(ARLS['num_labelers']):
LB = f'labeler_{i}'
labelers_strength.loc[LB,'accuracy-test'] = ( predicted_labels['test']['mv'][LB] == true_labels['test'].truth ).mean()
return predicted_labels, uncertainty, true_labels, labelers_strength
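# ---------------------------------------------------------------------------
# A minimal sketch of calling apply_technique_aim_1_3 on a toy dataset. The
# features, labels and ARLS settings below are invented for illustration only;
# assumes the RandomForestClassifier and tqdm imports from the top of this file.
_toy_feats = ['x0', 'x1']
_rng = np.random.RandomState(0)
_toy_train = pd.DataFrame(_rng.rand(40, 2), columns=_toy_feats)
_toy_train['true'] = (_toy_train.x0 + _toy_train.x1 > 1).astype(int)
_toy_test = pd.DataFrame(_rng.rand(20, 2), columns=_toy_feats)
_toy_test['true'] = (_toy_test.x0 + _toy_test.x1 > 1).astype(int)
_preds, _unc, _truths, _strengths = apply_technique_aim_1_3(
    data={'train': _toy_train, 'test': _toy_test},
    num_simulations=3,
    feature_columns=_toy_feats,
    ARLS={'num_labelers': 2, 'low_dis': 0.4, 'high_dis': 0.9})
# _strengths now also contains the measured 'accuracy-test' per simulated labeler
# ---------------------------------------------------------------------------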
def aim1_3_measuring_weights(labels_all_workers, uncertainty_all_workers):
# weights : num_labelers x num_methods
# prob_weighted : num_samples x num_labelers
prob_mv_binary = labels_all_workers.mean(axis=1) > 0.5
T1, T2, w_hat1, w_hat2 = {}, {}, {}, {}
for workers_name in labels_all_workers.columns:
T1[workers_name] = 1 - uncertainty_all_workers[workers_name]
T2[workers_name] = T1[workers_name].copy()
T2[workers_name][ labels_all_workers[workers_name].values != prob_mv_binary.values ] = 0
w_hat1[workers_name] = T1[workers_name].mean(axis=0)
w_hat2[workers_name] = T2[workers_name].mean(axis=0)
w_hat = pd.DataFrame([w_hat1, w_hat2], index=['method1', 'method2'])
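# ---------------------------------------------------------------------------
# A minimal sketch of the inputs expected by aim1_3_measuring_weights. Each
# column is one worker, labels are binary and uncertainties lie in [0, 1]; the
# values below are toy numbers. Note that, as reproduced here, the function
# ends after building w_hat without returning it, so in practice a
# `return w_hat` (or further processing) would follow.
_toy_labels = pd.DataFrame({'labeler_0': [1, 0, 1, 1],
                            'labeler_1': [1, 1, 0, 1]}).astype(bool)
_toy_uncertainty = pd.DataFrame({'labeler_0': [0.1, 0.2, 0.4, 0.1],
                                 'labeler_1': [0.3, 0.1, 0.2, 0.2]})
aim1_3_measuring_weights(_toy_labels, _toy_uncertainty)
# ---------------------------------------------------------------------------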
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
import flat_table
import pandas as pd
import plotly.graph_objects as go
from dash.dependencies import Output, Input
from plotly.subplots import make_subplots
bs_theme = 'https://codepen.io/chriddyp/pen/bWLwgP.css'
external_stylesheets = [bs_theme]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
def import_data():
with open('snapshots.json') as data:
read_content = json.load(data)
df = pd.json_normalize(read_content)
new_df = flat_table.normalize(df)
return new_df
df = import_data()
"""some variables"""
df['date'] = pd.to_datetime(df['date'])
from datetime import datetime
import os
import pandas as pd
import requests
from typing import Tuple
AUTH_KEY = '' # Add in your api_key here
def api_call(api_key: str, city: str, state: str, country: str = 'us') -> dict:
'''
Makes request to openweather map with all parameters
and returns formatted dict with all necessary information
Params:
api_key: string of api_key provided by openweathermap api
city: city you want to query
state: state in which city resides
country: country in which city resides, defaults to US unless otherwise specified
For example, returns:
{
'city, state': 'austin, tx',
'dt': '2021-09-19 00:01:07',
'feels_like': 95.14,
'humidity': 43,
'pressure': 1011,
'temp': 92.05,
'temp_max': 95.27,
'temp_min': 88.29
}
'''
weather_url = 'https://api.openweathermap.org/data/2.5/weather?q={CITY},{STATE},{COUNTRY}&appid={API_KEY}&units=imperial&dt' \
.format(CITY=city, STATE=state, COUNTRY=country, API_KEY=api_key)
response = requests.get(weather_url).json()
weather = response['main']
weather['city, state'] = city + ", " + state
weather['dt'] = datetime.utcfromtimestamp(response['dt']).strftime('%Y-%m-%d')
weather = [weather]
return weather
def _init_df(csv_path: str) -> pd.DataFrame:
'''
Takes in csv (if it exists) and returns Dataframe.
If it doesn't exist, it initializes an empty dataframe
Params:
csv_path: csv file path for all collected weather data so far
'''
try:
    if csv_path and os.path.exists(csv_path):
        df = pd.read_csv(csv_path)
    else:
        df = pd.DataFrame()
except Exception:
    df = pd.DataFrame()
return df
def _column_renamer(df: pd.DataFrame, weather_list: list) -> Tuple[pd.DataFrame, str]:
'''
Takes in dataframe and returns a renamed dataframe
according to the city the data is for
Params:
df: input dataframe
weather_list: list of dict containing weather data and city name
'''
city_name = weather_list[0]['city, state'].split(',')[0]
df.columns = city_name + ' ' + df.columns
return df, city_name
def _column_creator(city_name1: str, city_name2: str, attribute: str) -> Tuple[str, str]:
'''
Helper function to create the names needed to do diff calculations in update_df
Params:
city_name: city name passed in
attribute: attribute you wish to track from original weather data
'''
return city_name1 + ' ' + attribute, city_name2 + ' ' + attribute
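# ---------------------------------------------------------------------------
# A tiny sketch of what the helper above produces for a pair of hypothetical
# cities ('austin' and 'seattle' are placeholders).
_example_cols = _column_creator('austin', 'seattle', 'temp')   # -> ('austin temp', 'seattle temp')
# _column_renamer, defined above, prefixes every column of a one-row weather
# dataframe with the city name taken from weather_list[0]['city, state'].
# ---------------------------------------------------------------------------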
def update_df(weather1: list, weather2: list, csv_path: str = None) -> None:
'''
Reads in csv_path to a dataframe and updates that dataframe with today's combined weather for comparison
Params:
weather1: list[dict] of weather for one city
weather2: list[dict] of weather for the other city
csv_path: if adding to an existing csv, defaults to none
'''
old_df = _init_df(csv_path)
weather_one_df = pd.DataFrame(weather1)
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = Series(["foo", "bar"])
msg = f"expected a string object, not {type(pattern).__name__}"
with pytest.raises(TypeError, match=msg):
ser.str.startswith(pattern)
with pytest.raises(TypeError, match=msg):
ser.str.endswith(pattern)
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else: # Index
tm.assert_index_equal(left, right)
def test_iter():
# GH3638
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ser = Series(strs)
with tm.assert_produces_warning(FutureWarning):
for s in ser.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ser.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == "l"
def test_iter_empty(any_string_dtype):
ser = Series([], dtype=any_string_dtype)
i, s = 100, 1
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(any_string_dtype):
ser = Series(["a"], dtype=any_string_dtype)
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert not i
tm.assert_series_equal(ser, s)
def test_iter_object_try_string():
ser = Series(
[
slice(None, np.random.randint(10), np.random.randint(10, 20))
for _ in range(4)
]
)
i, s = 100, "h"
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert i == 100
assert s == "h"
# test integer/float dtypes (inferred by constructor) and mixed
def test_count(any_string_dtype):
ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)
result = ser.str.count("f[o]+")
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_count_mixed_object():
ser = Series(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
result = ser.str.count("a")
expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_repeat(any_string_dtype):
ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)
result = ser.str.repeat(3)
expected = Series(
["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
result = ser.str.repeat([1, 2, 3, 4, 5, 6])
expected = Series(
["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])
def test_repeat_with_null(any_string_dtype, arg, repeat):
# GH: 31632
ser = Series(["a", arg], dtype=any_string_dtype)
result = ser.str.repeat([3, repeat])
expected = Series(["aaa", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_empty_str_methods(any_string_dtype):
empty_str = empty = Series(dtype=any_string_dtype)
if any_string_dtype == "object":
empty_int = Series(dtype="int64")
empty_bool = Series(dtype=bool)
else:
empty_int = Series(dtype="Int64")
empty_bool = Series(dtype="boolean")
empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
empty_df = DataFrame()
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert "" == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.contains("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.startswith("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.endswith("a"))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.match("^a"))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=any_string_dtype),
empty.str.extract("()", expand=True),
)
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=True),
)
tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=False),
)
tm.assert_frame_equal(empty_df, empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(""))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
tm.assert_series_equal(empty_int, empty.str.find("a"))
tm.assert_series_equal(empty_int, empty.str.rfind("a"))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_object, empty.str.split("a"))
tm.assert_series_equal(empty_object, empty.str.rsplit("a"))
tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.strip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.lstrip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii"))
tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))
# ismethods should always return boolean (GH 29624)
tm.assert_series_equal(empty_bool, empty.str.isalnum())
tm.assert_series_equal(empty_bool, empty.str.isalpha())
tm.assert_series_equal(empty_bool, empty.str.isdigit())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under2p0,
):
tm.assert_series_equal(empty_bool, empty.str.isspace())
tm.assert_series_equal(empty_bool, empty.str.islower())
tm.assert_series_equal(empty_bool, empty.str.isupper())
tm.assert_series_equal(empty_bool, empty.str.istitle())
tm.assert_series_equal(empty_bool, empty.str.isnumeric())
tm.assert_series_equal(empty_bool, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize("NFC"))
table = str.maketrans("a", "b")
tm.assert_series_equal(empty_str, empty.str.translate(table))
@pytest.mark.parametrize(
"method, expected",
[
("isalnum", [True, True, True, True, True, False, True, True, False, False]),
("isalpha", [True, True, True, False, False, False, True, False, False, False]),
(
"isdigit",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isnumeric",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isspace",
[False, False, False, False, False, False, False, False, False, True],
),
(
"islower",
[False, True, False, False, False, False, False, False, False, False],
),
(
"isupper",
[True, False, False, False, True, False, True, False, False, False],
),
(
"istitle",
[True, False, True, False, True, False, False, False, False, False],
),
],
)
def test_ismethods(method, expected, any_string_dtype):
ser = Series(
["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype
)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]"
and pa_version_under2p0
and method == "isspace",
):
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, True, True, False, True, True, False]),
("isdecimal", [False, True, False, False, False, True, False]),
],
)
def test_isnumeric_unicode(method, expected, any_string_dtype):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
ser = Series(["A", "3", "¼", "★", "፸", "3", "four"], dtype=any_string_dtype)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, np.nan, True, False, np.nan, True, False]),
("isdecimal", [False, np.nan, False, False, np.nan, True, False]),
],
)
def test_isnumeric_unicode_missing(method, expected, any_string_dtype):
values = ["A", np.nan, "¼", "★", np.nan, "3", "four"]
ser = Series(values, dtype=any_string_dtype)
expected_dtype = "object" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
def test_spilt_join_roundtrip(any_string_dtype):
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = ser.str.split("_").str.join("_")
expected = ser.astype(object)
tm.assert_series_equal(result, expected)
def test_spilt_join_roundtrip_mixed_object():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.split("_").str.join("_")
expected = Series(
["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_len(any_string_dtype):
ser = Series(
["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"],
dtype=any_string_dtype,
)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.len()
expected_dtype = "float64" if any_string_dtype == "object" else "Int64"
expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_len_mixed():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.len()
expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method,sub,start,end,expected",
[
("index", "EF", None, None, [4, 3, 1, 0]),
("rindex", "EF", None, None, [4, 5, 7, 4]),
("index", "EF", 3, None, [4, 3, 7, 4]),
("rindex", "EF", 3, None, [4, 5, 7, 4]),
("index", "E", 4, 8, [4, 5, 7, 4]),
("rindex", "E", 0, 5, [4, 3, 1, 4]),
],
)
def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
expected_dtype = np.int64 if any_string_dtype == "object" else "Int64"
expected = index_or_series(expected, dtype=expected_dtype)
result = getattr(obj.str, method)(sub, start, end)
if index_or_series is Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)(sub, start, end) for item in obj]
assert list(result) == expected
def test_index_not_found_raises(index_or_series, any_string_dtype):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
with pytest.raises(ValueError, match="substring not found"):
obj.str.index("DE")
@pytest.mark.parametrize("method", ["index", "rindex"])
def test_index_wrong_type_raises(index_or_series, any_string_dtype, method):
obj = index_or_series([], dtype=any_string_dtype)
msg = "expected a string object, not int"
with pytest.raises(TypeError, match=msg):
getattr(obj.str, method)(0)
@pytest.mark.parametrize(
"method, exp",
[
["index", [1, 1, 0]],
["rindex", [3, 1, 2]],
],
)
def test_index_missing(any_string_dtype, method, exp):
ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype)
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
result = getattr(ser.str, method)("b")
expected = Series(exp + [np.nan], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_pipe_failures(any_string_dtype):
# #2119
ser = Series(["A|B|C"], dtype=any_string_dtype)
result = ser.str.split("|")
expected = Series([["A", "B", "C"]], dtype=object)
tm.assert_series_equal(result, expected)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.replace("|", " ", regex=False)
expected = Series(["A B C"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", "bar", np.nan, "baz"]),
(0, 3, -1, ["", "", np.nan, ""]),
(None, None, -1, ["owtoofaa", "owtrabaa", np.nan, "xuqzabaa"]),
(3, 10, 2, ["oto", "ato", np.nan, "aqx"]),
(3, 0, -1, ["ofa", "aba", np.nan, "aba"]),
],
)
def test_slice(start, stop, step, expected, any_string_dtype):
ser = Series(["aafootwo", "aabartwo", np.nan, "aabazqux"], dtype=any_string_dtype)
result = ser.str.slice(start, stop, step)
expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", np.nan, "bar", np.nan, np.nan, np.nan, np.nan, np.nan]),
(4, 1, -1, ["oof", np.nan, "rab", np.nan, np.nan, np.nan, np.nan, np.nan]),
],
)
def test_slice_mixed_object(start, stop, step, expected):
ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0])
result = ser.str.slice(start, stop, step)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start,stop,repl,expected",
[
(2, 3, None, ["shrt", "a it longer", "evnlongerthanthat", "", np.nan]),
(2, 3, "z", ["shzrt", "a zit longer", "evznlongerthanthat", "z", np.nan]),
(2, 2, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(2, 1, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(-1, None, "z", ["shorz", "a bit longez", "evenlongerthanthaz", "z", np.nan]),
(None, -2, "z", ["zrt", "zer", "zat", "z", np.nan]),
(6, 8, "z", ["shortz", "a bit znger", "evenlozerthanthat", "z", np.nan]),
(-10, 3, "z", ["zrt", "a zit longer", "evenlongzerthanthat", "z", np.nan]),
],
)
def test_slice_replace(start, stop, repl, expected, any_string_dtype):
ser = Series(
["short", "a bit longer", "evenlongerthanthat", "", np.nan],
dtype=any_string_dtype,
)
expected = Series(expected, dtype=any_string_dtype)
result = ser.str.slice_replace(start, stop, repl)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", "bb", np.nan, "cc"]],
["lstrip", ["aa ", "bb \n", np.nan, "cc "]],
["rstrip", [" aa", " bb", np.nan, "cc"]],
],
)
def test_strip_lstrip_rstrip(any_string_dtype, method, exp):
ser = Series([" aa ", " bb \n", np.nan, "cc "], dtype=any_string_dtype)
result = getattr(ser.str, method)()
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", np.nan, "bb"]],
["lstrip", ["aa ", np.nan, "bb \t\n"]],
["rstrip", [" aa", np.nan, " bb"]],
],
)
def test_strip_lstrip_rstrip_mixed_object(method, exp):
ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0])
result = getattr(ser.str, method)()
expected = Series(exp + [np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["ABC", " BNSD", "LDFJH "]],
["lstrip", ["ABCxx", " BNSD", "LDFJH xx"]],
["rstrip", ["xxABC", "xx BNSD", "LDFJH "]],
],
)
def test_strip_lstrip_rstrip_args(any_string_dtype, method, exp):
ser = Series(["xxABCxx", "xx BNSD", "LDFJH xx"], dtype=any_string_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = getattr(ser.str, method)("x")
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"prefix, expected", [("a", ["b", " b c", "bc"]), ("ab", ["", "a b c", "bc"])]
)
def test_removeprefix(any_string_dtype, prefix, expected):
ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)
result = ser.str.removeprefix(prefix)
ser_expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, ser_expected)
@pytest.mark.parametrize(
"suffix, expected", [("c", ["ab", "a b ", "b"]), ("bc", ["ab", "a b c", ""])]
)
def test_removesuffix(any_string_dtype, suffix, expected):
ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)
result = ser.str.removesuffix(suffix)
ser_expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, ser_expected)
# Function of the Ontology Storing
import pandas as pd
import spacy
import codecs
import os
def ontology(inputPath, outputPath, numFiles, saveMode):
# Language model
nlp = spacy.load('en_core_web_lg')
# Verbs that go with prepositions
with open('C:/Users/anast/Desktop/Thesis/MachineLearning/verbList.txt') as v:
verbList = v.read().splitlines()
for k in range(1, numFiles + 1):
# Import Dataset
if numFiles == 1:
data = pd.read_csv(f"{inputPath}", header=None, sep='\t')
else:
data = pd.read_csv(f"{inputPath}/{k}.txt", header=None, sep='\t')
# print('File: ', k)
dataTransformed = []
# For every sentence in the file apply the below algorithm
for i in range(0, data.__len__()):
# print(i)
text = data.iloc[i, 0]
# print(text)
doc = nlp(text)
# Lists Definition
# Actor
actors = []
# Action
actions = []
actionsPositions = []
# Object
objects = []
objectsPositions = []
actionsWithObjects = []
actionsWithObjectsLemma = []
# ---- Actors ----
for j in range(0, doc.__len__()):
token = doc[j]
if token.pos_ == "DET":
continue
else:
actors.append(token.text)
if token.dep_ == "nsubj":
firstSubjectPos = j
break
# Check for second actor
secondActorFlag = False
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "conj" and token.head.text == doc[firstSubjectPos].text:
secondActorFlag = True
secondSubjectPos = j
break
if secondActorFlag:
for j in range(firstSubjectPos + 1, secondSubjectPos):
token = doc[j]
actors.append(token.text)
# Make the actors list a string
actorsString = " ".join(actors)
actorsString = actorsString.lower()
# print(actors)
# Actor Lemma
txt = nlp(actorsString)
actorsLemma = []
for tok in txt:
actorsLemma.append(tok.lemma_)
actorsLemmaString = " ".join(actorsLemma)
# ---- Action ----
for j in range(0, doc.__len__()):
token = doc[j]
if token.pos_ == "VERB" and token.dep_ == "xcomp" and token.head.text == "able":
actions.append(token.text)
actionsPositions.append(j)
if token.pos_ == "VERB" and token.dep_ == "acl" and token.head.text == "ability":
actions.append(token.text)
actionsPositions.append(j)
# Case: Action detected is the verb have, causative or passive voice
# probably works for a bunch of cases
tempObjFlag = False
for action in actions:
# if action in ["have", "know"]:
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ in ["ccomp", "xcomp"] and token.head.text == action:
actions.append(token.text)
actionsPositions.append(j)
verbList.append(token.text)
if token.dep_ == "dobj" and token.head.text == action:
tempObjFlag = True
tempObjPos = j
if tempObjFlag:
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "relcl" and token.head.text == doc[tempObjPos].text:
actions.append(token.text)
actionsPositions.append(j)
# Passive voice
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "auxpass":
auxpass = token.head.text
if auxpass not in actions:
actions.append(auxpass)
for m in range(0, doc.__len__()):
if doc[m].text == auxpass:
actionsPositions.append(m)
# Find the other actions
for action in actions:
for j in range(0, doc.__len__()):
token = doc[j]
if token.pos_ == "VERB" and token.dep_ == "conj" and token.head.text == action:
actions.append(token.text)
actionsPositions.append(j)
# Find actions in subordinating sentences with when/where/which etc
for action in actions:
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "advcl" and token.head.text == action:
actions.append(token.text)
actionsPositions.append(j)
# Find actions that are in this format select/deselect -> finds deselect
for j in actionsPositions:
if j + 1 >= doc.__len__() or j + 2 >= doc.__len__() or j - 1 < 0 or j - 2 < 0:
continue
else:
if doc[j + 1].text == "/":
actions.append(doc[j + 2].text)
elif doc[j - 1].text == "/":
actions.append(doc[j - 2].text)
# Special case: log in,log out, sign in, sign up, sign out
# value -> position of the action in the sentence
# count -> position of the action in the actions and actionsPositions lists
for count, value in enumerate(actionsPositions):
if value - 1 < doc.__len__():
if actions[count] in ['log', 'sign'] and doc[value + 1].text in ['in', 'up', 'out']:
actions[count] = actions[count] + " " + doc[value + 1].text
actionsString = ",".join(actions)
actionsString = actionsString.lower()
# Lemmatized actions
actionLemma = []
for action in actions:
txt = nlp(action)
if txt.__len__() > 1:
actionLemma.append(txt[0].lemma_ + " " + txt[1].text)
else:
actionLemma.append(txt[0].lemma_)
actionLemmaString = ",".join(actionLemma)
# ---- Objects ----
for j in range(0, doc.__len__()):
token = doc[j]
for action in actions:
# use the nlp model to extract the action lemma
txt = nlp(action)
if token.dep_ == "dobj" and token.head.text == action:
objects.append(token.text)
objectsPositions.append(j)
actionsWithObjects.append(action + "/" + token.text)
actionsWithObjectsLemma.append(txt[0].lemma_ + "/" + token.lemma_)
# Special cases
# verbs that go with prepositions
prepFlag = False
for action in actions:
action = action.lower()
# use the nlp model to extract the action lemma
txt = nlp(action)
if action in verbList or txt[0].lemma_ in verbList:
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "prep" and token.head.text == action:
prepFlag = True
prepPos = j
if prepFlag == True and token.dep_ == "pobj" and token.head.text == doc[prepPos].text:
objects.append(token.text)
objectsPositions.append(j)
actionsWithObjects.append(action + "/" + token.text)
actionsWithObjectsLemma.append(txt[0].lemma_ + "/" + token.lemma_)
# Save the subject of the subordinating sentence
for action in actions:
# use the nlp model to extract the action lemma
txt = nlp(action)
for token in doc:
if token.dep_ in ["nsubj", "nsubjpass"] and token.head.text == action and token.text not in actors:
objects.append(token.text)
actionsWithObjects.append(action + "/" + token.text)
actionsWithObjectsLemma.append(txt[0].lemma_ + "/" + token.lemma_)
# Find the other objects
for foundObject in objects:
# Find the action that the object is linked to, in order to save the conj objects with the same action too
for token in doc:
if token.text == foundObject:
tempAction = token.head.text
for token in doc:
if token.text == tempAction:
txt = token.lemma_
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "conj" and token.head.text == foundObject:
objects.append(token.text)
objectsPositions.append(j)
actionsWithObjects.append(tempAction + "/" + token.text)
actionsWithObjectsLemma.append(txt + "/" + token.lemma_)
# Find objects that are in this format select/deselect -> finds deselect
for j in objectsPositions:
# Find the action that the object is linked to, in order to save the conj objects with the same action too
tempAction = doc[j].head.text
for token in doc:
if token.text == tempAction:
txt = token.lemma_
if j + 1 >= doc.__len__() or j + 2 >= doc.__len__() or j - 1 < 0 or j - 2 < 0:
continue
else:
if doc[j + 1].text == "/":
objects.append(doc[j + 2].text)
actionsWithObjects.append(action + "/" + token.text)
actionsWithObjectsLemma.append(txt + "/" + token.lemma_)
elif doc[j - 1].text == "/":
objects.append(doc[j - 2].text)
actionsWithObjects.append(action + "/" + token.text)
actionsWithObjectsLemma.append(txt + "/" + token.lemma_)
# Remove duplicates
objects = list(dict.fromkeys(objects))
objectsString = ",".join(objects)
objectsString = objectsString.lower()
actionsWithObjectsString = ",".join(actionsWithObjects)
actionsWithObjectsString = actionsWithObjectsString.lower()
# print(objects)
objectLemma = []
for foundObject in objects:
txt = nlp(foundObject)
objectLemma.append(txt[0].lemma_)
objectLemmaString = ",".join(objectLemma)
# Find the conj verbs objects if they exist
# conj case
# ie:
conjPos = 0
hasObject = False
for action in actions:
txt = nlp(action)
for token in doc:
if token.head.text == action and token.dep_ == "dobj":
hasObject = True
if not hasObject:
for pos, token in enumerate(doc):
if token.head.text == action and token.dep_ == "conj":
conjPos = pos
for token in doc:
if token.head.text == doc[conjPos].text and token.dep_ == "dobj":
actionsWithObjectsLemma.append(txt[0].lemma_ + "/" + token.lemma_)
actionsWithObjectsLemmaString = ",".join(actionsWithObjectsLemma)
# ---- Object Property ----
objectProperties = []
for foundObject in objects:
# Save the object position in the sentence
for j in range(0, doc.__len__()):
token = doc[j]
if token.text == foundObject:
objPos = j
# 1st case: save as object property the words between the article (or det dependency) and the object
# ie: attend some UI / UX lessons -> UI/UX
# ie: develop an awesome and beautiful features website -> awesome and beautiful features
detFlag = False
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "det" and token.head.text == foundObject:
detPos = j
detFlag = True
if detFlag:
for j in range(detPos + 1, objPos):
token = doc[j]
objectProperties.append(token.text)
# 2nd case: compound and amod
if not detFlag:
for token in doc:
if (token.dep_ == "compound" or token.dep_ == "amod") and token.head.text == foundObject:
objectProperties.append(token.text)
# 3d case: Find prep dependency of the object
# ie: A Developer must be able to have a jQuery plugin for Core Data Packages .
# finds: Core Data Packages
adpFlag = False
pobjFlag = False
prepProp = ""
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "prep" and token.head.text == foundObject:
adpPos = j
adpFlag = True
continue
if adpFlag:
if token.dep_ == "pobj" and token.head.text == doc[adpPos].text:
pobjPos = j
pobjFlag = True
if adpFlag and pobjFlag:
for j in range(adpPos + 1, pobjPos + 1):
token = doc[j]
prepProp = prepProp + " " + token.text
objectProperties.append(prepProp)
# 4th case:
# ie: A user must be able to create a user account by providing a username and a password.
# finds: username, password
# ie: A logged in user must be able to mark his bookmarks as public or private
# finds: public, private
adpFlag = False
pcompFlag = False
dobjFlag = False
amodFlag = False
for action in actions:
for j in range(0, doc.__len__()):
token = doc[j]
if token.dep_ == "prep" and token.head.text == action:
adpPos = j
adpFlag = True
continue
if adpFlag:
if token.dep_ == "pcomp" and token.head.text == doc[adpPos].text:
pcompPos = j
pcompFlag = True
# case: mark as public or private
elif token.dep_ == "amod" and token.head.text == doc[adpPos].text:
objectProperties.append(token.text)
elif token.dep_ == "pobj" and token.head.text == doc[adpPos].text:
objectProperties.append(token.text)
if adpFlag and pcompFlag:
for j in range(adpPos, doc.__len__()):
token = doc[j]
if token.dep_ == "dobj" and token.head.text == doc[pcompPos].text:
dobjFlag = True
dobjPos = j
objectProperties.append(token.text)
# 5th case:
# country's currency -> finds country
for foundObject in objects:
for token in doc:
if token.dep_ == "poss" and token.head.text == foundObject and token.pos_ != "PRON":
objectProperties.append(token.text)
# Find the other object properties
for objectProperty in objectProperties:
for token in doc:
if token.dep_ == "conj" and token.head.text == objectProperty:
objectProperties.append(token.text)
# Remove duplicates
objectProperties = list(dict.fromkeys(objectProperties))
for foundObject in objects:
for objectProperty in objectProperties:
if objectProperty == foundObject:
objectProperties.remove(objectProperty)
# print(objectProperties)
objectPropertiesString = ",".join(objectProperties)
objectPropertiesString = objectPropertiesString.lower()
# ----------------------------------------------------------------------
# Filter the stop words before export
aoFiltered =[]
for ao in actionsWithObjectsLemma:
temp = nlp(ao)
if temp[-1].is_stop == False and temp[0].text != "be":
aoFiltered.append(ao)
aoFilteredString = ",".join(aoFiltered)
# ----------------------------------------------------------------------
# Save the Ontology in different formats
if saveMode == 'ao-aa':
# Lemmatized Actions - Objects + Actor - Actions
actorActionsLemma = []
for action_Lemma in actionLemma:
actorActionsLemma.append(actorsLemmaString + "/" + action_Lemma)
actorActionsStringLemma = ",".join(actorActionsLemma)
if numFiles == 1:
for l in actorActionsLemma:
dataTransformed.append(l)
for l in actionsWithObjectsLemma:
dataTransformed.append(l)
# if actionsWithObjectsLemmaString != '':
# dataTransformed.append(actionsWithObjectsLemmaString)
else:
if actionsWithObjectsLemmaString == '':
dataTogetherString = actorActionsStringLemma
elif actorActionsStringLemma == '':
dataTogetherString = actionsWithObjectsLemmaString
else:
dataTogether = actionsWithObjectsLemmaString, actorActionsStringLemma
dataTogetherString = ",".join(dataTogether)
dataTogetherString = dataTogetherString.lower()
dataTransformed.append(dataTogetherString)
elif saveMode == 'ao-aa-f':
# Lemmatized Actions - Objects + Actor - Actions and FILTERED
actorActionsLemma = []
for action_Lemma in actionLemma:
temp = nlp(action_Lemma)
if temp[-1].text != 'be':
actorActionsLemma.append(actorsLemmaString + "/" + action_Lemma)
actorActionsStringLemma = ",".join(actorActionsLemma)
if numFiles == 1:
for l in actorActionsLemma:
dataTransformed.append(l)
for l in actionsWithObjectsLemma:
dataTransformed.append(l)
else:
if aoFilteredString == '':
dataTogetherString = actorActionsStringLemma
elif actorActionsStringLemma == '':
dataTogetherString = aoFilteredString
else:
dataTogether = aoFilteredString, actorActionsStringLemma
dataTogetherString = ",".join(dataTogether)
dataTogetherString = dataTogetherString.lower()
dataTransformed.append(dataTogetherString)
elif saveMode == 'all':
# All together and Lemmatized
dataTogether = actorsLemmaString, actionLemmaString, objectLemmaString, objectPropertiesString
dataTogetherString = ",".join(dataTogether)
dataTransformed.append(dataTogetherString.lower())
elif saveMode == 'ao':
# Lemmatized Actions - Objects
for l in actionsWithObjectsLemma:
dataTransformed.append(l)
elif saveMode == 'ao-f':
# Lemmatized Actions - Objects FILTERED
for l in aoFiltered:
dataTransformed.append(l)
# In case you want every file to be one row in the final dataset
if numFiles !=1:
newData = ",".join(dataTransformed)
# File
if numFiles == 1:
df = pd.DataFrame(dataTransformed, )
df.to_csv(f"{outputPath}", header=None, index=None)
# f = codecs.open(f"{outputPath}", "w", "utf-8")
# f.write(newData)
else:
f = codecs.open(f"{outputPath}/{k}.csv", "w", "utf-8")
f.write(newData)
f.close()
# In case you want every file to be one row in the final dataset
if numFiles != 1:
f = codecs.open(f"{outputPath}.txt", "w", "utf-8")
for k in range(1, numFiles + 1):
data = pd.read_csv(f"{outputPath}/{k}.csv", header=None, sep='\t')
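# ---------------------------------------------------------------------------
# A minimal sketch of how the ontology() routine above might be invoked. The
# paths are hypothetical placeholders; numFiles=1 writes a single CSV, while
# saveMode selects one of the export formats handled above ('ao', 'ao-f',
# 'ao-aa', 'ao-aa-f' or 'all'). Kept as a comment because it needs the spaCy
# model and input files to run.
# ontology(inputPath='requirements/1.txt',
#          outputPath='ontology_out/1.csv',
#          numFiles=1,
#          saveMode='ao-aa')
# ---------------------------------------------------------------------------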
# -*- coding: utf-8 -*-
'''
This code calculates impacts of temperature changes induced by aerosols on GDP
apply the Dell et al. damage function
distribution of Dell et al. parameter was sampled (1000 times) based on the provided median and standard error
by <NAME> (<EMAIL>)
'''
from netCDF4 import Dataset
import pandas as pd
import numpy as np
import _env
import datetime
import xarray as xr
nens = _env.nens
datasets = _env.datasets
year = _env.year
syr = str(year)
gdp_year = year
sgdp_year = str(gdp_year)
par = 'TREFHT'
ds = 'ERA-Interim'
p_scen = 'No-Aerosol'
if_temp = _env.odir_root + '/sim_temperature/Simulated_Global_and_Country_' + par + '_20yravg.nc'
if_ctry_list = _env.idir_root + '/regioncode/Country_List.xls'
if_ctry_pr = _env.idir_root + '/historical_stat/Ctry_Poor_Rich_from_Burke.csv' #adopt country list from Burke et al. 2018
if_ctry_gdpcap = _env.idir_root + '/historical_stat/' + '/API_NY.GDP.PCAP.KD_DS2_en_csv_v2.csv'
if_ctry_pop = _env.idir_root + '/historical_stat/' + '/API_SP.POP.TOTL_DS2_en_csv_v2.csv'
odir_gdp = _env.odir_root + '/gdp_' + ds + '/'
_env.mkdirs(odir_gdp)
#climatological temperature from three datasets
if_clim_temp = _env.odir_root + 'sim_temperature/Climatological_Temp_Ctry_3ds.csv'
itbl_clim_temp = pd.read_csv(if_clim_temp,index_col = 0)[['iso',ds]]
#country list
itbl_ctry_info = pd.read_csv(_env.odir_root + '/basic_stats/' + 'Country_Basic_Stats.csv')
#read global and country-level temperature
T_glob = Dataset(if_temp)['TREFHT_Global'][:,[0,1]]
T_ctry_full = Dataset(if_temp)['TREFHT_Country'][:,:,[0,1]]
#extract temperature for analyzed countries
T_ctry = T_ctry_full[((itbl_ctry_info['ind_in_full_list'].astype(int)).tolist()),:,:]
T_diff = T_ctry[:,:,1]-T_ctry[:,:,0]
T_ctry[:,:,0] = np.repeat(np.array(itbl_clim_temp[ds].values)[:,np.newaxis],8,axis=1)
T_ctry[:,:,1] = T_ctry[:,:,0] + T_diff
####country-level changes in GDP/cap growth rate####
########
# the net effect of a 1◦ C rise in temperature is to decrease growth rates in poor countries by −1.394 percentage points. (Dell,Jones, and Olken, 2012) Table 2
#median = -1.394
#standard error=0.408
if_gen_pars = 0
n_boot_sample = 1000
def cal_theta(theta,se_theta):
return np.random.normal(loc=theta,scale=se_theta,size=n_boot_sample)
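# ---------------------------------------------------------------------------
# A small sketch: cal_theta simply resamples the Dell, Jones and Olken
# coefficient (median -1.394 percentage points, standard error 0.408)
# n_boot_sample times; dividing by 100 converts it to a fraction, as done below.
_example_djo_draws = cal_theta(-1.394, 0.408) / 100
# ---------------------------------------------------------------------------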
if if_gen_pars:
#generate 1000 sets of parameters for the selected damage function
djo_pars = cal_theta(-1.394,0.408)/100
_env.mkdirs(_env.idir_root + '/Dell_parameters/')
xr.Dataset({'djo_pars' : xr.DataArray(djo_pars,dims = ['boots'])}).to_netcdf(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')
else:
djo_pars = xr.open_dataset(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')['djo_pars'].values
n_ctry = len(itbl_ctry_info.index)
ifs_rich = 1-itbl_ctry_info['poor']
poor_ind = np.where(ifs_rich == 0)[0]
diff_gr = np.zeros([n_boot_sample, np.shape(T_ctry)[0],np.shape(T_ctry)[1]])
diff_gr[:,poor_ind,:] = np.einsum('i,jk->ijk',djo_pars, np.squeeze(T_ctry[poor_ind,:,1]-T_ctry[poor_ind,:,0])) #*(0.2609434-1.655145)/100 #no-aerosol minus with-aerosol
diff_gdp = np.einsum('ijk,j->ijk',diff_gr,itbl_ctry_info[str(gdp_year) + '_gdp'])
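# The first einsum broadcasts each of the n_boot_sample damage parameters over the
# (country, ensemble) temperature differences of poor countries, giving growth-rate
# changes of shape (boots, countries, ensembles); the second scales them by each
# country's baseline GDP to obtain absolute GDP changes.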
_env.rmfile(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc')
onc = Dataset(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc', 'w', format='NETCDF4')
d_ctry = onc.createDimension('boots',n_boot_sample)
d_ctry = onc.createDimension('countries',n_ctry)
d_ens = onc.createDimension('ensembles',nens)
v_ratio = onc.createVariable('GDP_Ratio','f4',('boots','countries','ensembles'))
v_ratio.desc = 'Impacts of aerosol-induced cooling on annual GDP growth rate'
v_ratio[:] = diff_gr
v_gdp = onc.createVariable('GDP','f4',('boots','countries','ensembles'))
v_gdp.desc = 'Impacts of aerosol-induced cooling on country-level annual GDP'
v_gdp[:] = diff_gdp
#write global attribute
onc.by = '<NAME> (<EMAIL>)'
onc.desc = 'Impacts of aerosol-induced cooling on annual GDP and GDP growth rate (based on the damage function of Dell, Jones, and Olken, 2012)'
onc.creattime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
onc.close()
####summarize global and regional GDP changes####
itbl_gdp_baseline = itbl_ctry_info.copy()
odir_summary = _env.odir_root + 'summary_' + ds
_env.mkdirs(odir_summary)
writer =
|
pd.ExcelWriter(odir_summary + '/country_specific_statistics_GDP_'+ds+'_'+p_scen+'_Dell.xls')
|
pandas.ExcelWriter
|
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)],
names=["one", "two"],
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
@pytest.mark.parametrize(
"dtype,expected",
[
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
(
"category",
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
(
{"a": "category", "b": "category"},
DataFrame({"a":
|
Categorical([])
|
pandas.Categorical
|
from scipy import stats
import pandas as pd
import numpy as np
path_mutlivariate_feat_imps = '/n/groups/patel/samuel/EWAS/feature_importances_paper/'
Environmental = ['Clusters_Alcohol', 'Clusters_Diet', 'Clusters_Education', 'Clusters_ElectronicDevices',
'Clusters_Employment', 'Clusters_FamilyHistory', 'Clusters_Eyesight', 'Clusters_Mouth',
'Clusters_GeneralHealth', 'Clusters_Breathing', 'Clusters_Claudification', 'Clusters_GeneralPain',
'Clusters_ChestPain', 'Clusters_CancerScreening', 'Clusters_Medication', 'Clusters_Hearing',
'Clusters_Household', 'Clusters_MentalHealth', 'Clusters_OtherSociodemographics',
'Clusters_PhysicalActivityQuestionnaire', 'Clusters_SexualFactors', 'Clusters_Sleep', 'Clusters_SocialSupport',
'Clusters_SunExposure', 'Clusters_EarlyLifeFactors', 'Clusters_Smoking']
Biomarkers = ['Clusters_PhysicalActivity', 'Clusters_HandGripStrength', 'Clusters_BrainGreyMatterVolumes', 'Clusters_BrainSubcorticalVolumes',
'Clusters_HeartSize', 'Clusters_HeartPWA', 'Clusters_ECGAtRest', 'Clusters_AnthropometryImpedance',
'Clusters_UrineBiochemistry', 'Clusters_BloodBiochemistry', 'Clusters_BloodCount',
'Clusters_EyeAutorefraction', 'Clusters_EyeAcuity', 'Clusters_EyeIntraoculaPressure',
'Clusters_BraindMRIWeightedMeans', 'Clusters_Spirometry', 'Clusters_BloodPressure',
'Clusters_AnthropometryBodySize', 'Clusters_ArterialStiffness', 'Clusters_CarotidUltrasound',
'Clusters_BoneDensitometryOfHeel', 'Clusters_HearingTest', 'Clusters_CognitiveFluidIntelligence', 'Clusters_CognitiveMatrixPatternCompletion',
'Clusters_CognitiveNumericMemory', 'Clusters_CognitivePairedAssociativeLearning', 'Clusters_CognitivePairsMatching', 'Clusters_CognitiveProspectiveMemory',
'Clusters_CognitiveReactionTime', 'Clusters_CognitiveSymbolDigitSubstitution', 'Clusters_CognitiveTowerRearranging', 'Clusters_CognitiveTrailMaking']
Pathologies = ['medical_diagnoses_%s' % letter for letter in ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']]
Clusters = []
All = Environmental + Biomarkers + Pathologies #+ ['Genetics']
organs = ['\*', '*instances01', '*instances1.5x', '*instances23', 'Abdomen' , 'AbdomenLiver' , 'AbdomenPancreas' , 'Arterial' , 'ArterialCarotids' , 'ArterialPulseWaveAnalysis' , 'Biochemistry' , 'BiochemistryBlood' , 'BiochemistryUrine' , 'Brain' , 'BrainCognitive' , 'BrainMRI' , 'Eyes' , 'EyesAll' , 'EyesFundus' , 'EyesOCT' , 'Hearing' , 'Heart' , 'HeartECG' , 'HeartMRI' , 'ImmuneSystem' , 'Lungs' , 'Musculoskeletal' , 'MusculoskeletalFullBody' , 'MusculoskeletalHips' , 'MusculoskeletalKnees' , 'MusculoskeletalScalars' , 'MusculoskeletalSpine' , 'PhysicalActivity']
path_heritability = '/n/groups/patel/Alan/Aging/Medical_Images/GWAS_hits_Age'
def Create_data(corr_type, model):
df_corr_env =
|
pd.DataFrame(columns = ['env_dataset', 'organ_1', 'organ_2', 'corr', 'sample_size'])
|
pandas.DataFrame
|
#!/usr/bin/env python3
import os
import json
import h5py
import argparse
import pandas as pd
import numpy as np
import tinydb as db
from tinydb.storages import MemoryStorage
from pprint import pprint
import matplotlib.pyplot as plt
plt.style.use('../clint.mpl')
from matplotlib.colors import LogNorm
from pygama import DataGroup
import pygama.io.lh5 as lh5
import pygama.analysis.histograms as pgh
import pygama.analysis.peak_fitting as pgf
def main():
doc="""
analysis of Aug 2020 OPPI+CAGE commissioning runs (138-141)
tasks:
- load calibration from energy_cal
- show 1460 peak stability
- show removal of low-e retrigger noise
- look at waveforms near 5 MeV, confirm they're muon-related
- look at low-e waveforms, examine noise
- determine pz correction value
"""
rthf = argparse.RawTextHelpFormatter
par = argparse.ArgumentParser(description=doc, formatter_class=rthf)
arg, st, sf = par.add_argument, 'store_true', 'store_false'
arg('-q', '--query', nargs=1, type=str,
help="select file group to calibrate: -q 'run==1' ")
args = par.parse_args()
# load main DataGroup, select files from cmd line
dg = DataGroup('cage.json', load=True)
if args.query:
que = args.query[0]
dg.fileDB.query(que, inplace=True)
else:
dg.fileDB = dg.fileDB[-1:]
view_cols = ['runtype', 'run', 'cycle', 'startTime', 'runtime', 'threshold']
print(dg.fileDB[view_cols])
# -- run routines --
# show_raw_spectrum(dg)
# show_cal_spectrum(dg)
# show_wfs(dg)
# data_cleaning(dg)
# peak_drift(dg)
# pole_zero(dg)
label_alpha_runs(dg)
def show_raw_spectrum(dg):
"""
show spectrum w/ onbd energy and trapE
- get calibration constants for onbd energy and 'trapE' energy
- TODO: fit each expected peak and get resolution vs energy
"""
# get file list and load energy data (numpy array)
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
dsp_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
edata = lh5.load_nda(dsp_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/dsp')
rt_min = dg.fileDB['runtime'].sum()
u_start = dg.fileDB.iloc[0]['startTime']
t_start = pd.to_datetime(u_start, unit='s') # str
print('Found energy data:', [(et, len(ev)) for et, ev in edata.items()])
print(f'Runtime (min): {rt_min:.2f}')
elo, ehi, epb, etype = 0, 25000, 10, 'trapEmax'
ene_uncal = edata[etype]
hist, bins, _ = pgh.get_hist(ene_uncal, range=(elo, ehi), dx=epb)
# normalize by runtime
hist_rt = np.divide(hist, rt_min * 60)
plt.plot(np.nan, np.nan, '-w', lw=1, label=t_start)
plt.semilogy(bins[1:], hist_rt, ds='steps', c='b', lw=1,
label=f'{etype}, {rt_min:.2f} mins')
plt.xlabel(etype, ha='right', x=1)
plt.ylabel('cts / sec', ha='right', y=1)
plt.legend()
plt.tight_layout()
plt.show()
def show_cal_spectrum(dg):
"""
apply calibration to dsp file
"""
# get file list and load energy data (numpy array)
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
dsp_list = lh5_dir + dg.fileDB['dsp_path'] + '/' + dg.fileDB['dsp_file']
edata = lh5.load_nda(dsp_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/dsp')
rt_min = dg.fileDB['runtime'].sum()
u_start = dg.fileDB.iloc[0]['startTime']
t_start = pd.to_datetime(u_start, unit='s') # str
print('Found energy data:', [(et, len(ev)) for et, ev in edata.items()])
print(f'Runtime (min): {rt_min:.2f}')
# load calibration from peakfit
cal_db = db.TinyDB(storage=MemoryStorage)
with open('ecalDB.json') as f:
raw_db = json.load(f)
cal_db.storage.write(raw_db)
runs = dg.fileDB.run.unique()
if len(runs) > 1:
print("sorry, I can't do combined runs yet")
exit()
run = runs[0]
tb = cal_db.table("peakfit_trapEmax").all()
df_cal = pd.DataFrame(tb)
df_cal['run'] = df_cal['run'].astype(int)
df_run = df_cal.loc[df_cal.run==run]
cal_pars = df_run.iloc[0][['cal0','cal1','cal2']]
# compute calibrated energy
pol = np.poly1d(cal_pars) # handy numpy polynomial object
cal_data = pol(edata['trapEmax'])
elo, ehi, epb, etype = 0, 3000, 1, 'trapEmax_cal' # gamma region
elo, ehi, epb, etype = 2500, 8000, 10, 'trapEmax_cal' # overflow region
# elo, ehi, epb, etype = 0, 250, 1, 'trapEmax_cal' # low-e region
hist, bins, _ = pgh.get_hist(cal_data, range=(elo, ehi), dx=epb)
# normalize by runtime
hist_rt = np.divide(hist, rt_min * 60)
plt.plot(np.nan, np.nan, '-w', lw=1, label=f'start: {t_start}')
plt.plot(bins[1:], hist_rt, ds='steps', c='b', lw=1,
label=f'{etype}, {rt_min:.2f} mins')
plt.xlabel(etype, ha='right', x=1)
plt.ylabel('cts / sec', ha='right', y=1)
plt.legend(loc=1, fontsize=12)
plt.tight_layout()
plt.savefig('./plots/CalSpectrum.png')
# plt.show()
def show_wfs(dg):
"""
    show waveforms in different energy regions.
    use the hit file to select events
"""
# get file list and load hit data
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
df_hit = lh5.load_dfs(hit_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/hit')
print(df_hit)
print(df_hit.columns)
# settings
etype = 'trapEmax_cal'
nwfs = 20
# elo, ehi, epb = 0, 100, 0.2 # low-e region
elo, ehi, epb = 0, 20, 0.2 # noise region
# elo, ehi, epb = 1458, 1468, 1 # good physics events
# elo, ehi, epb = 6175, 6250, 1 # overflow peak
# elo, ehi, epb = 5000, 5200, 0.2 # lower overflow peak
# # diagnostic plot
# hE, xE, vE = pgh.get_hist(df_hit[etype], range=(elo, ehi), dx=epb)
# plt.plot(xE[1:], hE, c='b', ds='steps')
# plt.show()
# exit()
# select waveforms
idx = df_hit[etype].loc[(df_hit[etype] >= elo) &
(df_hit[etype] <= ehi)].index[:nwfs]
raw_store = lh5.Store()
tb_name = 'ORSIS3302DecoderForEnergy/raw'
raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']
f_raw = raw_list.values[0] # fixme, only works for one file rn
data_raw = raw_store.read_object(tb_name, f_raw, start_row=0, n_rows=idx[-1]+1)
wfs_all = data_raw['waveform']['values'].nda
wfs = wfs_all[idx.values, :]
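    # idx holds the hit-table row numbers that fall inside the energy window; reading the
    # raw file up to the last such row and slicing with idx keeps the waveforms aligned
    # with the selected hits (this assumes hit rows and raw rows share the same ordering).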
ts = np.arange(0, wfs.shape[1], 1)
# plot wfs
for iwf in range(wfs.shape[0]):
plt.plot(ts, wfs[iwf,:], lw=1)
plt.xlabel('time (clock ticks)', ha='right', x=1)
plt.ylabel('ADC', ha='right', y=1)
plt.show()
# plt.savefig('./plots/noise_wfs.png', dpi=300)
# plt.cla()
def data_cleaning(dg):
"""
using parameters in the hit file, plot 1d and 2d spectra to find cut values.
columns in file:
['trapE', 'bl', 'bl_sig', 'A_10', 'AoE', 'packet_id', 'ievt', 'energy',
'energy_first', 'timestamp', 'crate', 'card', 'channel', 'energy_cal',
'trapE_cal']
    note: 'energy_first' is the first value of the energy gate.
"""
i_plot = 0 # run all plots after this number
# get file list and load hit data
    # lh5_dir = dg.lh5_user_dir if user else dg.lh5_dir  # 'user' is not defined in this scope; lh5_dir is set unconditionally on the next line
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
df_hit = lh5.load_dfs(hit_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/hit')
# print(df_hit)
print(df_hit.columns)
# get info about df -- 'describe' is very convenient
dsc = df_hit[['bl','bl_sig','A_10','ts_sec']].describe()
# print(dsc)
# exit()
if i_plot <= 0:
# bl vs energy
elo, ehi, epb = 0, 50, 1
blo, bhi, bpb = 0, 10000, 100
nbx = int((ehi-elo)/epb)
nby = int((bhi-blo)/bpb)
h = plt.hist2d(df_hit['trapEmax_cal'], df_hit['bl'], bins=[nbx,nby],
range=[[elo, ehi], [blo, bhi]], cmap='jet')
cb = plt.colorbar(h[3], ax=plt.gca())
plt.xlabel('trapEmax_cal', ha='right', x=1)
plt.ylabel('bl', ha='right', y=1)
plt.tight_layout()
# plt.show()
plt.savefig('./plots/oppi_bl_vs_e.png', dpi=300)
cb.remove()
plt.cla()
# make a formal baseline cut from 1d histogram
hE, bins, vE = pgh.get_hist(df_hit['bl'], range=(blo, bhi), dx=bpb)
xE = bins[1:]
plt.semilogy(xE, hE, c='b', ds='steps')
bl_cut_lo, bl_cut_hi = 8000, 9500
plt.axvline(bl_cut_lo, c='r', lw=1)
plt.axvline(bl_cut_hi, c='r', lw=1)
plt.xlabel('bl', ha='right', x=1)
plt.ylabel('counts', ha='right', y=1)
# plt.show()
plt.savefig('./plots/oppi_bl_cut.png')
plt.cla()
if i_plot <= 1:
# A_10/trapEmax_cal vs trapEmax_cal (A/E vs E)
# use baseline cut
df_cut = df_hit.query('bl > 8000 and bl < 9500').copy()
# add new A/E column
df_cut['aoe'] = df_cut['A_10'] / df_cut['trapEmax_cal']
# alo, ahi, apb = -1300, 350, 1
# elo, ehi, epb = 0, 250, 1
alo, ahi, apb = 0, 0.4, 0.005
# elo, ehi, epb = 0, 3000, 10
elo, ehi, epb = 0, 6000, 10
nbx = int((ehi-elo)/epb)
nby = int((ahi-alo)/apb)
h = plt.hist2d(df_cut['trapEmax_cal'], df_cut['aoe'], bins=[nbx,nby],
range=[[elo, ehi], [alo, ahi]], cmap='jet', norm=LogNorm())
plt.xlabel('trapEmax_cal', ha='right', x=1)
plt.ylabel('A/E', ha='right', y=1)
plt.tight_layout()
# plt.show()
plt.savefig('./plots/oppi_aoe_vs_e.png', dpi=300)
plt.cla()
if i_plot <= 2:
# show effect of baseline cut on low-energy spectrum
df_cut = df_hit.query('bl > 8000 and bl < 9500')
etype = 'trapEmax_cal'
elo, ehi, epb = 0, 250, 0.5
# no cuts
h1, x1, v1 = pgh.get_hist(df_hit[etype], range=(elo, ehi), dx=epb)
x1 = x1[1:]
plt.plot(x1, h1, c='k', lw=1, ds='steps', label='raw')
# baseline cut
h2, x2, v2 = pgh.get_hist(df_cut[etype], range=(elo, ehi), dx=epb)
plt.plot(x1, h2, c='b', lw=1, ds='steps', label='bl cut')
plt.xlabel(etype, ha='right', x=1)
plt.ylabel('counts', ha='right', y=1)
plt.legend()
# plt.show()
plt.savefig('./plots/oppi_lowe_cut.png')
plt.cla()
if i_plot <= 3:
# show DCR vs E
etype = 'trapEmax_cal'
elo, ehi, epb = 0, 6000, 10
dlo, dhi, dpb = -1000, 1000, 10
nbx = int((ehi-elo)/epb)
nby = int((dhi-dlo)/dpb)
h = plt.hist2d(df_cut['trapEmax_cal'], df_cut['dcr'], bins=[nbx,nby],
range=[[elo, ehi], [dlo, dhi]], cmap='jet', norm=LogNorm())
plt.xlabel('trapEmax_cal', ha='right', x=1)
plt.ylabel('DCR', ha='right', y=1)
plt.tight_layout()
# plt.show()
plt.savefig('./plots/oppi_dcr_vs_e.png', dpi=300)
plt.cla()
def peak_drift(dg):
"""
show any drift of the 1460 peak (5 minute bins)
"""
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
df_hit = lh5.load_dfs(hit_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/hit')
df_hit.reset_index(inplace=True)
rt_min = dg.fileDB['runtime'].sum()
print(f'runtime: {rt_min:.2f} min')
# settings
elo, ehi, epb, etype = 1450, 1470, 1, 'trapEmax_cal'
df_hit = df_hit.query(f'trapEmax_cal > {elo} and trapEmax_cal < {ehi}').copy()
# # diagnostic plot
# hE, xE, vE = pgh.get_hist(df_hit[etype], range=(elo, ehi), dx=epb)
# plt.plot(xE[1:], hE, c='b', ds='steps')
# plt.show()
t0 = df_hit['ts_glo'].values[0]
df_hit['ts_adj'] = (df_hit['ts_glo'] - t0) / 60 # minutes after 0
tlo, thi, tpb = 0, df_hit['ts_adj'].max(), 1
nbx = int((thi-tlo)/tpb)
nby = int((ehi-elo)/epb)
h = plt.hist2d(df_hit['ts_adj'], df_hit['trapEmax_cal'], bins=[nbx,nby],
range=[[tlo, thi], [elo, ehi]], cmap='jet')
plt.xlabel(f'Time ({tpb:.1f} min/bin)', ha='right', x=1)
plt.ylabel('trapEmax_cal', ha='right', y=1)
plt.tight_layout()
# plt.show()
plt.savefig('./plots/oppi_1460_drift.png', dpi=300)
def pole_zero(dg):
"""
    NOTE: I think this result might be wrong; for the CAGE amp it should be
around 250 usec. Need to check.
"""
# load hit data
lh5_dir = os.path.expandvars(dg.config['lh5_dir'])
hit_list = lh5_dir + dg.fileDB['hit_path'] + '/' + dg.fileDB['hit_file']
df_hit = lh5.load_dfs(hit_list, ['trapEmax'], 'ORSIS3302DecoderForEnergy/hit')
df_hit.reset_index(inplace=True)
rt_min = dg.fileDB['runtime'].sum()
# print(f'runtime: {rt_min:.2f} min')
# load waveforms
etype = 'trapEmax_cal'
nwfs = 20
elo, ehi = 1455, 1465
# select waveforms
idx = df_hit[etype].loc[(df_hit[etype] >= elo) &
(df_hit[etype] <= ehi)].index[:nwfs]
raw_store = lh5.Store()
tb_name = 'ORSIS3302DecoderForEnergy/raw'
raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']
f_raw = raw_list.values[0] # fixme, only works for one file rn
data_raw = raw_store.read_object(tb_name, f_raw, start_row=0, n_rows=idx[-1]+1)
wfs_all = data_raw['waveform']['values'].nda
wfs = wfs_all[idx.values, :]
df_wfs =
|
pd.DataFrame(wfs)
|
pandas.DataFrame
|
"""A class of recommender systems working on the anime dataset found at:
https://www.kaggle.com/CooperUnion/anime-recommendations-database/discussion/30036
The (abstract) base class defines many useful methods, but leaves the two main
pieces, training and predicting, to be implemented by subclasses.
For now, only the standard latent factor model, with no user or item features,
has been implemented.
Written by: <NAME>, 04/2017
"""
from collections import defaultdict
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pandas as pd
import abc
from util.data import get_data, remove_users
from util.user_info import get_user_info
class RecSysModel(object):
__metaclass__ = abc.ABCMeta
def __init__(self, username, min_num):
self.users, self.anime = get_data()
self.username = username
self.train = remove_users(self.users, min_num)
self.add_to_users(username)
#------Setup------#
def add_to_users(self, username):
"""Explicitly add info of user for whom we're building the RecSys."""
ids, ratings = get_user_info(username)
tmp = np.vstack(
[np.tile(max(self.users.user_id)+1, len(ids)),
np.array(ids), np.array(ratings)])
tmp2 = pd.DataFrame(np.transpose(tmp))
tmp2.columns = ['user_id', 'anime_id', 'rating']
self.train = pd.concat([self.train, tmp2], ignore_index=True)
self.uid = max(self.users.user_id)+1
self.seen_anime = ids
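        # The new user is assigned id max(existing user_id) + 1; their (anime_id, rating)
        # pairs are appended to the training frame, and the rated anime ids are kept in
        # self.seen_anime.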
def train_test_split(self):
"""Split the train, test data if necessary."""
train, self.test = train_test_split(self.users, test_size = 0.1)
train, self.valid = train_test_split(train, test_size = 0.2)
#-------Methods to be defined by every subclass------#
@abc.abstractmethod
def train_model(self):
"""Trains the tensorflow model."""
pass
@abc.abstractmethod
def predict(self, mask):
"""
Predicts, for a given user, a certain set of shows.
        Mask is an array of 0s and 1s (or booleans) of length nAnime (the
        number of anime) indicating whether each anime should be predicted.
"""
pass
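        # Hypothetical usage sketch (not part of the original class): a mask selecting only
        # shows this user has not rated yet could be built as
        #   mask = ~np.isin(self.anime.anime_id.values, self.seen_anime)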
#-------Helper methods to convert anime headings-------#
def convert_ids_to_names(self, ids):
lookup = pd.Series(self.anime.name.values,index=self.anime.anime_id).to_dict()
namer = lambda id_: lookup[id_]
vfunc = np.vectorize(namer)
return vfunc(ids)
def convert_names_to_ids(self, names):
lookup =
|
pd.Series(self.anime.anime_id,index=self.anime.name.values)
|
pandas.Series
|
#!/usr/bin/env python
'''Run a reblocking analysis on pauxy QMC output files.'''
import glob
import h5py
import json
import numpy
import pandas as pd
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import pyblock
import scipy.stats
from pauxy.analysis.extraction import (
extract_mixed_estimates,
extract_data,
get_metadata, set_info,
extract_rdm
)
from pauxy.utils.misc import get_from_dict
from pauxy.utils.linalg import get_ortho_ao_mod
from pauxy.analysis.autocorr import reblock_by_autocorr
def average_single(frame, delete=True, multi_sym=False):
if multi_sym:
short = frame.groupby('Iteration')
else:
short = frame
means = short.mean()
err = short.aggregate(lambda x: scipy.stats.sem(x, ddof=1))
averaged = means.merge(err, left_index=True, right_index=True,
suffixes=('', '_error'))
columns = [c for c in averaged.columns.values if '_error' not in c]
columns = [[c, c+'_error'] for c in columns]
columns = [item for sublist in columns for item in sublist]
averaged.reset_index(inplace=True)
delcol = ['Weight', 'Weight_error']
for d in delcol:
if delete:
columns.remove(d)
return averaged[columns]
def average_ratio(numerator, denominator):
re_num = numerator.real
re_den = denominator.real
im_num = numerator.imag
im_den = denominator.imag
# When doing FP we need to compute E = \bar{ENumer} / \bar{EDenom}
# Only compute real part of the energy
num_av = (re_num.mean()*re_den.mean()+im_num.mean()*im_den.mean())
den_av = (re_den.mean()**2 + im_den.mean()**2)
mean = num_av / den_av
# Doing error analysis properly is complicated. This is not correct.
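    # What follows is first-order error propagation for the ratio E = X/Y of the real-part
    # means:  sigma_E / |E| ~ sqrt((sigma_X/X)**2 + (sigma_Y/Y)**2 - 2*cov(X, Y)/(n*X*Y)),
    # with sigma_X, sigma_Y the standard errors of the means and cov the sample covariance.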
re_nume = scipy.stats.sem(re_num)
re_dene = scipy.stats.sem(re_den)
# Ignoring the fact that the mean includes complex components.
cov = numpy.cov(re_num, re_den)[0,1]
nsmpl = len(re_num)
error = abs(mean) * ((re_nume/re_num.mean())**2 +
(re_dene/re_den.mean())**2 -
2*cov/(nsmpl*re_num.mean()*re_den.mean()))**0.5
return (mean, error)
def average_fp(frame):
iteration = numpy.real(frame['Iteration'].values)
frame = frame.drop('Iteration', axis=1)
real_df = frame.apply(lambda x: x.real)
imag_df = frame.apply(lambda x: x.imag)
real_df['Iteration'] = iteration
imag_df['Iteration'] = iteration
real = average_single(real_df, multi_sym=True)
imag = average_single(imag_df, multi_sym=True)
results = pd.DataFrame()
re_num = real.ENumer.values
re_den = real.EDenom.values
im_num = imag.ENumer.values
im_den = imag.EDenom.values
results['Iteration'] = sorted(real_df.groupby('Iteration').groups.keys())
# When doing FP we need to compute E = \bar{ENumer} / \bar{EDenom}
# Only compute real part of the energy
results['E'] = (re_num*re_den+im_num*im_den) / (re_den**2 + im_den**2)
# Doing error analysis properly is complicated. This is not correct.
re_nume = real.ENumer_error.values
re_dene = real.EDenom_error.values
# Ignoring the fact that the mean includes complex components.
cov_nd = real_df.groupby('Iteration').apply(lambda x: x['ENumer'].cov(x['EDenom'])).values
nsamp = len(re_nume)
results['E_error'] = numpy.abs(results.E) * ((re_nume/re_num)**2 +
(re_dene/re_den)**2 -
2*cov_nd/(nsamp*re_num*re_den))**0.5
return results
def reblock_mixed(groupby, columns, verbose=False):
analysed = []
for group, frame in groupby:
drop = ['index', 'Time', 'EDenom', 'ENumer', 'Weight', 'Overlap',
'WeightFactor', 'EHybrid']
if not verbose:
drop += ['E1Body', 'E2Body']
short = frame.reset_index()
try:
short = short.drop(columns+drop, axis=1)
except KeyError:
short = short.drop(columns+['index'], axis=1)
(data_len, blocked_data, covariance) = pyblock.pd_utils.reblock(short)
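        # pyblock reblocks each (correlated) column; the row whose 'optimal block' entry
        # holds the '<--- ' marker is the recommended block size, and data_len at that row
        # is reported below as the effective number of samples.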
reblocked = pd.DataFrame({'ETotal': [0.0]})
for c in short.columns:
try:
rb = pyblock.pd_utils.reblock_summary(blocked_data.loc[:,c])
reblocked[c] = rb['mean'].values[0]
reblocked[c+'_error'] = rb['standard error'].values
reblocked[c+'_error_error'] = rb['standard error error'].values
ix = list(blocked_data[c]['optimal block']).index('<--- ')
reblocked[c+'_nsamp'] = data_len.values[ix]
except KeyError:
if verbose:
print("Reblocking of {:4} failed. Insufficient "
"statistics.".format(c))
for i, v in enumerate(group):
reblocked[columns[i]] = v
analysed.append(reblocked)
final = pd.concat(analysed, sort=True)
y = short["ETotal"].values
reblocked_ac = reblock_by_autocorr(y)
for c in reblocked_ac.columns:
final[c] = reblocked_ac[c].values
return final
def reblock_free_projection(frame):
short = frame.drop(['Time', 'Weight', 'ETotal'], axis=1)
analysed = []
(data_len, blocked_data, covariance) = pyblock.pd_utils.reblock(short)
reblocked = pd.DataFrame()
denom = blocked_data.loc[:,'EDenom']
for c in short.columns:
if c != 'EDenom':
nume = blocked_data.loc[:,c]
cov = covariance.xs('EDenom', level=1)[c]
ratio = pyblock.error.ratio(nume, denom, cov, data_len)
rb = pyblock.pd_utils.reblock_summary(ratio)
try:
if c == 'ENumer':
c = 'ETotal'
reblocked[c] = rb['mean'].values
reblocked[c+'_error'] = rb['standard error'].values
except KeyError:
print("Reblocking of {:4} failed. Insufficient "
"statistics.".format(c))
analysed.append(reblocked)
if len(analysed) == 0:
return None
else:
return pd.concat(analysed)
def reblock_local_energy(filename, skip=0):
data = extract_mixed_estimates(filename)
results = reblock_mixed(data.apply(numpy.real)[skip:])
if results is None:
return None
else:
try:
energy = results['ETotal'].values[0]
error = results['ETotal_error'].values[0]
return (energy, error)
except KeyError:
return None
def average_rdm(files, skip=1, est_type='back_propagated', rdm_type='one_rdm', ix=None):
rdm_series = extract_rdm(files, est_type=est_type, rdm_type=rdm_type, ix=ix)
rdm_av = rdm_series[skip:].mean(axis=0)
rdm_err = rdm_series[skip:].std(axis=0, ddof=1) / len(rdm_series)**0.5
return rdm_av, rdm_err
def average_correlation(gf):
ni = numpy.diagonal(gf, axis1=2, axis2=3)
mg = gf.mean(axis=0)
hole = 1.0 - numpy.sum(ni, axis=1)
hole_err = hole.std(axis=0, ddof=1) / len(hole)**0.5
spin = 0.5*(ni[:,0,:]-ni[:,1,:])
spin_err = spin.std(axis=0, ddof=1) / len(hole)**0.5
return (hole.mean(axis=0), hole_err, spin.mean(axis=0), spin_err, gf)
def average_tau(frames):
data_len = frames.size()
means = frames.mean()
err = numpy.sqrt(frames.var())
covs = frames.cov().loc[:,'ENumer'].loc[:, 'EDenom']
energy = means['ENumer'] / means['EDenom']
sqrtn = numpy.sqrt(data_len)
energy_err = ((err['ENumer']/means['ENumer'])**2.0 +
(err['EDenom']/means['EDenom'])**2.0 -
2*covs/(means['ENumer']*means['EDenom']))**0.5
energy_err = abs(energy/sqrtn) * energy_err
eproj = means['ETotal']
eproj_err = err['ETotal']/numpy.sqrt(data_len)
weight = means['Weight']
weight_error = err['Weight']
numerator = means['ENumer']
numerator_error = err['ENumer']
results = pd.DataFrame({'ETotal': energy, 'ETotal_error': energy_err,
'Eproj': eproj,
'Eproj_error': eproj_err,
'weight': weight,
'weight_error': weight_error,
'numerator': numerator,
'numerator_error': numerator_error}).reset_index()
return results
def analyse_back_propagation(frames):
frames[['E', 'E1b', 'E2b']] = frames[['E','E1b','E2b']]
frames = frames.apply(numpy.real)
frames = frames.groupby(['nbp','dt'])
data_len = frames.size()
means = frames.mean().reset_index()
# calculate standard error of the mean for grouped objects. ddof does
# default to 1 for scipy but it's different elsewhere, so let's be careful.
errs = frames.aggregate(lambda x: scipy.stats.sem(x, ddof=1)).reset_index()
full = pd.merge(means, errs, on=['nbp','dt'], suffixes=('','_error'))
columns = full.columns.values[2:]
columns = numpy.insert(columns, 0, 'nbp')
columns = numpy.insert(columns, 1, 'dt')
return full[columns]
def analyse_itcf(itcf):
means = itcf.mean(axis=(0,1), dtype=numpy.float64)
n = itcf.shape[0]*itcf.shape[1]
errs = (
itcf.std(axis=(0,1), ddof=1, dtype=numpy.float64) / numpy.sqrt(n)
)
return (means, errs)
def analyse_simple(files, start_time):
data = pauxy.analysis.extraction.extract_hdf5_data_sets(files)
norm_data = []
for (g, f) in zip(data, files):
(m, norm, bp, itcf, itcfk, mixed_rdm, bp_rdm) = g
dt = m.get('qmc').get('dt')
free_projection = m.get('propagators').get('free_projection')
step = m.get('qmc').get('nmeasure')
read_rs = m.get('psi').get('read_file') is not None
nzero = numpy.nonzero(norm['Weight'].values)[0][-1]
start = int(start_time/(step*dt)) + 1
if read_rs:
start = 0
if free_projection:
reblocked = average_fp(norm[start:nzero])
else:
reblocked = reblock_mixed(norm[start:nzero].apply(numpy.real))
columns = pauxy.analysis.extraction.set_info(reblocked, m)
norm_data.append(reblocked)
return
|
pd.concat(norm_data)
|
pandas.concat
|
from __future__ import with_statement, print_function, absolute_import
from itertools import *
from functools import *
import re
from collections import OrderedDict
import numpy
import pandas
from pandas import DataFrame, Series
from stitch.core.utils import Base
# ------------------------------------------------------------------------------
'''The stitch_phrase module contains the StitchPhrase class.
The StitchPhrase class is used for parsing strings and generating regular
expressions according to the DTT (determiner, token, terminator) paradigm.
Platform:
Unix
Author:
<NAME> <<EMAIL>> <http://www.AlexBraunVFX.com>
'''
SEP = '\xff'
class StitchWord(Base):
'''
Class for representing a word within the Stitch grammatical paradigm
A StitchWord functions as a token within a larger combinatorial grammar.
They are defined by three components rather a single one. The first is
called the "determiner", which is a list of regular expressions used to
demarcate the beginning of the word. The second is called the "token",
which is a list of regular expressions used to capture the substring you are
actually interested in. The last is called the "terminator", which is a
list of regular expressions used to demarcate the end of the word. This
three-part paradigm is called the DKT (Determiner, Token, Terminator)
paradigm.
With this paradigm, StitchWords can not only parse strings by internally
defined regular expressions, but they can diagnose what is wrong with them
when they fail and repair them. Both diagnostics and repair function by
means of component mutations made possible by the DKT paradigm.
StitchWords are fit into StitchPhrases which utilize these components in
order to chain words together into new phrase structures and perform
mutations.
'''
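    # Illustrative sketch (an assumption, not part of the stitch API): a DTT word behaves
    # roughly like the regex (?:determiner)(token)(?:terminator), e.g.
    #   re.search(r'(?:\$)(\d+)(?:\.)', 'price $42.').group(1)  ->  '42'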
def __init__(self, descriptor='token',
determiners=[''], tokens=['.*'], terminators=[''],
flags=0, capture=[0, 1, 0], restricted=True,
data=None):
self._restricted = restricted
def reduce(raw):
raw = re.sub('(?<!\\\\)\{.*(?<!\\\\)\}', '', raw)
raw = re.sub('(?<!\\\\)\(.*(?<!\\\\)\)', '', raw)
raw = re.sub('(?<!\\\\)[.?+*^$\[\]]', '', raw)
return raw
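        # reduce() strips unescaped regex syntax ({...} and (...) groups plus the
        # metacharacters . ? + * ^ $ [ ]) from a pattern, leaving only its literal text;
        # the stripped determiners/terminators become the marker table built below.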
self._data = data
if not data:
markers = determiners + terminators
markers = [reduce(x) for x in markers]
markers =
|
DataFrame(markers, columns=['raw'])
|
pandas.DataFrame
|
import unittest
from parameterized import parameterized
import pandas
import numpy
import pdrle
class TestPdrle(unittest.TestCase):
# test encode
@parameterized.expand([
[pandas.Series(["a", "a", "b", "b", "b", "a", "a", "c"]),
pandas.DataFrame({"vals": ["a", "b", "a", "c"],
"runs": [2, 3, 2, 1]})],
[pandas.Series(["home", "home", "home", "home"]),
pandas.DataFrame({"vals": ["home"],
"runs": [4]})],
[pandas.Series(["home", "home", numpy.nan, numpy.nan, numpy.nan, "home", "home"]),
pandas.DataFrame({"vals": ["home", numpy.nan, "home"],
"runs": [2, 3, 2]})],
[pandas.Series([1, 1, 1, 1, 1, 1, 1]),
pandas.DataFrame({"vals": [1],
"runs": [7]})],
[pandas.Series([2]),
pandas.DataFrame({"vals": [2],
"runs": [1]})],
[pandas.Series({"a": 1, "b": 1, "c": numpy.nan, "d": numpy.nan, "e": numpy.nan, "f": 2}),
pandas.DataFrame({"vals": [1, numpy.nan, 2],
"runs": [2, 3, 1]})]
])
def test_encode(self, input_data, expected_output):
actual_output = pdrle.encode(input_data)
pandas.testing.assert_frame_equal(actual_output, expected_output)
# test decode
@parameterized.expand([
[pandas.Series(["a", "a", "b", "b", "b", "a", "a", "c"]),
pandas.DataFrame({"vals": ["a", "b", "a", "c"],
"runs": [2, 3, 2, 1]})],
[pandas.Series([1, 1, 1, 1, 1, 1, 1]),
pandas.DataFrame({"vals": [1],
"runs": [7]})],
[pandas.Series([2]),
pandas.DataFrame({"vals": [2],
"runs": [1]})]
])
def test_decode(self, expected_output, input_data):
actual_output = pdrle.decode(input_data.vals, input_data.runs)
pandas.testing.assert_series_equal(actual_output, expected_output)
# test get_id
@parameterized.expand([
[
|
pandas.Series(["a", "a", "b", "b", "b", "a", "a", "c"])
|
pandas.Series
|
# This script extracts the execution time for
# various settings of tsfresh
# using different input data
# Attention: it will run for ~half a day
# Do these calculations in a controlled environment
# (e.g. a cloud provider VM)
# You will need to have b2luigi installed.
from tsfresh.feature_extraction import ComprehensiveFCParameters, MinimalFCParameters, extract_features
import pandas as pd
import numpy as np
from time import time
import b2luigi as luigi
import json
class DataCreationTask(luigi.Task):
"""Create random data for testing"""
num_ids = luigi.IntParameter(default=100)
time_series_length = luigi.IntParameter()
random_seed = luigi.IntParameter()
def output(self):
yield self.add_to_output("data.csv")
def run(self):
np.random.seed(self.random_seed)
df = pd.concat([
pd.DataFrame({
"id": [i] * self.time_series_length,
"time": range(self.time_series_length),
"value": np.random.randn(self.time_series_length)
})
for i in range(self.num_ids)
])
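        # The result is a long-format frame with columns id, time and value (num_ids blocks
        # of time_series_length rows each), which tsfresh's extract_features can consume via
        # column_id='id' and column_sort='time'.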
with self._get_output_target("data.csv").open("w") as f:
df.to_csv(f)
@luigi.requires(DataCreationTask)
class TimingTask(luigi.Task):
"""Run tsfresh with the given parameters"""
feature_parameter = luigi.DictParameter(hashed=True)
n_jobs = luigi.IntParameter()
try_number = luigi.IntParameter()
def output(self):
yield self.add_to_output("result.json")
def run(self):
input_file = self._get_input_targets("data.csv")[0]
with input_file.open("r") as f:
df =
|
pd.read_csv(f)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_censor_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
assert_frame_equal(censoredw, censored)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
|
assert_frame_equal(df, expected, check_like=True)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
import re
from nltk.tokenize import MWETokenizer,word_tokenize
import unicodedata
import codecs
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
TRAIN_FILE = "train.csv"
TEST_FILE = "test.csv"
def import_data():
tag =
|
pd.read_excel('bow_tag.xlsx')
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
            # this however will raise as it cannot be sorted (on PY3 or with
            # older numpy versions)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
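        # unused categories ('A', 'C', 'E', 'G') are dropped; NaN values keep code -1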
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat =
|
Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
|
pandas.Categorical
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_perserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_baseline_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_n_days_billing_period_overshoot(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2017, 11, 9, tzinfo=pytz.UTC),
max_days=45,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 526.25
assert len(warnings) == 0
def test_get_baseline_data_too_far_from_date(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
end_date = datetime(2020, 11, 9, tzinfo=pytz.UTC)
max_days = 45
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 1393.4
assert len(warnings) == 0
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (3, 1)
assert round(baseline_data.value.sum(), 2) == 2043.92
assert len(warnings) == 0
# Includes 3 data points because data at index -3 is closer to start target
    # than data at index -2
start_target = baseline_data.index[-1] - timedelta(days=max_days)
assert abs((baseline_data.index[0] - start_target).days) < abs(
(baseline_data.index[1] - start_target).days
)
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end, max_days=None)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_terms_unrecognized_method(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index, term_lengths=[365], method="unrecognized")
def test_get_terms_unsorted_index(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index[::-1], term_lengths=[365])
def test_get_terms_bad_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
terms = get_terms(
meter_data.index,
term_lengths=[60, 60, 60],
term_labels=["abc", "def"], # too short
)
def test_get_terms_default_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert [t.label for t in terms] == ["term_001", "term_002", "term_003"]
def test_get_terms_custom_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(
meter_data.index, term_lengths=[60, 60, 60], term_labels=["abc", "def", "ghi"]
)
assert [t.label for t in terms] == ["abc", "def", "ghi"]
def test_get_terms_empty_index_input(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index[:0], term_lengths=[60, 60, 60])
assert len(terms) == 0
def test_get_terms_strict(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
strict_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="strict",
)
assert len(strict_terms) == 2
year1 = strict_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (12,)
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert (
year1.target_end_date
== pd.Timestamp("2017-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert (
year1.actual_start_date
== year1.index[0]
== pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
)
assert (
year1.actual_end_date
== year1.index[-1]
== pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
)
assert year1.actual_term_length_days == 332
assert year1.complete
year2 = strict_terms[1]
assert year2.index.shape == (13,)
assert year2.label == "year2"
assert year2.target_start_date == pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
assert (
year2.target_end_date
== pd.Timestamp("2018-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year2.target_term_length_days == 365
assert (
year2.actual_start_date
== year2.index[0]
== pd.Timestamp("2016-12-19 06:00:00+00:00", tz="UTC")
)
assert (
year2.actual_end_date
== year2.index[-1]
== pd.Timestamp("2017-12-22 06:00:00+0000", tz="UTC")
)
assert year2.actual_term_length_days == 368
assert year2.complete
def test_get_terms_nearest(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
nearest_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="nearest",
)
assert len(nearest_terms) == 2
year1 = nearest_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (13,)
assert year1.index[0] == pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
assert year1.index[-1] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert year1.actual_term_length_days == 365
assert year1.complete
year2 = nearest_terms[1]
assert year2.label == "year2"
assert year2.index.shape == (13,)
assert year2.index[0] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.index[-1] == pd.Timestamp("2018-01-20 06:00:00+0000", tz="UTC")
assert year2.target_start_date == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year1.target_term_length_days == 365
assert year2.actual_term_length_days == 364
assert not year2.complete # no remaining index
# check completeness case with a shorter final term
nearest_terms = get_terms(
meter_data.index,
term_lengths=[365, 340],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="nearest",
)
year2 = nearest_terms[1]
assert year2.label == "year2"
assert year2.index.shape == (12,)
assert year2.index[0] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.index[-1] == pd.Timestamp("2017-12-22 06:00:00+00:00", tz="UTC")
assert year2.target_start_date == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert year2.target_term_length_days == 340
assert year2.actual_term_length_days == 335
assert year2.complete # has remaining index
def test_term_repr(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert repr(terms[0]) == (
"Term(label=term_001, target_term_length_days=60, actual_term_length_days=29,"
" complete=True)"
)
def test_remove_duplicates_df():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
df = pd.DataFrame({"value": [1, 2, 3]}, index=index)
assert df.shape == (3, 1)
df_dedupe = remove_duplicates(df)
assert df_dedupe.shape == (2, 1)
assert list(df_dedupe.value) == [1, 2]
def test_remove_duplicates_series():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
series = pd.Series([1, 2, 3], index=index)
assert series.shape == (3,)
series_dedupe = remove_duplicates(series)
assert series_dedupe.shape == (2,)
assert list(series_dedupe) == [1, 2]
def test_as_freq_hourly_to_daily(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
meter_data.iloc[-1]["value"] = np.nan
assert meter_data.shape == (19417, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (811,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21926.0
def test_as_freq_daily_to_daily(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
assert meter_data.shape == (810, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (810,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21925.8
def test_as_freq_hourly_to_daily_include_coverage(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
meter_data.iloc[-1]["value"] = np.nan
assert meter_data.shape == (19417, 1)
as_daily = as_freq(meter_data.value, freq="D", include_coverage=True)
assert as_daily.shape == (811, 2)
assert round(meter_data.value.sum(), 1) == round(as_daily.value.sum(), 1) == 21926.0
def test_clean_caltrack_billing_daily_data_billing(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
cleaned_data = clean_caltrack_billing_daily_data(meter_data, "billing_monthly")
assert cleaned_data.shape == (27, 1)
pd.testing.assert_frame_equal(meter_data, cleaned_data)
def test_clean_caltrack_billing_daily_data_daily(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
cleaned_data = clean_caltrack_billing_daily_data(meter_data, "daily")
assert cleaned_data.shape == (810, 1)
|
pd.testing.assert_frame_equal(meter_data, cleaned_data)
|
pandas.testing.assert_frame_equal
|
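A minimal, self-contained sketch of the pandas.testing.assert_frame_equal call completed above; the frame contents here are illustrative, not taken from the test fixtures.

import pandas as pd

left = pd.DataFrame({"value": [1.0, 2.0]},
                    index=pd.DatetimeIndex(["2017-01-01", "2017-01-02"]))
right = left.copy()
pd.testing.assert_frame_equal(left, right)  # silent on success, raises AssertionError on any mismatch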
#!/usr/bin/env python
# System
import tempfile
import bz2
import os
import time
import random
from multiprocessing import Pool
# Third Party
import pandas as pd
import numpy as np
import randomstate as rnd
# First Party
from benchmark_base import Benchmark
from submission_criteria.concordance import get_competition_variables_from_df, has_concordance, get_sorted_split
N_SAMPLES = 100 * 1000
N_RUNS = 250
class BenchmarkConcordance(Benchmark):
@staticmethod
def load_data():
data_frames = dict()
for sample_type, sample_file in [('train',
'data/sample_training.csv.bz2'),
('predict',
'data/sample_tournament.csv.bz2'),
('result',
'data/sample_result.csv.bz2')]:
with tempfile.NamedTemporaryFile() as temp_file, \
open(temp_file.name, 'wb') as uncompressed_file, \
bz2.BZ2File(sample_file, 'rb') as compressed_file:
for data in iter(lambda: compressed_file.read(1000 * 1024),
b''):
uncompressed_file.write(data)
data_frames[sample_type] = pd.read_csv(temp_file)
return data_frames['train'], data_frames['predict'], data_frames[
'result']
def gen_more_data(self, train: pd.DataFrame, predict: pd.DataFrame,
result: pd.DataFrame):
new_train = self.gen_similar_df(train, data_types=['train'])
new_predict = self.gen_similar_df(
predict, data_types=['live', 'validation', 'test'])
sample = result.sample(
len(new_predict), replace=True).probability.copy().values.ravel()
new_result = pd.DataFrame.from_dict({
'id':
new_predict.id.copy(),
'probability':
sample + rnd.normal(
loc=0.0, scale=0.025, size=(len(new_predict), ))
})
return new_train, new_predict, new_result
@staticmethod
def gen_similar_df(df: pd.DataFrame, data_types: list) -> pd.DataFrame:
sample_batch_size = 500
new_df = pd.DataFrame(data=None, columns=df.columns)
features = [col for col in df.columns if 'feature' in col]
for batch_nr in range(N_SAMPLES // sample_batch_size):
sample = df.sample(sample_batch_size, replace=True)
sample = sample[features] + rnd.normal(
loc=0.0, scale=0.1, size=sample[features].shape)
sample = sample.as_matrix()
new_ids = np.array([
batch_nr * sample_batch_size + j
for j in range(sample_batch_size)
])
data_types = [
random.choice(data_types) for _ in range(sample_batch_size)
]
new_batch = {
'id':
new_ids,
'era': [
'era%s' % random.choice([i + 1 for i in range(99)])
for _ in range(sample_batch_size)
],
'data_type':
data_types,
'target': [
random.choice([0, 1])
if data_types[i] != 'live' else np.nan
for i in range(sample_batch_size)
]
}
for f_num, feature in enumerate(features):
new_batch[feature] = sample[:, f_num]
new_df = pd.concat((new_df,
|
pd.DataFrame.from_dict(new_batch)
|
pandas.DataFrame.from_dict
|
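A minimal sketch of the pandas.DataFrame.from_dict call used in the completion above; the batch dictionary is hypothetical.

import pandas as pd

batch = {"id": [0, 1, 2],
         "era": ["era1", "era1", "era2"],
         "target": [0, 1, 0]}
df = pd.DataFrame.from_dict(batch)  # dict keys become columns, list values become column data
print(df.shape)                     # (3, 3)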
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 14:02:30 2017
@author: rickdberg
Apply gridded datasets to individual hole locations
['etopo1_depth', 'surface_porosity', 'sed_thickness_combined',
'crustal_age','coast_distance', 'ridge_distance', 'seamount',
'surface_productivity','toc_seiter', 'opal', 'caco3',
'sed_rate_burwicz', 'woa_temp', 'woa_salinity', 'woa_o2',
'caco3_archer','acc_rate_archer','toc_combined', 'toc_wood',
'sed_rate_combined','lithology','lith1','lith2','lith3','lith4',
'lith5','lith6','lith7','lith8','lith9','lith10','lith11',
'lith12','lith13']
"""
import numpy as np
import pandas as pd
import scipy as sp
import rasterio
from user_parameters import (engine, hole_info, std_grids_path, ml_inputs_path)
# Hole data
sql = """SELECT *
FROM {}
; """.format(hole_info)
hole_data =
|
pd.read_sql(sql, engine)
|
pandas.read_sql
|
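A minimal sketch of pandas.read_sql, assuming a SQLAlchemy engine; the in-memory SQLite database and the summary_all table name below are placeholders, not the project's actual hole_info source.

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")  # placeholder engine
pd.DataFrame({"site_hole": ["U1301B"], "lat": [47.75]}).to_sql("summary_all", engine, index=False)
hole_data = pd.read_sql("SELECT * FROM summary_all", engine)  # returns a DataFrame of the query result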
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from json import JSONDecoder
import random
import pygal
from pygal.style import Style
import pandas
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
# ############### Errors ################
class DateError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# ############### Tools ################
def buildDoubleIndex(index1, index2, datatype):
it = -1
newindex1 = []
for index in index2:
if index == 0:
it += 1
newindex1.append(index1[it])
arrays = [newindex1, index2]
tuples = list(zip(*arrays))
return pandas.MultiIndex.from_tuples(tuples, names=['event', datatype])
def buildNewColumn(index2, column):
it = -1
newcolumn = []
for index in index2:
if index == 0:
it += 1
newcolumn.append(column[it])
return newcolumn
def dateInRange(datetimeTested, begin=None, end=None):
if begin is None:
begin = datetime(1970, 1, 1)
if end is None:
end = datetime.now()
return begin <= datetimeTested <= end
def addColumn(dataframe, columnList, columnName):
dataframe.loc[:, columnName] = pandas.Series(columnList, index=dataframe.index)
def toDatetime(date):
return parse(date)
def checkDateConsistancy(begindate, enddate, lastdate):
if begindate is not None and enddate is not None:
if begindate > enddate:
raise DateError('begindate ({}) cannot be after enddate ({})'.format(begindate, enddate))
if enddate is not None:
if toDatetime(enddate) < lastdate:
raise DateError('enddate ({}) cannot be before lastdate ({})'.format(enddate, lastdate))
if begindate is not None:
if toDatetime(begindate) > datetime.now():
raise DateError('begindate ({}) cannot be after today ({})'.format(begindate, datetime.now().date()))
def setBegindate(begindate, lastdate):
return max(begindate, lastdate)
def setEnddate(enddate):
return min(enddate, datetime.now())
def getLastdate(last):
return (datetime.now() - timedelta(days=int(last))).replace(hour=0, minute=0, second=0, microsecond=0)
# ############### Formatting ################
def eventsListBuildFromList(filename):
with open(filename, 'r') as myfile:
s = myfile.read().replace('\n', '')
decoder = JSONDecoder()
s_len = len(s)
Events = []
end = 0
while end != s_len:
Event, end = decoder.raw_decode(s, idx=end)
Events.append(Event)
data = []
for e in Events:
data.append(
|
pandas.DataFrame.from_dict(e, orient='index')
|
pandas.DataFrame.from_dict
|
import argparse
import gc
import logging
import os
import re
import traceback
import warnings
from functools import partial
from math import ceil
from multiprocessing import Pool
import pandas as pd
from util import (download_csv_files, get_cos_client, get_mainfest_header, get_manifest_data, get_md5, load_config,
local_file_append_df, mkdir, now_time_fmt)
warnings.filterwarnings("ignore", 'This pattern has match groups')  # suppress the "This pattern has match groups" UserWarning
parser = argparse.ArgumentParser(description="Anylysis inventory")
parser.add_argument("-secret_id", help="S3 Access Id", action="store")
parser.add_argument("-secret_key", help="S3 Access Key", action="store")
parser.add_argument("-src_manifest", help="Source S3 Manifest Path", action="store")
parser.add_argument("-src_region", help="Source S3 Bucket Region", action="store")
parser.add_argument("-src_bucket", help="Source S3 Bucket", action="store")
parser.add_argument("-dst_manifest", help="Destination S3 Manifest Path", action="store")
parser.add_argument("-dst_region", help="Destination S3 Bucket Region", action="store")
parser.add_argument("-dst_bucket", help="Destination S3 Bucket", action="store")
parser.add_argument("-match_key", help="Match Field In Manifest, Default: Key", nargs='*')
parser.add_argument('-start_time', help="Start Time", action='store')
parser.add_argument('-end_time', help="End Time", action='store')
parser.add_argument('-d', '--debug', action='store_true', help='print debug messages to stderr')
parser.add_argument("-c", help="Specify config_path", action="store")
args = parser.parse_args()
# * Pandas: display all rows and columns
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
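A minimal sketch of the pandas.set_option call above, which removes the column limit when printing DataFrames; the max_rows line is an optional companion setting implied by the original comment.

import pandas as pd

pd.set_option('display.max_columns', None)  # print every column
pd.set_option('display.max_rows', None)     # print every row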
"""
Created on Thu Nov 7, 2019
@author: <NAME>
"""
import serial # `pyserial` package; NOT `serial` package
import warnings
import pandas as pd
import numpy as np
import time
import os
import sys
from datetime import datetime
try:
from serial.tools import list_ports
IMPORTED_LIST_PORTS = True
except ImportError:
IMPORTED_LIST_PORTS = False
from .options import SETTINGS_DICT
# link to usb-serial driver for macOS
_L1 = "http://www.prolific.com.tw/UserFiles/files/PL2303HXD_G_Driver_v2_0_0_20191204.zip"
# blog post explaining how to bypass blocked extensions
# need this because no Big Sur version of driver as of Jan 7 2020.
_L2 = "https://eclecticlight.co/2019/06/01/how-to-bypass-mojave-10-14-5s-new-kext-security/"
class LockInError(Exception):
"""named exception for LockIn serial port connection issues"""
pass
class LockIn(object):
"""
represents a usable connection with the lock-in amplifier
"""
SWEEP_HEADER = "{:>3} \t {:>15} \t {:>15} \t {:>15}"
SWEEP_BLANK = "{:>3d} \t {:>15,.2f} \t {:>15,.4e} \t {:>15,.4e}"
@staticmethod
def get_serial(comm_port):
return serial.Serial(comm_port,
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=3)
DEFAULT_PORTS = {
'darwin': ['/dev/cu.usbserial-1410'],
'win32': ['COM5'],
'linux': ['/dev/ttyUSB0']
}
def __init__(self, comm_port: str = None):
# (detect os and) set communication port
self._comm = None
if comm_port is not None:
try:
self._comm = LockIn.get_serial(comm_port)
except serial.SerialException:
print("lockintools: could not connect to port: %s" % comm_port)
else:
print("lockintools: trying default ports for platform: %s" % sys.platform)
for cp in LockIn.DEFAULT_PORTS[sys.platform]:
try:
self._comm = LockIn.get_serial(cp)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp)
if self._comm is None and IMPORTED_LIST_PORTS:
print("lockintools: tying to detect port and auto-connect...")
for cp_match in list_ports.grep("(usb|USB)"):
cp_str = str(cp_match).split('-')[0].strip()
try:
self._comm = LockIn.get_serial(cp_str)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp_str)
if self._comm is None:
raise LockInError("lockintools: CONNECTION FAILED! Do you have a driver installed?")
print("lockintools: SUCCESS! Connection established.")
self.print_to_stdout = True
@property
def comm(self):
# `serial.Serial` object for handling communications
return self._comm
def close(self):
"""closes communication port"""
if self.comm.is_open:
self.comm.close()
def open(self):
"""(re)-opens communication port"""
if not self.comm.is_open:
self.comm.open()
def cmd(self, command):
"""execute arbitrary lockin command"""
self.comm.write(str.encode(command + '\n'))
self.comm.flush()
if '?' in command:
state = bytes.decode(self.comm.readline())
return state
else:
return
def set_input_mode(self, mode):
"""set lockin input configuration"""
if mode == "A":
self.cmd("ISRC0")
elif mode == "A-B":
self.cmd("ISRC1")
elif mode == "I":
self.cmd("ISRC2")
elif mode == "I100":
self.cmd("ISRC3")
else:
raise ValueError("invalid mode {}, valid values are 'A', 'A-B', 'I', or 'I100'"
.format(mode))
def set_coupling_mode(self, mode):
if mode == "AC":
self.cmd("ICPL0")
elif mode == "DC":
self.cmd("ICPL1")
else:
raise ValueError("invalid mode {}, valid values are 'AC' or 'DC'"
.format(mode))
def set_freq(self, freq):
"""set lock-in amp. frequency"""
command = 'FREQ' + str(freq)
return self.cmd(command)
def set_ampl(self, ampl):
"""set lock-in amp. voltage amplitude"""
if ampl > 5.:
raise ValueError("can not exceed amplitude of 5V")
command = 'SLVL' + str(ampl)
return self.cmd(command)
def set_sens(self, sens):
"""set lock-in amp. sensitivity"""
if 0 <= sens <= 26:
self.cmd('SENS' + str(sens))
else:
raise ValueError("sensitivity setting must be between 0 (1 nV) and "
"26 (1 V)")
def set_harm(self, harm):
"""set lock-in amp. detection harmonic"""
harm = int(harm)
if 1 <= harm <= 19999:
self.cmd('HARM' + str(harm))
else:
raise ValueError("harmonic must be between 1 and 19999")
def get_reading(self, ch, meas_time=0.1, stdev=False):
"""
read average value from channel `ch` over `meas_time` seconds
optionally, also return standard deviation (`stdev=True`)
"""
if not (ch == 1 or ch == 2):
raise ValueError("channel `ch` should be 1 or 2")
self.cmd("REST")
self.cmd("STRT")
time.sleep(meas_time)
self.cmd("PAUS")
N = self.cmd("SPTS?")
r_str = self.cmd("TRCA?" + str(ch) + ",0," + N)
r = [float(ri) for ri in r_str.split(',')[:-1]]
if stdev:
return np.mean(r), np.std(r)
return np.mean(r)
def get_x(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=1, meas_time=meas_time, stdev=stdev)
def get_y(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=2, meas_time=meas_time, stdev=stdev)
def sweep(self, label: str, freqs, ampls, sens: int, harm: int,
stb_time: float = 9.,
meas_time: float = 1.,
ampl_time: float = 5.,
L_MAX: int = 50):
"""
Conduct a frequency sweep measurement across one or more voltage
amplitudes.
:param label: (string) label for the sweep data
:param freqs: (scalar or array-like) freqs. to sweep over
:param ampls: (scalar or array-like) amplitudes to sweep over
:param sens: (int) integer indicating lock-in amp. sensitivity setting
:param harm: (int) detection harmonic
:param stb_time: (float) time (s) for stabilization at each freq.
:param meas_time: (float) time (s) for data collection at each freq.
:param ampl_time: (float) time (s) for stabilization at each voltage
:param L_MAX: (int) maximum data array size
:return: (lockin.SweepData) container of pandas `DataFrame`s for
in- and out-of-phase detected voltages, and variances thereof
"""
self.set_harm(harm)
self.set_sens(sens)
ampls = np.asarray(ampls)
freqs = np.asarray(freqs)
if ampls.ndim == 0:
ampls = ampls[None]
if freqs.ndim == 0:
freqs = freqs[None]
# buffer arrays for in- and out-of-phase data
X = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
Y = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
for i, V in enumerate(ampls):
self._print('V = {:.2f} volts'.format(V))
self._print('waiting for stabilization after amplitude change...')
self.set_ampl(V)
self.set_freq(freqs[0])
time.sleep(ampl_time)
self._print('')
self._print(LockIn.SWEEP_HEADER.format('', 'freq [Hz]', 'X [V]', 'Y [V]'))
for j, freq in enumerate(freqs):
# self._print("waiting for stabilization at f = {:.4f} Hz "
# "({:d}/{:d})".format(freq, j + 1, len(freqs)))
self.set_freq(freq)
self.cmd('REST')
time.sleep(stb_time)
# self._print('taking measurement')
# beep(repeat=1)
self.cmd('STRT')
time.sleep(meas_time)
self.cmd('PAUS')
# self._print('extracting values')
N = self.cmd('SPTS?')
x_str = self.cmd('TRCA?1,0,' + N)
y_str = self.cmd('TRCA?2,0,' + N)
# list of values measured at a single point
# last character is a newline character
x = np.array([float(_) for _ in x_str.split(',')[:-1]])
y = np.array([float(_) for _ in y_str.split(',')[:-1]])
try:
X[i, j][:len(x)] = x
Y[i, j][:len(x)] = y
except ValueError:
warnings.warn("buffer array overflow encountered at point "
"f = {:.1f} Hz, V = {:.1f} volts"
.format(freq, V))
X[i, j] = x[:L_MAX]
Y[i, j] = y[:L_MAX]
x_ = np.mean(x[~np.isnan(x)])
y_ = np.mean(y[~np.isnan(y)])
self._print(LockIn.SWEEP_BLANK.format(j + 1, freq, x_, y_))
self._print('')
return SweepData(X, Y, freqs, ampls, label, sens, harm)
def get_config(self):
raw_config = {}
for key in SETTINGS_DICT.keys():
if key != 'names':
raw_config[key] = self.cmd(key + '?')
return raw_config
def _print(self, s):
if self.print_to_stdout:
print(s)
class SweepData(object):
"""
Contains the data relevant to a single sweep.
i.e. the amplitude of the oscillations described by the `harm`th harmonic of
the voltage measured across the heater line or shunt, for a driving
voltage `V` in `Vs` at a frequency `freq` in `freqs`.
    The digested values at each point (e.g. `V_x[i]` and `dV_x[i]`) are the
    average of many measurements at that point and the standard deviation of
    those measurements, respectively.
"""
def __init__(self, X, Y, freqs, Vs, label, sens, harm):
dt1 = datetime.now()
dt = dt1.strftime("%d-%m-%Y_%H-%M")
self.ID = '_'.join([label, 'HARM' + str(harm), 'SENS' + str(sens), dt])
# frequency and voltage ranges
self.freqs = freqs
self.Vs = Vs
# full raw buffer output from lock-in (padded with NaNs)
self.X = X
self.Y = Y
n = len(freqs)
m = len(Vs)
        # initializing arrays for digests
V_x = np.zeros((m, n)) # in-phase amplitudes (left lockin display)
V_y = np.zeros((m, n)) # out-of-phase amplitudes (right lockin display)
        dV_x = np.zeros((m, n)) # standard deviations of buffer outputs over time
        dV_y = np.zeros((m, n)) # standard deviations of buffer outputs over time
for i in range(m):
for j in range(n):
_X_ = X[i, j]
_Y_ = Y[i, j]
_X_ = _X_[~np.isnan(_X_)]
_Y_ = _Y_[~np.isnan(_Y_)]
V_x[i, j] = np.mean(_X_)
V_y[i, j] = np.mean(_Y_)
dV_x[i, j] = np.std(_X_)
dV_y[i, j] = np.std(_Y_)
# converting to DataFrames for readability
self.V_x = pd.DataFrame(V_x.T, index=freqs, columns=Vs)
self.V_y =
|
pd.DataFrame(V_y.T, index=freqs, columns=Vs)
|
pandas.DataFrame
|
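A minimal sketch of the pandas.DataFrame constructor pattern used above, where a 2-D buffer laid out as (amplitude, frequency) is transposed so frequencies index the rows and drive voltages label the columns; the numbers are illustrative.

import numpy as np
import pandas as pd

freqs = [10.0, 100.0, 1000.0]                      # sweep frequencies -> row index
Vs = [0.5, 1.0]                                    # drive amplitudes -> column labels
V_y = np.zeros((len(Vs), len(freqs)))              # buffer shape: (amplitudes, frequencies)
df = pd.DataFrame(V_y.T, index=freqs, columns=Vs)  # transpose so each row is one frequency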
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
|
tm.assert_almost_equal(result, expected)
|
pandas.util.testing.assert_almost_equal
|
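A minimal sketch of the assert_almost_equal call completed above, using the same pandas.util.testing import as the snippet (newer pandas releases deprecate pandas.util.testing); the arrays are illustrative.

import numpy as np
from pandas.util import testing as tm

result = np.array([1, 2], dtype=np.int64)
expected = np.array([1.0, 2.0])
tm.assert_almost_equal(result, expected, check_dtype=False)  # values match, dtypes ignored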
import sys
import pandas as pd
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--f', dest='infile',
help='gene and transcript id tsv from talon read_annot file')
parser.add_argument('--prefix', dest='prefix',
help='prefix for saved plots.')
args = parser.parse_args()
return args
# subsample and compute tpm
def subsample_transcripts(df1, df2, n):
if n == 'max':
sub1 = df1.copy(deep=True)
sub2 = df2.copy(deep=True)
else:
sub1 = df1.sample(n, random_state=1)
sub2 = df2.sample(n, random_state=1)
# concatenate the different reps
sub_df = pd.concat([sub1, sub2])
# filter here
# only known transcripts
sub_df = sub_df.loc[sub_df.transcript_novelty == 'Known']
# only nncs/nics that pass the reproducibility filter
# TODO
sub_df = sub_df.groupby(['gene_ID', 'transcript_ID'])['transcript_ID'].count().to_frame()
sub_df.rename({'transcript_ID': 'counts'}, axis=1, inplace=True)
sub_df.reset_index(inplace=True)
total_count = sub_df.counts.sum()
sub_df['tpm'] = (sub_df.counts*1000000)/total_count
return sub_df
def compute_total_t_tpm(df):
ab = df.copy(deep=True)
# only known transcripts and genes
ab = ab.loc[(ab.gene_novelty == 'Known')&(ab.transcript_novelty == 'Known')]
# # remove genomic transcripts
# ab = ab.loc[ab.transcript_novelty != 'Genomic']
# # remove entries not in the whitelist
# ab = ab.merge(whitelist, how='inner', on=['transcript_ID', 'gene_ID'])
# # remove ISMs
# ab = ab.loc[ab.transcript_novelty != 'ISM']
# compute TPM
ab = ab.groupby(['gene_ID', 'transcript_ID', 'transcript_novelty'])['transcript_ID'].count().to_frame()
ab.rename({'transcript_ID': 'counts'}, axis=1, inplace=True)
ab.reset_index(inplace=True)
total_count = ab.counts.sum()
ab['ab_tpm'] = (ab.counts*1000000)/total_count
# get only relevant fields
ab = ab[['transcript_ID', 'ab_tpm']]
return ab
# subsample and compute tpm
def subsample_genes(df1, df2, n):
if n == 'max':
sub1 = df1.copy(deep=True)
sub2 = df2.copy(deep=True)
else:
sub1 = df1.sample(n, random_state=1)
sub2 = df2.sample(n, random_state=1)
# concatenate the different reps
sub_df = pd.concat([sub1, sub2])
# filter here
# only known genes
sub_df = sub_df.loc[sub_df.gene_novelty == 'Known']
sub_df = sub_df.groupby(['gene_ID'])['gene_ID'].count().to_frame()
sub_df.rename({'gene_ID': 'counts'}, axis=1, inplace=True)
sub_df.reset_index(inplace=True)
total_count = sub_df.counts.sum()
sub_df['tpm'] = (sub_df.counts*1000000)/total_count
return sub_df
def compute_total_g_tpm(df):
ab = df.copy(deep=True)
# only known genes
ab = ab.loc[ab.gene_novelty == 'Known']
# compute TPM
ab = ab.groupby(['gene_ID'])['gene_ID'].count().to_frame()
ab.rename({'gene_ID': 'counts'}, axis=1, inplace=True)
ab.reset_index(inplace=True)
total_count = ab.counts.sum()
ab['ab_tpm'] = (ab.counts*1000000)/total_count
# get only relevant fields
ab = ab[['gene_ID', 'ab_tpm']]
return ab
def gene(df, df1, df2, intervals, prefix):
# aggregate abundance and compute tpm
ab = compute_total_g_tpm(df)
# bin transcripts by expression level. How many transcripts belong to each bin?
# assign each transcript a bin based on its expression level as well
# bins = [(0,1),(1,2),(2,5),(5,10),(10,50),(50,100),(100,500),(500,ab.ab_tpm.max())]
bins = [0,5,10,50,100,500,ab.ab_tpm.max()+1]
ab_tpm = ab.ab_tpm.values.tolist()
ab_bins = np.digitize(ab_tpm, bins)
ab['bin'] = ab_bins
ab['bin_total'] = ab['bin'].map(ab['bin'].value_counts())
# remove entries from bin 1
ab = ab.loc[ab.bin != 1]
# create a bin df to keep track of how many transcripts belong to each bin
bin_df = ab[['bin', 'bin_total']].groupby(['bin', 'bin_total']).count()
bin_df.reset_index(inplace=True)
# loop through each interval and subsample df.
plot_data = pd.DataFrame(columns=['reads','bin','perc_within_10'])
for n in intervals:
sub_df = subsample_genes(df1, df2, n)
sub_df = sub_df.merge(ab, how='inner', on='gene_ID').fillna(0)
sub_df['within_5'] = (sub_df.tpm >= sub_df.ab_tpm*.9)&(sub_df.tpm <= sub_df.ab_tpm*1.1)
temp = sub_df[['bin', 'within_5', 'tpm']].groupby(['bin', 'within_5']).count()
temp.rename({'tpm':'bin_count'},axis=1,inplace=True)
temp.reset_index(inplace=True)
temp = temp.loc[temp.within_5 == True]
for b in ab.bin.unique().tolist():
if b not in temp.bin.tolist():
temp = temp.append({'bin':b,'within_5':True,'bin_count':0}, ignore_index=True)
temp = temp.merge(bin_df, how='left', on='bin')
temp['perc_within_10'] = (temp.bin_count/temp.bin_total)*100
if n == 'max':
temp['reads'] = (len(df1.index)+len(df2.index))/2
else:
temp['reads'] = n
plot_data = pd.concat([plot_data,temp[['reads', 'bin', 'perc_within_10']]])
# convert to human-readable TPM values
plot_data['TPM bin'] = plot_data.apply(lambda x: (bins[x.bin-1],bins[x.bin]), axis=1)
ax = sns.lineplot(x='reads', y='perc_within_10', hue='TPM bin', marker='o', data=plot_data)
# ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.savefig('{}_gene_nomogram.png'.format(prefix))
plt.clf()
def transcript(df, df1, df2, intervals, prefix):
# aggregate abundance over both replicates and compute tpm
# from parent abundance count
# ab = pd.read_csv(file, '\t')
# ab_cols = ab.columns[11:]
# ab['counts'] = ab[ab_cols].sum(axis=1)
# total_count = ab.counts.sum()
# ab['ab_tpm'] = (ab.counts*1000000)/total_count
# ab = ab[['transcript_ID', 'ab_tpm']]
ab = compute_total_t_tpm(df)
# bin transcripts by expression level. How many transcripts belong to each bin?
# assign each transcript a bin based on its expression level as well
# bins = [(0,1),(1,2),(2,5),(5,10),(10,50),(50,100),(100,500),(500,ab.ab_tpm.max())]
bins = [0,5,10,50,100,500,ab.ab_tpm.max()+1]
ab_tpm = ab.ab_tpm.values.tolist()
ab_bins = np.digitize(ab_tpm, bins)
ab['bin'] = ab_bins
ab['bin_total'] = ab['bin'].map(ab['bin'].value_counts())
# remove entries from bin 1
ab = ab.loc[ab.bin != 1]
# create a bin df to keep track of how many transcripts belong to each bin
bin_df = ab[['bin', 'bin_total']].groupby(['bin', 'bin_total']).count()
bin_df.reset_index(inplace=True)
# loop through each interval and subsample df.
plot_data = pd.DataFrame(columns=['reads','bin','perc_within_10'])
for n in intervals:
sub_df = subsample_transcripts(df1, df2, n)
sub_df = sub_df.merge(ab, how='inner', on='transcript_ID').fillna(0)
sub_df['within_5'] = (sub_df.tpm >= sub_df.ab_tpm*.9)&(sub_df.tpm <= sub_df.ab_tpm*1.1)
temp = sub_df[['bin', 'within_5', 'tpm']].groupby(['bin', 'within_5']).count()
temp.rename({'tpm':'bin_count'},axis=1,inplace=True)
temp.reset_index(inplace=True)
temp = temp.loc[temp.within_5 == True]
for b in ab.bin.unique().tolist():
if b not in temp.bin.tolist():
temp = temp.append({'bin':b,'within_5':True,'bin_count':0}, ignore_index=True)
temp = temp.merge(bin_df, how='left', on='bin')
temp['perc_within_10'] = (temp.bin_count/temp.bin_total)*100
if n == 'max':
temp['reads'] = (len(df1.index)+len(df2.index))/2
else:
temp['reads'] = n
plot_data = pd.concat([plot_data,temp[['reads', 'bin', 'perc_within_10']]])
# convert to human-readable TPM values
plot_data['TPM bin'] = plot_data.apply(lambda x: (bins[x.bin-1],bins[x.bin]), axis=1)
ax = sns.lineplot(x='reads', y='perc_within_10', hue='TPM bin', marker='o', data=plot_data)
# ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.savefig('{}_transcript_nomogram.png'.format(prefix))
plt.clf()
def main():
args = get_args()
df =
|
pd.read_csv(args.infile, sep='\t')
|
pandas.read_csv
|
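A minimal sketch of the tab-separated pandas.read_csv call completed above; the inline TSV stands in for the talon read_annot file.

import io
import pandas as pd

tsv = "gene_ID\ttranscript_ID\ttranscript_novelty\n1\t10\tKnown\n"
df = pd.read_csv(io.StringIO(tsv), sep='\t')  # same call shape as pd.read_csv(args.infile, sep='\t')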
import pandas as pd
import numpy as np
import pytest
from .conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature, tools
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times =
|
pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
|
pandas.date_range
|
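A minimal sketch of the pandas.date_range call completed above; with a 12-hour frequency the closed range yields three timestamps.

import pandas as pd

times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
print(len(times))  # 3: Jan 1 00:00, Jan 1 12:00, Jan 2 00:00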
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.setting import setting
from src.cpePaser import day_extract
from src.cpePaser import week_extract
import pandas as pd
from src.timeOperator import timeOpt
import numpy as np
import math
import os
from src.postEva.exper_paser import exper_paser
from src.postEva.capacity_paser import capacity_paser
from src.postEva.capacity_paser import cell
from src.logger_setting.my_logger import get_logger
logger = get_logger()
engineer_map = {}
threshold_map = {}
def get_post_df():
min_month = week_extract.get_min_month()
all_file = week_extract.get_file_by_range(timeOpt.add_months(min_month, 2), timeOpt.add_months(min_month, 3))
df = pd.DataFrame(columns=setting.parameter_json["post_eva_from_day_column_name"])
for f in all_file:
file_df = pd.read_csv(f, error_bad_lines=False, index_col=False)[setting.parameter_json[
"post_eva_from_day_column_name"]]
df = df.append(file_df)
return df
def post_evaluation():
df = get_post_df()
grouped = day_extract.groupby_calc(df).apply(calc_post_eva)
result = pd.DataFrame(grouped)
result.to_csv(os.path.join(setting.post_eva_path, 'post_eva_data.csv'), index=False)
def calc_post_eva(df):
esn = df['esn'].values
total_download = np.sum(df['TotalDownload'].values) / setting.mb
total_upload = np.sum(df['TotalUpload'].values) / setting.mb
df_sort_by_date = df.sort_values('date')[['IMSI', 'IMEI', 'MSISDN']]
newst_imsi = df_sort_by_date['IMSI'].values[-1]
newst_imei = df_sort_by_date['IMEI'].values[-1]
newst_msisdn = df_sort_by_date['MSISDN'].values[-1]
data = {'esn': esn[0],
'TotalDownload': [total_download],
'TotalUpload': [total_upload],
'IMSI': [newst_imsi],
'IMEI': [newst_imei],
'MSISDN': [newst_msisdn]}
result = pd.DataFrame(data)
return result
def exper_evaluate_with_suite():
pre_result_df = pd.read_csv(os.path.join(setting.result_path, 'predict_result.csv'))[
setting.pre_for_post_columns]
total_line = pre_result_df.shape[0]
pre_result_df = pre_result_df.head(int(total_line * setting.parameter_json["top_n"] / 100))
post_result_df = pd.read_csv(os.path.join(setting.post_eva_path, 'post_eva_data.csv'))[setting.post_suite_columns]
exper_df = exper_paser.get_expericece_data()
pre_and_post_df = pd.merge(pre_result_df, post_result_df, on='esn', how='left')
df =
|
pd.merge(pre_and_post_df, exper_df, on='esn', how='left')
|
pandas.merge
|
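A minimal sketch of the left joins chained above via pandas.merge on the shared esn key; the frames are illustrative.

import pandas as pd

pre = pd.DataFrame({'esn': ['A', 'B'], 'score': [0.9, 0.4]})
post = pd.DataFrame({'esn': ['A'], 'TotalDownload': [123.0]})
merged = pd.merge(pre, post, on='esn', how='left')  # unmatched rows get NaN in the right-hand columns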
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 30 21:28:29 2021
@author: u0139894
"""
import streamlit as stl
import numpy as np
import scipy.integrate as solver
import pandas as pd
def monod_g(s, mu_max, ks):
return mu_max*(s/(s + ks))
def dynamics(t, vars_t, D, dr, s_I, mu_max, ks, y):
s = vars_t[0]
x = vars_t[1]
xd = vars_t[2]
mu = monod_g(s, mu_max, ks)
dxddt = dr*x - D*xd
dsdt = D*(s_I-s) -(mu/y)*x
dxdt = (mu*x)-(D*x)-(dr*x)
return np.array([dsdt, dxdt, dxddt])
def integrate(vars_init, t_start, t_end, t_interval, params, method = 'bdf' ):
ode = solver.ode(dynamics)
# BDF method suited to stiff systems of ODEs
n_steps = (t_end - t_start)/t_interval
    ode.set_integrator('vode', nsteps=n_steps, method=method)
# Time
t_step = (t_end - t_start)/n_steps
ode.set_f_params(params['D'], params['dr'], params['s_I'], params['mu_max'], params['ks'], params['y'])
ode.set_initial_value(vars_init, t_start)
t_s = []
var_s = []
while ode.successful() and ode.t < t_end:
ode.integrate(ode.t + t_step)
t_s.append(ode.t)
var_s.append(ode.y)
time = np.array(t_s)
vars_total = np.vstack(var_s).T
return time, vars_total
stl.sidebar.write('### Parameters')
stl.sidebar.write('**Potentially controlable**')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
cd = stl.sidebar.slider('culture days', 1, 50, 5, step=1, format='%.3f')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
s_0 = stl.sidebar.slider('Initial resource concentration (mM)', 0.001, 100.0, 5.55, step=0.001, format='%.3f')
stl.sidebar.write('The starting concentration of the limiting resourse in the chemostat vessel (Conc. of glucose in WC media = 5.5 mM)')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
x_0 = stl.sidebar.slider('Initial bacterial concentration (k cells/ml)', 0.0001, 5.0, 0.001, step=0.001, format='%.3f')
stl.sidebar.write('k = $10^8$; I hope to soon have a conversion factor between OD and cell counts for each strain')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
xd_0 = 0
D = stl.sidebar.slider('Dilution parameter (1/h)', 0.00001, 1.0, 0.07, step=0.0001, format='%.3f')
stl.sidebar.write('the maximum dilution allowed by the AMBR is approx. 0.07 (considering a vol of 10 mL and inflow of 0.7 ml/h)')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
s_I = stl.sidebar.slider('Feed resource concentration (mM)', 0.001, 100.0, 5.55, step=0.001, format='%.3f')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
stl.sidebar.write('**Hypothetical strain**')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
mu_max = stl.sidebar.slider('Max growth rate (1/h)', 0.00001, 3.0, 1.0, step=0.0001, format='%.3f')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
y = stl.sidebar.slider('yield (unitless)', 0.00001, 3.0, 0.5, step=0.0001, format='%.3f')
stl.sidebar.write('Although one can attribute units (e.g. (k cell/ml)/mM), they can ultimately be cancelled out')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
ks = stl.sidebar.slider('Monod constant (mM)', 1.0, 100.0, 50.0, step=0.5, format='%.3f')
stl.sidebar.write('*-----------------------------------------*\n\n\n')
dr = stl.sidebar.slider('death rate (1/h)', 0.000001, 1.0, 0.01, step=0.00001, format='%.3f')
stl.sidebar.write('Assumed constant, but it is possibly higher at very low dilution rates')
vars_init = np.array([s_0, x_0, xd_0])
params = {'D':D, 'dr':dr, 's_I':s_I, 'mu_max':mu_max, 'ks':ks, 'y':y}
t, v = integrate(vars_init, 0, cd*24, 0.1, params)
data =
|
pd.DataFrame(v.T, columns = ['S (mM)', 'LiveCells k cells/ml', 'DeadCells k cells/ml'], index=t)
|
pandas.DataFrame
|
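For reference, the chemostat model implemented in monod_g and dynamics above, transcribed directly from the code (s: limiting resource, x: live cells, x_d: dead cells, D: dilution rate, d_r: death rate, s_I: feed concentration, y: yield):

\mu(s) = \mu_{\max}\,\frac{s}{s + k_s}
\frac{ds}{dt} = D\,(s_I - s) - \frac{\mu(s)}{y}\,x
\frac{dx}{dt} = \mu(s)\,x - D\,x - d_r\,x
\frac{dx_d}{dt} = d_r\,x - D\,x_d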
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
# test add_metadata - one after the other with dupe cols
# yes overwrite
def test_add_metadata_4(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=True)
assert {'3','4'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other with dupe cols
    # don't overwrite
def test_add_metadata_3(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=False)
assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other
def test_add_metadata_2(self):
pass
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
gtf = 'files/chr11_and_Tcf3.gtf'
sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562', 'G0'],
['PB65_B017', '2', 'GM12878', 'M'],
['PB65_B018', '2', 'GM12878', 'S']]
cols = ['dataset', 'cluster', 'sample', 'cell_state']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
# test add_metadata - vanilla
def test_add_metadata(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
# gtf = 'files/chr11_and_Tcf3.gtf'
# sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
# print(sg.t_df)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562'],
['PB65_B017', '2', 'GM12878'],
['PB65_B018', '2', 'GM12878']]
cols = ['dataset', 'cluster', 'sample']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
# TODO
# add_dataset, add_transcriptome, add_annotation
# tests add_transcriptome - added after adding an annotation
def test_add_transcriptome_2(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
sg.add_transcriptome('files/test_full.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_transcriptome - vanilla
def test_add_transcriptome_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
# tests add_annotation - transcriptome already in SG
def test_add_annotation_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.add_annotation('files/test_full_annotation.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_annotation - vanilla
def test_add_annotation_1(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
# # loc_df
# data = [['chr1', 1, 0, True],
# ['chr1', 20, 1, True],
# ['chr1', 25, 2, True],
# ['chr1', 30, 3, True],
# ['chr1', 35, 4, True],
# ['chr1', 40, 5, True],
# ['chr2', 45, 6, True],
# ['chr2', 50, 7, True],
# ['chr2', 60, 8, True],
# ['chr2', 75, 10, True],
# ['chr2', 80, 11, True],
# ['chr2', 100, 12, True],
# ['chr2', 110, 13, True]]
# cols = ['chrom', 'coord', 'vertex_id', 'annotation']
# ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
# ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
# ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
#
# print('test')
# print(sg.loc_df)
# print('ctrl')
# print(ctrl_loc_df)
#
# print(sg.edge_df)
# assert 1 == 0
# # edge_df
# data = [[0, 1, '+', 'exon', 0, True],
# [1, 2],
# [2, 3],
# [3, 4],
# [4, 5],
# [5, 6],
# [6, 7],
#
#
# ]
# cols = ['v1', 'v2', 'strand', 'edge_type', 'annotation']
#
# # t_df
# data = [['test1', 'test1_tname', 'test1_gid', 'test1_gname', [0,1,2,3,4]], [0,1,2,3,4,5], True],
# ['test2', 'test2_tname', 'test2_gid', 'test2_gname', [5,6,7,8,9], [12,11,10,8,7,6], True],
# ['test4', 'test4_tname', 'test4_gid', 'test4_gname', [10], [6,7], True],
# ['test5', 'test5_tname', 'test2_gid', 'test2_gname', [5,11,12], [12,11,8,7], True],
# ['test6', 'test6_tname', 'test2_gid', 'test2_gname', [,6,7,8,9], [13,11,10,8,7,6], True]]
# cols = ['tid', 'tname', 'gid', 'gname', 'path', 'loc_path', 'annotation']
#
assert sg.annotation == True
assert 'annotation' in sg.t_df.columns
assert 'annotation' in sg.edge_df.columns
assert 'annotation' in sg.loc_df.columns
for ind, entry in sg.t_df.iterrows():
assert entry.annotation == True
assert entry.novelty == 'Known'
for ind, entry in sg.edge_df.iterrows():
assert entry.annotation == True
for ind, entry in sg.loc_df.iterrows():
assert entry.annotation == True
# tests:, label_annotated
# label annotated transcripts
def test_label_annotated(self):
sg = swan.SwanGraph()
data = [[0, [0,1]],
[1, [2,3]],
[2, [4,5]]]
sg.t_df = pd.DataFrame(data=data, columns=['tid', 'path'])
data = [[0,0,1], [1,1,2], [2,2,3], [3,3,4],
[4,4,5], [5,5,6]]
sg.edge_df = pd.DataFrame(data=data, columns=['edge_id', 'v1', 'v2'])
data = [0,1,2,3,4,5,6]
sg.loc_df = pd.DataFrame(data=data, columns=['vertex_id'])
tids = [0,1]
sg.label_annotated(tids)
ctrl_tids = [0,1]
tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
assert set(ctrl_tids) == set(tids)
ctrl_edges = [0,1,2,3]
edges = sg.edge_df.loc[sg.edge_df.annotation == True, 'edge_id'].tolist()
assert set(ctrl_edges) == set(edges)
ctrl_locs = [0,1,2,3,4]
locs = sg.loc_df.loc[sg.loc_df.annotation == True, 'vertex_id'].tolist()
assert set(ctrl_locs) == set(locs)
# add to empty sg, don't add isms
def test_add_transcriptome(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_talon.gtf', include_isms=False)
print(sg.t_df)
assert "ISM" not in sg.t_df.novelty.unique()
# assert 1 == 0
# tests if correct error is thrown when adding annotation to
# sg that already has one
def test_add_annotation_already(self):
sg = swan.SwanGraph()
sg.annotation = True
with pytest.raises(Exception) as e:
sg.add_annotation('files/Canx.gtf')
assert 'Annotation already' in str(e.value)
# add annotation to empty sg
def test_add_annotation_empty_sg(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full.gtf')
# check annotation columns
assert all(sg.t_df.annotation.tolist())
assert all(sg.edge_df.annotation.tolist())
assert all(sg.loc_df.annotation.tolist())
# check novelty column in t_df
assert len(sg.t_df.loc[sg.t_df.novelty=='Known']) == len(sg.t_df.index)
# check annotation flag
assert sg.annotation == True
# add annotation to sg with data already in it
def test_add_annotation_sg_data(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel.gtf')
sg.add_annotation('files/test_known.gtf')
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
# add annotation to sg with data where data contains dupe transcript
def test_add_annotation_sg_data_dupe_tid(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_novel_1.gtf')
sg.add_annotation('files/test_known.gtf')
        # check with coord/chr because reindexing has not been
        # reimplemented yet
# t_df
annot_tids = ['test1', 'test2', 'test4']
assert all(sg.t_df.loc[annot_tids, 'annotation'])
ctrl_novel_tids = ['test3', 'test5']
novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
assert len(set(ctrl_novel_tids)-set(novel_tids)) == 0
assert len(ctrl_novel_tids) == len(novel_tids)
# make sure the novelty assignment worked
annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
assert set(annot_tids) == set(known_tids)
# loc_df
ctrl_novel_locs = [('chr2', 65)]
temp = sg.loc_df[sg.loc_df.annotation == False]
chroms = temp.chrom.tolist()
coords = temp.coord.tolist()
novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
print('control')
print(ctrl_novel_locs)
print('test')
print(novel_locs)
assert len(set(ctrl_novel_locs)-set(novel_locs)) == 0
assert len(novel_locs) == len(ctrl_novel_locs)
# edge_df
edge_df = sg.add_edge_coords()
edge_df = edge_df.loc[edge_df.annotation == False]
ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
('chr2', 65, 50, '-', 'intron'),
('chr2', 80, 60, '-', 'intron'),
('chr2', 60, 50, '-', 'exon')]
chroms = edge_df.chrom.tolist()
v1s = edge_df.v1_coord.tolist()
v2s = edge_df.v2_coord.tolist()
strands = edge_df.strand.tolist()
etypes = edge_df.edge_type.tolist()
novel_edges = [(chrom,v1,v2,strand,etype) for chrom,v1,v2,strand,etype \
in zip(chroms,v1s,v2s,strands,etypes)]
print('control')
print(ctrl_novel_edges)
print('test')
print(novel_edges)
assert len(set(ctrl_novel_edges)-set(novel_edges)) == 0
assert len(ctrl_novel_edges) == len(novel_edges)
###########################################################################
###################### Related to file parsing ############################
###########################################################################
class TestFiles(object):
# tests GTF parsing
def test_parse_gtf(self):
gtf_file = 'files/Canx.gtf'
t_df, exon_df, from_talon = swan.parse_gtf(gtf_file, True, False)
t_df.index.name = 'tid_index'
t_df = t_df.sort_values(by='tid_index')
ctrl_t_df = pd.read_csv('files/Canx_transcript.tsv',sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df = ctrl_t_df.sort_values(by='tid_index')
ctrl_exons = ctrl_t_df.exons.tolist()
ctrl_exons = [exons.split(',') for exons in ctrl_exons]
ctrl_t_df['exons'] = ctrl_exons
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - no pass_list
def test_parse_db_1(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, None, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
# tests TALON DB parsing - yes pass_list
def test_parse_db_2(self):
db_file = 'files/test_full.db'
pass_list = 'files/test_full_pass_list.csv'
t_df, edge_df = swan.parse_db(db_file, pass_list, False, True, False)
ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
for key, item in ctrl_t_df.items():
item['exons'] = swan.reorder_exons(item['exons'])
# delete entries that weren't on pass list
del ctrl_e_df['chr2_45_50_+_exon']
del ctrl_t_df['test4']
ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()
# sort all values by their IDs
edge_df.sort_index(inplace=True)
t_df.sort_index(inplace=True)
ctrl_e_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_e_df = ctrl_e_df[edge_df.columns]
ctrl_t_df = ctrl_t_df[t_df.columns]
assert 'novelty' in t_df.columns
print('test')
print(edge_df)
print('control')
print(ctrl_e_df)
print(edge_df == ctrl_e_df)
assert (edge_df == ctrl_e_df).all(axis=0).all()
print('test')
print(t_df)
print(t_df.exons)
print('control')
print(ctrl_t_df)
print(ctrl_t_df.exons)
print(t_df == ctrl_t_df)
assert (t_df == ctrl_t_df).all(axis=0).all()
###########################################################################
####################### Related to DF creation ############################
###########################################################################
class TestCreateDFs(object):
# add_edge_coords, get_current_locs, get_current_edges,
# create_loc_dict, create_transcript_edge_dict create_dfs,
# tests add_edge_coords
def test_add_edge_coords(self):
sg = swan.SwanGraph()
sg = add_transcriptome_no_reorder_gtf(sg, 'files/test_full.gtf')
# sg.add_transcriptome('files/test_full.gtf')
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type',
'v1_coord', 'v2_coord']
# print(sg.edge_df.head())
edge_df = sg.add_edge_coords()
print(edge_df.head())
edge_df = edge_df[cols]
ctrl_edge_df = pd.read_csv('files/test_add_edge_coords_result.tsv', sep='\t')
ctrl_edge_df = ctrl_edge_df[cols]
# first order to make them comparable
# sort all values by their IDs
edge_df.sort_values(by='edge_id', inplace=True)
ctrl_edge_df.sort_values(by='edge_id', inplace=True)
# and order columns the same way
ctrl_edge_df = ctrl_edge_df[edge_df.columns]
print('test')
print(edge_df)
print('control')
print(ctrl_edge_df)
assert (edge_df == ctrl_edge_df).all(axis=0).all()
# tests get_current_locs with an empty swangraph
def test_get_current_locs_empty_sg(self):
sg = swan.SwanGraph()
locs, n = sg.get_current_locs()
assert locs == {}
assert n == -1
# tests get_current_locs with a swangraph with data
def test_get_current_locs_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 3, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs, n = sg.get_current_locs()
ctrl_locs = {(1,2):0, (1,3):1, (3,50):2}
assert locs == ctrl_locs
assert n == 2
# tests get_current_edges with an empty swangraph
def test_get_current_edges_empty_sg(self):
sg = swan.SwanGraph()
edges, n = sg.get_current_edges()
assert(edges == {})
assert(n == -1)
# tests get_current_edges in a sg with data
def test_get_current_edges_sg_data(self):
sg = swan.SwanGraph()
cols = ['vertex_id', 'chrom', 'coord']
data = [[0, 1, 2], [1, 1, 3], [2, 1, 50]]
sg.loc_df = pd.DataFrame(data=data, columns=cols)
cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
data = [[0, 0, 1, '+', 'exon'],
[1, 1, 2, '+', 'intron']]
sg.edge_df = pd.DataFrame(data=data, columns=cols)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
edges, n = sg.get_current_edges()
ctrl = {(1,2,3,'+','exon'): {'edge_id': 0,
'edge_type': 'exon',
'v1': 0 ,
'v2': 1},
(1,3,50,'+','intron'): {'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2}}
assert(edges == ctrl)
assert(n == 1)
# test create_loc_dict on an empty sg
# also checks to make sure exons that use the same loc
# don't result in dupe entries in loc_df
def test_create_loc_dict_empty_sg(self):
_, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1',1): 0,
('chr1', 20): 1,
('chr1', 25): 2,
('chr1', 30): 3,
('chr1', 35): 4,
('chr1', 40): 5,
('chr2', 100): 6,
('chr2', 80): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
assert(ctrl_locs == locs)
# tests create_loc_dict when locs already exist in sg
def test_create_loc_dict_sg_data(self):
_, exons = get_test_transcript_exon_dicts()
# dummy preexisting data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1], [1, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
ctrl_locs = {('chr1', 1):0,
('chr2', 80): 1,
('chr1', 20): 2,
('chr1', 25): 3,
('chr1', 30): 4,
('chr1', 35): 5,
('chr1', 40): 6,
('chr2', 100): 7,
('chr2', 75): 8,
('chr2', 60): 9,
('chr2', 50): 10,
('chr2', 45): 11,
('chr2', 65): 12
}
print('test')
print(locs)
print('control')
print(ctrl_locs)
assert(ctrl_locs == locs)
# tests create_transcript_edge_dict empty swangraph
def test_create_transcript_edge_dict_empty_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
locs = sg.create_loc_dict(exons)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,1,2,3,4],
'test2': [5,6,7,8,9],
'test3': [5,6,10,11,9],
'test4': [12],
'test5': [5,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 1,
'edge_type': 'intron',
'v1': 1,
'v2': 2
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 2,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 3,
'edge_type': 'intron',
'v1': 3,
'v2': 4
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 4,
'edge_type': 'exon',
'v1': 4,
'v2': 5
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 6,
'v2': 7
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 7,
'v2': 8
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 8,
'v2': 9
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 9,
'v2': 10
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 10,
'v2': 11
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 8,
'v2': 12
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 12,
'v2': 10
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 11,
'v2': 10
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 7,
'v2': 9
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 9,
'v2': 10
}
}
assert(edges == ctrl_edges)
# tests create_transcript_edge_dict with edges already in swangraph
def test_create_transcript_edge_dict_edge_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
# add some dummy data
sg = swan.SwanGraph()
data = [[0, 'chr1', 1],
[1, 'chr2', 20],
[2, 'chr2', 100],
[3, 'chr2', 80]]
columns = ['vertex_id', 'chrom', 'coord']
sg.loc_df = pd.DataFrame(data=data, columns=columns)
cols = ['tid']
data = [0]
sg.t_df = pd.DataFrame(data=data, columns=cols)
locs = sg.create_loc_dict(exons)
data = [[0, 0, 1, '+', 'exon'],
[1, 2, 3, '-', 'exon']]
columns = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
sg.edge_df = pd.DataFrame(data=data, columns=columns)
transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)
# just compare the paths for the transcripts, which is the only
# part modified by this function
transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
ctrl_transcript_paths = {
'test1': [0,2,3,4,5],
'test2': [1,6,7,8,9],
'test3': [1,6,10,11,9],
'test4': [12],
'test5': [1,13,14]
}
assert(transcripts == ctrl_transcript_paths)
ctrl_edges = {
('chr1', 1, 20, '+', 'exon'): {
'edge_id': 0,
'edge_type': 'exon',
'v1': 0,
'v2': 1
},
('chr1', 20, 25, '+', 'intron'): {
'edge_id': 2,
'edge_type': 'intron',
'v1': 4,
'v2': 5
},
('chr1', 25, 30, '+', 'exon'): {
'edge_id': 3,
'edge_type': 'exon',
'v1': 5,
'v2': 6
},
('chr1', 30, 35, '+', 'intron'): {
'edge_id': 4,
'edge_type': 'intron',
'v1': 6,
'v2': 7
},
('chr1', 35, 40, '+', 'exon'): {
'edge_id': 5,
'edge_type': 'exon',
'v1': 7,
'v2': 8
},
('chr2', 100, 80, '-', 'exon'): {
'edge_id': 1,
'edge_type': 'exon',
'v1': 2,
'v2': 3
},
('chr2', 80, 75, '-', 'intron'): {
'edge_id': 6,
'edge_type': 'intron',
'v1': 3,
'v2': 9
},
('chr2', 75, 60, '-', 'exon'): {
'edge_id': 7,
'edge_type': 'exon' ,
'v1': 9,
'v2': 10
},
('chr2', 60, 50, '-', 'intron'): {
'edge_id': 8,
'edge_type': 'intron',
'v1': 10,
'v2': 11
},
('chr2', 50, 45, '-', 'exon'): {
'edge_id': 9,
'edge_type': 'exon',
'v1': 11,
'v2': 12
},
('chr2', 75, 65, '-', 'exon'): {
'edge_id': 10,
'edge_type': 'exon',
'v1': 9,
'v2': 13
},
('chr2', 65, 50, '-', 'intron'): {
'edge_id': 11,
'edge_type': 'intron',
'v1': 13,
'v2': 11
},
('chr2', 45, 50, '+', 'exon'): {
'edge_id': 12,
'edge_type': 'exon',
'v1': 12,
'v2': 11
},
('chr2', 80, 60, '-', 'intron'): {
'edge_id': 13,
'edge_type': 'intron',
'v1': 3,
'v2': 10
},
('chr2', 60, 50, '-', 'exon'): {
'edge_id': 14,
'edge_type': 'exon',
'v1': 10,
'v2': 11
}
}
assert(edges == ctrl_edges)
# # tests create_transcript_edge_dict where transcripts already
# # # exist in the swangraph
# # def test_create_transcript_edge_dict_edge_t_sg(self):
# # pass
# # # TODO
#
# tests create_dfs with an empty sg
# also ensures that empty dict -> df -> dict conversion doesn't screw up
def test_create_dfs_empty_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path', 'novelty'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs when from_talon = True
def test_create_dfs_empty_sg_from_talon(self):
transcripts, exons = get_test_transcript_exon_dicts()
sg = swan.SwanGraph()
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# remove the columns that are there just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
ctrl_edge_df.set_index('edge_id_index', inplace=True)
ctrl_edge_df.index.name = 'edge_id'
# again, remove and reformat columns that are there for debugging
ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
ctrl_t_df.set_index('tid_index', inplace=True)
ctrl_t_df.index.name = 'tid'
ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
ctrl_t_df = ctrl_t_df[t_df.columns]
check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in a swangraph with data
def test_create_dfs_data_sg(self):
transcripts, exons = get_test_transcript_exon_dicts()
del transcripts['test2']
sg = swan.SwanGraph()
# add dummy data
# loc_df - format
loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
loc_df.set_index('vertex_id_index', inplace=True)
loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
edge_df.set_index('edge_id_index', inplace=True)
edge_df.index.name = 'edge_id'
# t_df - remove and reformat columns that are there for debugging
t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
t_df.set_index('tid_index', inplace=True)
t_df.index.name = 'tid'
t_df.drop(['loc_path'], axis=1, inplace=True)
t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
t_df = t_df[t_df.columns]
sg.loc_df = loc_df
sg.edge_df = edge_df
sg.t_df = t_df
loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)
# control data
# loc_df - format
ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
ctrl_loc_df.set_index('vertex_id_index', inplace=True)
ctrl_loc_df.index.name = 'vertex_id'
# edge_df - format and remove the columns that are there
# just for debugging purposes
ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 13:56:11 2018
@author: <NAME>
"""
import logging
logger = logging.getLogger(__name__)
import sklearn.metrics
from sklearn.metrics.regression import _check_reg_targets, r2_score
from sklearn.metrics import silhouette_score, calinski_harabaz_score, davies_bouldin_score
from sklearn.metrics.scorer import SCORERS, _BaseScorer, type_of_target
import numpy as np
import pandas as pd
from functools import partial
class log_loss_scorer_patched(object):
""" Log Loss scorer, correcting a small issue in sklearn (labels not used) """
def __init__(self):
self._deprecation_msg = None
def __call__(self, clf, X, y, sample_weight=None):
y_pred = clf.predict_proba(X)
if not hasattr(clf, "classes_"):
raise ValueError("estimator should have a 'classes_' attribute")
if isinstance(y_pred, list):
# this means that this is a multi-target prediction
all_log_losses = [
-1.0 * sklearn.metrics.log_loss(y[:, j], y_pred[j], sample_weight=sample_weight, labels=clf.classes_[j])
for j in range(len(y_pred))
]
# Avg of all log-loss
# TODO : we could also return everything
return np.mean(all_log_losses)
else:
return -1.0 * sklearn.metrics.log_loss(y, y_pred, sample_weight=sample_weight, labels=clf.classes_)
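# Minimal usage sketch for log_loss_scorer_patched (illustrative only; the
# dataset and estimator below are assumptions, not part of this module):
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#
#     X, y = load_iris(return_X_y=True)
#     clf = LogisticRegression(max_iter=1000).fit(X, y)
#     scorer = log_loss_scorer_patched()
#     score = scorer(clf, X, y)  # negative log-loss, so higher is better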
class avg_roc_auc_score(object):
""" Average Roc Auc scorer, make sklearn roc auc scorer works with multi-class """
def __init__(self, average="macro"):
self.average = average
self._deprecation_msg = None
def __call__(self, clf, X, y, sample_weight=None):
y_pred = clf.predict_proba(X)
if not hasattr(clf, "classes_"):
raise ValueError("estimator should have a 'classes_' attribute")
if not y_pred.shape[1] == len(clf.classes_):
raise ValueError("estimator.classes_ isn't the same shape as predict_proba")
y2 = np.zeros(y_pred.shape)
for i, cl in enumerate(clf.classes_):
y2[:, i] = 1 * (y == cl)
classes_present = y2.sum(axis=0) > 0
return sklearn.metrics.roc_auc_score(
y2[:, classes_present], y_pred[:, classes_present], sample_weight=sample_weight, average=self.average
)
# return classes_present.sum() / len(classes_present) * score
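# Illustrative usage sketch: avg_roc_auc_score is called like any sklearn
# scorer object, e.g. scorer = avg_roc_auc_score(average="macro") followed by
# scorer(clf, X, y). It one-hot encodes y against clf.classes_ and drops
# classes absent from y, which is what lets it handle multi-class targets.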
class avg_average_precision(object):
""" Average of Average Precision, make sklearn average precision scorer works with multi-class """
def __init__(self, average="macro"):
self.average = average
self._deprecation_msg = None
def __call__(self, clf, X, y, sample_weight=None):
y_pred = clf.predict_proba(X)
if not hasattr(clf, "classes_"):
raise ValueError("estimator should have a 'classes_' attribute")
if not y_pred.shape[1] == len(clf.classes_):
raise ValueError("estimator.classes_ isn't the same shape as predict_proba")
y2 = np.zeros(y_pred.shape)
for i, cl in enumerate(clf.classes_):
y2[:, i] = 1 * (y == cl)
classes_present = y2.sum(axis=0) > 0
return sklearn.metrics.average_precision_score(
y2[:, classes_present], y_pred[:, classes_present], sample_weight=sample_weight, average=self.average
)
class confidence_score(object):
""" Mesure howmuch 'maxproba' helps discriminate between mistaken and correct instance
If the maximum probability is high, the model is confident in its prediction otherwise the model esitates.
We'd like error to be less present when maximum proba is high.
roc_auc_score mesures how much 'maxproba' discrimite betweeen mistaken and correct instance
Remark : if we note p(X,c) := Proba(Y = c | X) for a given class c
then we have Proba( Y = Ypredict | X ) = Max( p(X,c) for c in classes )
"""
def __init__(self):
self._deprecation_msg = None
def __call__(self, clf, X, y, sample_weight=None):
yhat = clf.predict(X)
yhat_proba = clf.predict_proba(X)
if isinstance(yhat_proba, pd.DataFrame):
yhat_proba = yhat_proba.values
yhat_maxproba = yhat_proba.max(axis=1)
is_correct = 1 * (yhat == y)
if (is_correct == 1).all():
return np.nan
else:
return sklearn.metrics.roc_auc_score(y_true=is_correct, y_score=yhat_maxproba, sample_weight=sample_weight)
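# Illustrative usage sketch: confidence_score()(clf, X, y) is the ROC AUC of
# the maximum predicted probability as a score for "was this row predicted
# correctly"; values near 1.0 mean mistakes are concentrated on low-confidence
# predictions, and np.nan is returned when the classifier makes no mistakes.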
def log_r2_score(y_true, y_pred, sample_weight=None, multioutput="uniform_average"):
""" r squared on log of prediction
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] \
or array-like of shape = (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
if not (y_true >= 0).all() or not (y_pred >= 0).all():
raise ValueError("log_r2_score cannot be used when targets contain negative values.")
return r2_score(np.log(y_true + 1), np.log(y_pred + 1), sample_weight, multioutput)
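# Illustrative example (assuming non-negative targets): with
# y_true = [1, 10, 100] and y_pred = [1, 9, 110], log_r2_score compares
# np.log(y_true + 1) with np.log(y_pred + 1), so the large absolute error on
# the third point no longer dominates the score the way it would in r2_score.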
def _cached_call(cache, estimator, method, *args, **kwargs):
"""Call estimator with method and args and kwargs."""
# Remark : copy of sk22 code
if cache is None:
return getattr(estimator, method)(*args, **kwargs)
try:
return cache[method]
except KeyError:
result = getattr(estimator, method)(*args, **kwargs)
cache[method] = result
return result
class _CustomPredictScorer(_BaseScorer):
def __init__(self, score_func, sign, kwargs):
super().__init__(score_func, sign, kwargs)
def __call__(self, estimator, X, y_true=None, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
# Remark : copy of sk22 code TODO
return self._score(partial(_cached_call, None), estimator, X, y_true,
sample_weight=sample_weight)
def _score(self, method_caller, estimator, X, y_true=None, sample_weight=None):
y_pred = method_caller(estimator, "predict", X)
try:
return self._sign * self._score_func(X, y_pred, **self._kwargs)
except Exception as e:
logger.warning(str(e) + ": NaN will be return")
return np.nan
def make_scorer_clustering(score_func, greater_is_better, **kwargs):
sign = 1 if greater_is_better else -1
return _CustomPredictScorer(score_func, sign, kwargs)
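# Illustrative usage sketch (the fitted KMeans model below is an assumption):
#
#     silhouette_scorer = make_scorer_clustering(silhouette_score, greater_is_better=True)
#     score = silhouette_scorer(kmeans_model, X)
#
# _CustomPredictScorer calls score_func(X, estimator.predict(X)), so the
# wrapped metric must accept (X, labels) the way silhouette_score does.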
class _GroupProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, groups, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
groups : array-like
The groups to use for the scoring
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
y_pred = clf.predict_proba(X)
if y_type == "binary":
if y_pred.shape[1] == 2:
y_pred = y_pred[:, 1]
else:
raise ValueError(
"got predict_proba of shape {},"
" but need classifier with two"
" classes for {} scoring".format(y_pred.shape, self._score_func.__name__)
)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred, groups, sample_weight=sample_weight, **self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, groups, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
def max_proba_group_accuracy(y, y_pred, groups):
""" group by group average of 'True' if prediction with highest probability is True """
if y_pred.ndim != 1:
raise ValueError("this function is for binary classification only")
df = pd.DataFrame({"proba": y_pred, "groups": groups, "y": y})
import numpy as np
import pandas as pd
import tarfile
import sys
import os
import scipy.spatial
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
import collections
import json
import warnings
import pickle
import multiprocessing
import parasail
import pwseqdist
from zipdist.zip2 import Zipdist2
from . import repertoire_db
from . import pgen
from . import mappers
from . import pairwise
# includes tools for use with explore.py
#from paths import path_to_matrices
#This replaces: from tcrdist.cdr3s_human import pb_cdrs
pb_cdrs = repertoire_db.generate_pbr_cdr()
class TCRrep:
"""
Class for managing a T-Cell Receptor Repertoire (TCRrep) analysis. Produce
a distance measure based on comparisons from multiple T-Cell receptor
complementarity-determining regions (CDRs)
Attributes
----------
cell_df : pandas.core.frame.DataFrame
input data at the level of individual cell level
clone_df : pandas.core.frame.DataFrame
deduplicated data frame at the level of unique clones
index_cols : list
list of strings, indicating columns to group cells to clones
organism : string
either "human" or "mouse"
meta_cols : list
list of strings, indicating metadata columns (e.g. hla_type)
chains : list
list of strings containing one or more of 'alpha', 'beta', 'gamma' or 'delta'
stored_tcrdist : list
list containing all previously generated outputs of
`TCRrep.compute_paired_tcrdist`
paired_tcrdist : ndarray
most recent output of :py:meth:`tcrdist.repertoire.TCRrep.compute_paired_tcrdist`
paired_tcrdist_weights : dictionary
CDR weights used to generate the most recent output of
TCRrep.compute_paired_tcrdist`
all_genes : dictionary
dictionary of reference TCRs
Methods
-------
TCRrep.infer_cdrs_from_v_gene()
infer CDR amino acid sequences from v-gene specified
deduplicate()
remove duplicate clones by grouping
compute_pairwise_all()
compute pairwise distances on deduplicated data for all regions in
a chain. Alternatively can compute distance between a
compute_paired_tcrdist()
calculate weighted pairwise distance across all CDRs
generate_ref_genes_from_db()
generates all_genes attribute a dictionary of reference TCRs
"""
def __init__(self,
cell_df,
chains=['alpha', 'beta'],
organism = "human",
db_file = "alphabeta_db.tsv"):
self.db_file = db_file
self.cell_df = cell_df
self.chains = chains
self.organism = organism
self.pwdist_df = None
self.clone_df = None
self.index_cols = []
self.stored_tcrdist = []
self.paired_tcrdist = None
self.paired_tcrdist_weights = None
self.meta_cols = None
self.project_id = "<Your TCR Repertoire Project>"
self.all_genes = None
self.imgt_aligned_status = None
# VALIDATION OF INPUTS
# check that chains are valid.
self._validate_organism()
self._validate_chains()
# check that is a pd.DataFrame
self._validate_cell_df()
# INIT OF SPECIFIC ATTRIBUTES BASED ON SELECTED CHAINS
self._initialize_chain_specific_attributes()
# INIT the REFERENCE DB see repertoire_db.py
self.generate_ref_genes_from_db(db_file)
def __repr__(self):
return 'tcrdist.repertoire.TCRrep for {}\n with index_cols: {}\n with model organism: {}'.format(self.project_id, self.index_cols, self.organism)
def __getitem__(self, position):
# It should be decided whether __getitem__ should refer to the cell_df or to the clone_df, or whether it could be used for iterating over pairwise distance matrices
if self.clone_df is None:
return self.cell_df.loc[position]
if self.clone_df is not None:
return self.clone_df.loc[position]
def __len__(self):
return self.cell_df.shape[0]
def generate_ref_genes_from_db(self, db_file = "alphabeta_db.tsv"):
"""
Responsible for generating the all_genes attribute containing all
the reference TCR data.
Parameters
----------
db_file : string
Returns an ordered dictionary of reference sequences
"""
self.all_genes = repertoire_db.RefGeneSet(db_file).all_genes
def _map_gene_to_reference_seq2(self,
organism,
gene,
cdr,
attr ='cdrs_no_gaps'):
"""
internal function that looks up the cdr sequence (gapped or ungapped)
from the self.all_genes library
Parameter
---------
organism : string
mouse or human
gene : string
specifies the TCR gene such as 'TRAV1*01'
cdr : int
0 - CDR1, 1-CDR2 and 2 - CDR2.5
attr : string
'cdrs_no_gaps' or 'cdrs_aligned' with gaps from IMGT
"""
try:
aa_string = self.all_genes[organism][gene].__dict__[attr][cdr]
except KeyError:
aa_string = None
warnings.warn("{} gene was not recognized in reference db no cdr seq could be inferred".format(gene))
return(aa_string)
def deduplicate(self):
"""
With attribute self.index_col calls _deduplicate() and assigns
result to attribute self.clone_df
"""
self.clone_df = _deduplicate(self.cell_df, self.index_cols)
# check if any clones were lost due to missing information
if np.sum(self.cell_df['count']) != np.sum(self.clone_df['count']):
n_cells_lost = np.sum(self.cell_df['count']) - np.sum(self.clone_df['count'])
n_cell = np.sum(self.cell_df['count'])
warnings.warn(f"Not all cells/sequences could be grouped into clones. {n_cells_lost} of {n_cell} were not captured. This occurs when any of the values in the index columns are null or missing for a given sequence. To see entries with missing values use: tcrdist.repertoire.TCRrep.show_incomplete()\n")
# if no clone_id column is provided, then create one as a sequence of numbers
if "clone_id" not in self.clone_df:
N = self.clone_df.shape[0]
self.clone_df['clone_id'] = range(1, N + 1 ,1)
return self
def show_incomplete(self):
ind = self.cell_df[self.index_cols].isnull().any(axis = 1)
incomplete_clones = self.cell_df.loc[ind,self.index_cols].copy()
return incomplete_clones
# def tcr_motif_clones_df(self):
# """
# Use this function to create a clones_df input appropriate to TCRMotif.
#
# It make use of a mapper to ensure proper columns and column names
#
# Example
# -------
# TCRMotif(clones_df = TCRRep.tcr_motif_clones_df())
# """
# return _map_clone_df_to_TCRMotif_clone_df(self.clone_df)
def tcr_motif_clones_df(self):
"""
Use this function to create a clones_df input appropriate to TCRMotif.
It makes use of a mapper to ensure proper columns and column names.
Example
-------
TCRMotif(clones_df = TCRrep.tcr_motif_clones_df())
"""
return mappers.generic_pandas_mapper(self.clone_df,
mappers.TCRrep_clone_df_to_TCRMotif_clone_df)
def infer_cdrs_from_v_gene(self, chain, imgt_aligned = False):
"""
Function taking a TCR v-gene name to infer the amino acid
sequence of the cdr1, cdr2, and pmhc loop regions.
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
imgt_aligned : boolean
if True cdr1, cdr2, cdr2.5 will be returned with gaps
and by definition will be the same length. MSH.......ET
Returns
-------
self.cell_df : pandas.core.frame.DataFrame
Assigns [cdr3|cdr2|cdr1|pmhc]_[a|b|d|g]_aa columns in self.cell_df
Examples
--------
>>> testrep = TCRrep(cell_df = example_df, organism = "human", chains= ["alpha","beta"])
>>> testrep.infer_cdrs_from_v_gene(chain = "alpha")
>>> testrep.infer_cdrs_from_v_gene(chain = "beta")
>>> testrep.index_cols = testrep.index_cols + ['cdr1_a_aa','cdr2_a_aa', 'pmhc_a_aa', 'cdr1_b_aa', 'cdr2_b_aa', 'pmhc_b_aa']
Notes
-----
This function takes the V-gene names and infers the amino acid
sequence of the cdr1, cdr2, and pmhc regions (pmhc refers to the
pMHC-facing loop between CDR2 and CDR3, IMGT alignment columns 81 - 86).
These sequences are based up on lookup from the dictionary here:
originally: from tcrdist.cdr3s_human import pb_cdrs
now:
self.generate_ref_genes_from_db(db_file)
imgt_aligned : boolean
if True cdr1, cdr2, cdr2.5 will be returned with gaps
and by definition will be the same length.
MSH.......ET
FNH.......DT
LGH.......NA
References
----------
IMGT definitions of cdr1, cdr2, and pMHC-facing can be found here
http://www.imgt.org/IMGTScientificChart/Nomenclature/IMGT-FRCDRdefinition.html
"""
if not imgt_aligned:
self.imgt_aligned_status = False
f0 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 0,
organism = self.organism,
attr ='cdrs_no_gaps')
f1 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 1,
organism = self.organism,
attr ='cdrs_no_gaps')
f2 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 2,
organism = self.organism,
attr ='cdrs_no_gaps')
else:
self.imgt_aligned_status = True
f0 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 0,
organism = self.organism,
attr ='cdrs')
f1 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 1,
organism = self.organism,
attr ='cdrs')
f2 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 2,
organism = self.organism,
attr ='cdrs')
if chain is "alpha":
self.cell_df['cdr1_a_aa'] = list(map(f0, self.cell_df.v_a_gene))
self.cell_df['cdr2_a_aa'] = list(map(f1, self.cell_df.v_a_gene))
self.cell_df['pmhc_a_aa'] = list(map(f2, self.cell_df.v_a_gene))
if chain is "beta":
self.cell_df['cdr1_b_aa'] = list(map(f0, self.cell_df.v_b_gene))
self.cell_df['cdr2_b_aa'] = list(map(f1, self.cell_df.v_b_gene))
self.cell_df['pmhc_b_aa'] = list(map(f2, self.cell_df.v_b_gene))
if chain is "gamma":
self.cell_df['cdr1_g_aa'] = list(map(f0, self.cell_df.v_g_gene))
self.cell_df['cdr2_g_aa'] = list(map(f1, self.cell_df.v_g_gene))
self.cell_df['pmhc_g_aa'] = list(map(f2, self.cell_df.v_g_gene))
if chain is "delta":
self.cell_df['cdr1_d_aa'] = list(map(f0, self.cell_df.v_d_gene))
self.cell_df['cdr2_d_aa'] = list(map(f1, self.cell_df.v_d_gene))
self.cell_df['pmhc_d_aa'] = list(map(f2, self.cell_df.v_d_gene))
def infer_olga_aa_cdr3_pgens(self,
chain,
cdr3_only = False,
chain_folder = None,
recomb_type = None):
"""
Infer the probability of generation using the Olga Code base
(Sethna et al. 2018) updated to python 3 for use with tcrdist.
Parameters
----------
chain : string
'alpha', 'beta' (TODO: create default models for 'gamma' and 'delta')
cdr3_only : boolean
(optional) if True, the amino acid cdr3 probability of generation statistic
will be calculated without using the V or J gene usage statistics
chain_folder : string
(optional) specifies the OLGA default model folder containing a
generative model. When None (which is recommended), the default
folder is chosen based on the chain argument.
recomb_type : string
(optional) 'VDJ' or 'VJ' specifying the OLGA recombination model.
When None (which is recommended), the default folder is chosen based
on the chain argument.
Returns
-------
olga_pgens : pd.Series
containing the probability of generation, this output is also assigned
to clone_df.cdr3_[a|b|g|d]_aa_pgen
Notes
-----
tcrdist2 authors UPDATED THE FOLLOWING CODE TO PYTHON 3
USING COMMIT e825c333f0f9a4eb02132e0bcf86f0dca9123114 (Jan 18, 2019)
ORIGINAL OLGA CODE CAN BE FOUND AT:
https://github.com/zsethna/OLGA
"""
assert(isinstance(self.clone_df, pd.DataFrame)), "this function requires a valid TCRrep.clone_df has been instantiated"
# The Nested If Statements assigns cdr3s, v_genes, j_genes based on chain, organism and other optional args
if chain == "alpha":
if (chain_folder is None):
if self.organism == 'human':
chain_folder = "human_T_alpha"
elif self.organism == 'mouse':
raise ValueError("SORRY: OLGA default files do not yet support mouse alpha TCRs")
chain_folder = "mouse_T_alpha"
if (recomb_type is None):
recomb_type = "VJ"
cdr3s = self.clone_df.cdr3_a_aa
if not cdr3_only:
v_genes = self.clone_df.v_a_gene
j_genes = self.clone_df.j_a_gene
else:
v_genes = None
j_genes = None
if chain == "beta":
if (chain_folder is None):
if self.organism == 'human':
chain_folder = "human_T_beta"
elif self.organism == 'mouse':
chain_folder = "mouse_T_beta"
if (recomb_type is None):
recomb_type = "VDJ"
cdr3s = self.clone_df.cdr3_b_aa
if not cdr3_only:
v_genes = self.clone_df.v_b_gene
j_genes = self.clone_df.j_b_gene
else:
v_genes = None
j_genes = None
if chain == "gamma":
raise ValueError("SORRY: OLGA default files do not yet support gamma TCRs")
if (chain_folder is None):
if self.organism == 'human':
chain_folder = "human_T_gamma"
elif self.organism == 'mouse':
chain_folder = "mouse_T_gamma"
if (recomb_type is None):
recomb_type = None # ??? Not sure what is the most appropriate model
cdr3s = self.clone_df.cdr3_g_aa
if not cdr3_only:
v_genes = self.clone_df.v_g_gene
j_genes = self.clone_df.j_g_gene
else:
v_genes = None
j_genes = None
if chain == "delta":
raise ValueError("SORRY:OLGA default files do not yet support delta TCRs")
if (chain_folder is None):
if (chain_folder is None):
if self.organism == 'human':
chain_folder = "human_T_delta"
elif self.organism == 'mouse':
chain_folder = "mouse_T_delta"
if (recomb_type is None):
recomb_type = None # ??? Not sure what is the most appropriate model
cdr3s = self.clone_df.cdr3_d_aa
if not cdr3_only:
v_genes = self.clone_df.v_d_gene
j_genes = self.clone_df.j_d_gene
else:
v_genes = None
j_genes = None
# initializes the appropriate olga genomic model
my_olga_model = pgen.OlgaModel(chain_folder = chain_folder,
recomb_type = recomb_type)
# computes pgen from clone_df
olga_pgens = my_olga_model.compute_aa_cdr3_pgens(cdr3s,
v_genes,
j_genes)
if chain is "alpha":
self.clone_df['cdr3_a_aa_pgen'] = pd.Series(olga_pgens)
if chain is "beta":
self.clone_df['cdr3_b_aa_pgen'] = pd.Series(olga_pgens)
if chain is "gamma":
self.clone_df['cdr3_g_aa_pgen'] = pd.Series(olga_pgens)
if chain is "delta":
self.clone_df['cdr3_d_aa_pgen'] = pd.Series(olga_pgens)
return(pd.Series(olga_pgens))
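# Illustrative usage sketch (assumes clone_df was built via deduplicate() and
# contains cdr3_b_aa, v_b_gene and j_b_gene columns):
#
#     tr.infer_olga_aa_cdr3_pgens(chain="beta")
#     tr.clone_df["cdr3_b_aa_pgen"].describe()
#
# With cdr3_only=True the V/J gene usage statistics are ignored and only the
# amino acid CDR3 sequence contributes to the pgen estimate.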
def archive(self,
dest = "default_archive",
dest_tar_name = "default_archive.tar.gz",
verbose = True,
use_csv = True):
"""
Use Zipdist2 to Make an Archive.tar.gz
Parameters
----------
dest : str
e.g., 'default_archive'
dest_tar_name : str
e.g., 'default_archive.tar.gz'
verbose : bool
if True, report steps in archive process
use_csv : bool
if True, archive will include .csv file. Useful for porting files to other applications, but creates large files.
Example
-------
.. code-block:: python
tr = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr.archive(dest = "default_archive", dest_tar_name = "default_archive.tar.gz")
Notes
-----
See :py:meth:`tcrdist.repertoire.rebuild` for rebuilding a TCRrep instance from
a TCRrep archive .tar.gz file.
"""
self.cell_df_index = self.cell_df.index.copy()
self.cell_df = self.cell_df.reset_index()
z = Zipdist2(name = dest_tar_name , target = self)
z._save(dest = dest, dest_tar = dest_tar_name, verbose = verbose, use_csv = use_csv )
sys.stdout.write(f"\tArchiving your TCRrep using Zipdist2 in [{dest_tar_name}]\n")
def rebuild(self, dest_tar_name = "default_archive.tar.gz", verbose = True ):
"""
Use Zipdist2 to rebuild a TCRrep instance from an Archive.tar.gz
Parameters
----------
dest_tar_name : str
e.g., 'default_archive.tar.gz'
verbose : bool
If True, report rebuilding process steps.
Example
-------
Shows :py:meth:`tcrdist.repertoire.archive` and :py:meth:`tcrdist.repertoire.rebuild`
used together.
.. code-block:: python
tr = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr.archive(dest = "default_archive", dest_tar_name = "default_archive.tar.gz")
tr_new = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr_new.rebuild(dest_tar_name = "default_archive.tar.gz")
Notes
-----
See :py:meth:`tcrdist.repertoire.archive` for creating TCRrep archive file.
"""
#tr = TCRrep(cell_df=df.iloc[0:0,:], chains=chains, organism='mouse')
z = Zipdist2(name = "default_archive", target = self)
z._build(dest_tar = dest_tar_name , target = self, verbose = verbose)
# VALIDATION OF INPUTS
# check that chains are valid.
self._validate_organism()
self._validate_chains()
# check that is a pd.DataFrame
self._validate_cell_df()
# RE INIT the REFERENCE DB see repertoire_db.py
self.generate_ref_genes_from_db(self.db_file)
def tcrdist2(self,
metric = "nw",
processes = None,
weights = None,
dump = False,
reduce = True,
save = False,
dest = "default_archive",
dest_tar_name = "default_archive.tar.gz",
verbose = True):
"""
Automated calculation of single chain and paired chain tcr-distances
Parameters
----------
metric : str
specified metric, currently only "nw" and "hamming" are supported
(see notes for legacy methods)
processes : int
number of cpus to use; by default half of the available cpus/threads are used
weights : dict
override cdr weightings
dump : bool
if True, dump intermediate cdr1, cdr2, and pmhc pairwise matrices
reduce : bool
if True, converts distance matrices to a smaller data type.
save : bool
if True, saves intermediate files to dest
dest : str
path to save components
verbose : bool
If True, provide sys.stdout reports.
Notes
-----
tcrdist2 is a method to help new-users run tcrdist2 with sensible defaults.
Distance metrics are highly customizable.
Consult the `docs <https://tcrdist2.readthedocs.io>`_ for more information.
To compute Dash et al. 2017 style tcrdistance, instead of tcrdist2,
use commands:
TCRrep._tcrdist_legacy_method_alpha_beta()
TCRrep._tcrdist_legacy_method_beta()
TCRrep._tcrdist_legacy_method_alpha()
TCRrep._tcrdist_legacy_method_gamma_delta()
TCRrep._tcrdist_legacy_method_gamma()
TCRrep._tcrdist_legacy_method_delta()
"""
# Default: use half of the available cpus/threads
if processes is None:
max_threads = multiprocessing.cpu_count()
processes = max_threads // 2
sys.stdout.write(f"trcdist2 detected {max_threads } available cpus/threads.\n")
sys.stdout.write(f"\tTCRrep use parallel processing, setting default to use {processes} cpus/threads.\n")
sys.stdout.write(f"\tThe `processes` arg of TCRrep.tcrdist2() can be set manually\n")
for chain in self.chains:
self.infer_cdrs_from_v_gene(chain=chain, imgt_aligned=True)
if weights is None:
weights = {'cdr1_a_aa':1,
'cdr2_a_aa':1,
'cdr3_a_aa':3,
'pmhc_a_aa':1,
'cdr1_b_aa':1,
'cdr2_b_aa':1,
'cdr3_b_aa':3,
'pmhc_b_aa':1,
'cdr1_g_aa':1,
'cdr2_g_aa':1,
'cdr3_g_aa':3,
'pmhc_g_aa':1,
'cdr1_d_aa':1,
'cdr2_d_aa':1,
'cdr3_d_aa':3,
'pmhc_d_aa':1,
'v_a_gene':0,
'j_a_gene':0,
'v_b_gene':0,
'j_b_gene':0,
'v_g_gene':0,
'j_g_gene':0,
'v_d_gene':0,
'j_d_gene':0,
'cdr3_a_nucseq':0,
'cdr3_b_nucseq':0,
'cdr3_g_nucseq':0,
'cdr3_d_nucseq':0}
index_cdrs = [k for k in weights.keys() if k in self.cell_df.columns]
for x in ['clone_id', 'subject', 'epitope']:
assert x in self.cell_df.columns, f"{x} must be in TCRrep.cell_df"
self.index_cols = ['clone_id', 'subject', 'epitope'] + index_cdrs
sys.stdout.write("Deduplicating your TCRrep.cell_df to make TCRrep.clone_df.\n")
self.deduplicate()
sys.stdout.write(f"Computing pairwise matrices for multiple Complementarity Determining Regions (CDRs):.\n")
for chain in self.chains:
if verbose: sys.stdout.write(f"\tComputing pairwise matrices for cdrs within the {chain}-chain using the {metric} metric.\n")
self.compute_pairwise_all(chain = chain, metric = metric, processes = processes)
sys.stdout.write("Calculating composite tcrdistance measures:\n")
self.compute_paired_tcrdist( chains=self.chains, store_result=False)
for chain in self.chains:
if verbose: sys.stdout.write(f"\tSingle chain pairwise tcrdistances are in attribute : TCRrep.pw_{chain}\n")
if verbose: sys.stdout.write(f"\tCombined pairwise tcrdistances are in attribute : TCRrep.pw_tcrdist\n")
if verbose: sys.stdout.write(f"\tCDR specific tcrdistances are in attributes, e.g., : TCRrep.cdr3_{chain[0]}_aa_pw\n")
# <dump> boolean controls whether we dump easy to recalculate cdr1, cdr2, pmhc
# <reduce> boolean controls whether we convert distance matrices
# to a smaller data type.
if reduce:
data_type = 'int16'
if verbose: sys.stdout.write(f"Reducing File Size: `reduce` argumment set to {reduce}:\n")
self.reduce_file_size( data_type = data_type, verbose = True)
# dump the individual cdr1, cdr2, and pmhc pairwise matrices, which most users will never need again.
if dump:
if verbose: sys.stdout.write(f"Cleanup: `dump` argument set to {dump}. Dumping individual CDR specific distance matrices:\n")
for i in index_cdrs:
if i.startswith("cdr1") or i.startswith("cdr2") or i.startswith("pmhc"):
if i.endswith("aa"):
i = f"{i}_pw"
sys.stdout.write(f"\tDumping : {i}\n")
self.__dict__[i] = None
if save:
if verbose: sys.stdout.write(f"Archiving your TCRrep using Zipdist2 (save = {save})\n")
# To avoid = ValueError: feather does not support serializing a non-default index for the index; you can .reset_index() to make the index into column(s)
self.archive(dest = dest, dest_tar_name = dest_tar_name, verbose = True)
if verbose: sys.stdout.write(f"\tArchiving your TCRrep using Zipdist2 in [{dest_tar_name}]\n")
if verbose: sys.stdout.write(f"TCRrep.tcrdist2() COMPLETED SUCCESSFULLY, see the docs for Analysis steps!\n")
def compute_pairwise_all(self,
chain,
compute_specific_region = None,
metric = "hamming",
processes = 2,
user_function = None,
to_matrix = True,
**kwargs):
"""
Computes pairwise distances for all regions on a given
chain or for a specific region on that chain.
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
compute_specific_region : string
optional string (e.g. "cdr2_a_aa") to over-ride function behavior
and compute only a single region
metric : string
'nw', 'hamming', or 'custom' (or if legacy tcrdist is to be calculated,
"tcrdist_cdr3", "tcrdist_cdr1", "tcrdist_cdr2",
"tcrdist_cdr2.5", "tcrdist_pmhc" can be supplied. WARNING:
imgt_aligned must be set to True in tr.infer_cdrs_from_v_gene().
processes : int
int for number of available cpu for multiprocessing (to see available
try multiprocessing.cpu_count())
user_function : function
function for a custom distance metric on two strings (This is
an advanced option, so don't use this unless you are absolutely
sure what you are doing; metric arg must be set to 'custom').
to_matrix : boolean
True will return pairwise distance as result as a 2D ndarray
Notes
-----
Uses _assign_pw_result to assign self.[cdr3|cdr2|cdr1|pmhc]_[a|b|d|g]_aa_pw objects
Examples
--------
>>> testrep = TCRrep(cell_df = example_df, organism = "human", chains= ["alpha","beta"])
>>> testrep.infer_cdrs_from_v_gene(chain = "alpha")
>>> testrep.infer_cdrs_from_v_gene(chain = "beta")
>>> testrep.index_cols = testrep.index_cols + ['cdr1_a_aa','cdr2_a_aa','pmhc_a_aa', 'cdr1_b_aa', 'cdr2_b_aa', 'pmhc_b_aa']
>>> testrep.deduplicate()
>>> testrep.compute_pairwise_all(chain = "alpha", metric= "hamming")
>>> testrep.compute_pairwise_all(chain = "beta", metric= "hamming")
alternatively, compute each region one by one
>>> testrep.compute_pairwise_all(chain = "beta", compute_specific_region="cdr1_b_aa")
>>> testrep.compute_pairwise_all(chain = "alpha", compute_specific_region="cdr2_a_aa")
"""
# validate chain argument passed
self._validate_chain(chain)
if metric in ["tcrdist_cdr3", "tcrdist_cdr1", "tcrdist_cdr2",
"tcrdist_cdr2.5", "tcrdist_pmhc"]:
if not self.imgt_aligned_status:
raise ValueError("imgt_aligned must be set to True in tr.infer_cdrs_from_v_gene()")
# If compute_specific_region is None, then the behavior is to loop through the a list regions.
if compute_specific_region is None:
index_col_from_chain = {'alpha' : ['cdr3_a_aa', 'cdr2_a_aa',
'cdr1_a_aa', 'pmhc_a_aa'],
'beta' : ['cdr3_b_aa', 'cdr2_b_aa',
'cdr1_b_aa', 'pmhc_b_aa'],
'gamma' : ['cdr3_g_aa', 'cdr2_g_aa',
'cdr1_g_aa', 'pmhc_g_aa'],
'delta' : ['cdr3_d_aa', 'cdr2_d_aa',
'cdr1_d_aa', 'pmhc_d_aa']}
# Alternative behavior: is to loop over a single chain and region.
else:
index_col_from_chain = {}
index_col_from_chain[chain] = [compute_specific_region]
for index_col in index_col_from_chain[chain]:
try:
sequences = self.clone_df[index_col]
except KeyError:
warnings.warn("{} not found, no distances computed for {}".format(index_col, index_col))
continue
# COMPUTE PAIRWISE
# If kwargs were passed use them, otherwise pass chain-sp. smat from above
if ('matrix' in kwargs) or ("open" in kwargs):
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**kwargs)
else:
# Pull the default substitution matrix from object attributes
smat = self._get_smat(chain = chain, index_col = index_col)
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**{'matrix' : smat})
# ASSIGN RESULT
self._assign_pw_result(pw = pw, chain=chain, index_col=index_col)
def compute_paired_tcrdist(self,
chains = ['alpha', 'beta'],
replacement_weights = {},
store_result = False):
"""
Computes tcrdistance metric combining distances metrics across multiple
T Cell Receptor CDR regions.
Parameters
----------
chains : list
list of strings containing some combination of 'alpha', 'beta',
'gamma', and 'delta'
replacement_weights : dictionary
optional dictionary of the form {'cdr1_a_aa_pw':1, 'cdr2_a_aa_pw':1}
used to place greater weight on certain TCR regions. The default
is a weight of 1.
store_result : boolean
True will store results to
:py:attr:`TCRrep.stored_tcrdist`
Returns
-------
r : dictionary
a dictionary with keys paired_tcrdist points to a 2D
tcrdist np.ndarray and paired_tcrdist_weights pointing to
dictionary of weights. See notes.
Notes
-----
Calling this function assigns results to
`TCRrep.paired_tcrdist` and
`TCRrep.paired_tcrdist_weights`
and stores r to
`TCRrep.stored_tcrdist`
In addition it returns a dictionary with keys `paired_tcrdist` 2D
tcrdist np.array and `paired_tcrdist_weights`
a dictionary of regions and relative weights:
{'paired_tcrdist': array([[ 0., 76., 80.,..., 89., 89., 87.],
[ 76., 0., 60., ..., 81., 75., 43.],
[ 80., 60., 0., ..., 59., 81., 77.],
...,
[ 89., 81., 59., ..., 0., 60., 58.],
[ 89., 75., 81., ..., 60., 0., 40.],
[ 87., 43., 77., ..., 58., 40., 0.]]),
'paired_tcrdist_weights': {'cdr1_a_aa_pw': 1,
'cdr1_b_aa_pw': 2,
'cdr2_a_aa_pw': 1,
'cdr2_b_aa_pw': 2,
'cdr3_a_aa_pw': 2,
'cdr3_b_aa_pw': 4,
'pmhc_a_aa_pw': 1,
'pmhc_b_aa_pw': 2}}
"""
[self._validate_chain(c) for c in chains]
weights = {'cdr1_a_aa_pw':1,
'cdr2_a_aa_pw':1,
'cdr3_a_aa_pw':1,
'pmhc_a_aa_pw':1,
'cdr1_b_aa_pw':1,
'cdr2_b_aa_pw':1,
'cdr3_b_aa_pw':1,
'pmhc_b_aa_pw':1,
'cdr1_g_aa_pw':1,
'cdr2_g_aa_pw':1,
'cdr3_g_aa_pw':1,
'pmhc_g_aa_pw':1,
'cdr1_d_aa_pw':1,
'cdr2_d_aa_pw':1,
'cdr3_d_aa_pw':1,
'pmhc_d_aa_pw':1}
for k in replacement_weights:
weights[k] = replacement_weights[k]
alpha_keys = [k for k in list(weights.keys()) if k.endswith("a_aa_pw")]
beta_keys = [k for k in list(weights.keys()) if k.endswith("b_aa_pw")]
gamma_keys = [k for k in list(weights.keys()) if k.endswith("g_aa_pw")]
delta_keys = [k for k in list(weights.keys()) if k.endswith("d_aa_pw")]
# for single chain computation, results in TCRrep.pw_alpha, TCRrep.pw_beta, TCRrep.pw_gamma, and or TCRrep.pw_delta,
if 'alpha' in chains:
tcrdist = np.zeros(self.cdr3_a_aa_pw.shape)
for k in alpha_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_alpha = tcrdist
if 'beta' in chains:
tcrdist = np.zeros(self.cdr3_b_aa_pw.shape)
for k in beta_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_beta = tcrdist
if 'gamma' in chains:
tcrdist = np.zeros(self.cdr3_g_aa_pw.shape)
for k in gamma_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_gamma = tcrdist
if 'delta' in chains:
tcrdist = np.zeros(self.cdr3_d_aa_pw.shape)
for k in delta_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_delta = tcrdist
# For combined chain tcrdist, results in TCRrep.paired_tcrdist and TCRrep.pw_tcrdist
full_keys = []
if 'alpha' in chains:
full_keys = full_keys + alpha_keys
if 'beta' in chains:
full_keys = full_keys + beta_keys
if 'gamma' in chains:
full_keys = full_keys + gamma_keys
if 'delta' in chains:
full_keys = full_keys + delta_keys
# initialize tcrdist matrix size
for k in full_keys:
try:
tcrdist = np.zeros(self.__dict__[k].shape)
break
except KeyError:
pass
for k in full_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
pass
# keep 'paired_tcrdist' to avoid breaking tests
self.paired_tcrdist = tcrdist
self.pw_tcrdist = tcrdist
self.paired_tcrdist_weights = {k:weights[k] for k in full_keys}
# Typically we don't want to store different tcrdistances in the same repertoire, but the result can be appended to TCRrep.stored_tcrdist when store_result is True.
r = {'paired_tcrdist' : tcrdist,
'paired_tcrdist_weights' : {k:weights[k] for k in full_keys}}
if store_result:
self.stored_tcrdist.append(r)
return(r)
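# Illustrative re-weighting sketch: to weight CDR3 three times as heavily as
# the other regions for a paired alpha/beta analysis, one could call:
#
#     tr.compute_paired_tcrdist(
#         chains=["alpha", "beta"],
#         replacement_weights={"cdr3_a_aa_pw": 3, "cdr3_b_aa_pw": 3})
#
# Unspecified regions keep their default weight of 1, and regions whose
# pairwise matrices were never computed are skipped with a warning.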
def compute_pairwise(self,
chain,
metric = "nw",
processes = 2,
user_function = None,
to_matrix = True,
**kwargs):
"""
Early Function to be replaced with compute_pairwise_all.
TODO: Rewrite test and remove.
"""
# validate chain argument passed
self._validate_chain(chain)
# another option would be to loop through the a list of chains
index_col_from_chain = {'alpha' : 'cdr3_a_aa',
'beta' : 'cdr3_b_aa',
'gamma' : 'cdr3_g_aa',
'delta' : 'cdr3_d_aa'}
sequences = self.clone_df[index_col_from_chain[chain]]
# Pull the default substitution matrix
if chain == "alpha":
smat = self.cdr3_a_aa_smat
elif chain == "beta":
smat = self.cdr3_b_aa_smat
elif chain == 'gamma':
smat = self.cdr3_g_aa_smat
elif chain == "delta":
smat = self.cdr3_d_aa_smat
# If kwargs were passed use them, otherwise pass chain-sp. smat from above
if ('matrix' in kwargs) or ("open" in kwargs):
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**kwargs)
else:
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**{'matrix' : smat})
if chain == "alpha":
self.cdr3_a_aa_pw = pw
elif chain == "beta":
self.cdr3_b_aa_pw = pw
elif chain == 'gamma':
self.cdr3_g_aa_pw = pw
elif chain == "delta":
self.cdr3_d_aa_pw = pw
def generate_cluster_index(self, t = 75, criterion = "distance", method = "complete", append_counts = False):
"""
Add 'cluster_index' column to TCRrep.clone_df
Parameters
----------
t : int
scipy.cluster.hierarchy.fcluster param t
criterion : str
scipy.cluster.hierarchy.fcluster param criterion
method : str
scipy.cluster.linkage parma method
Notes
-----
https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
"""
compressed_dmat = scipy.spatial.distance.squareform(self.paired_tcrdist, force = "vector")
Z = linkage(compressed_dmat, method = "complete")
cluster_index = fcluster(Z, t = t, criterion = criterion)
assert len(cluster_index) == self.clone_df.shape[0]
assert len(cluster_index) == self.paired_tcrdist.shape[0]
self.clone_df['cluster_index'] = cluster_index
if append_counts:
self._append_cluster_count()
self._append_seq_counts_per_cluster()
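# Illustrative usage sketch: after paired distances have been computed, flat
# clusters can be cut at a distance threshold and summarized per cluster:
#
#     tr.generate_cluster_index(t=75, criterion="distance", append_counts=True)
#     tr.clone_df[["clone_id", "cluster_index", "cluster_count", "seq_count"]]
#
# Smaller values of t yield more, tighter clusters; t is interpreted by
# scipy.cluster.hierarchy.fcluster according to `criterion`.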
def _append_cluster_count(self):
"""
Appends the number of clones in a cluster to each row of TCRrep.clone_df
"""
cluster_count = self.clone_df.cluster_index.value_counts().\
reset_index().\
rename(columns = {'index':'cluster_index', "cluster_index": "cluster_count"}).\
copy()
self.clone_df = self.clone_df.merge(cluster_count, how= "left", left_on = "cluster_index", right_on = "cluster_index")
def _append_seq_counts_per_cluster(self):
"""
Appends the sum of seq counts per cluster to each row of TCRrep.clone_df
"""
seq_counts = self.clone_df.\
groupby(['cluster_index'])['count'].sum().\
reset_index().\
rename(columns = {'count':'seq_count'})
self.clone_df = self.clone_df.merge(seq_counts, how = "left", left_on = "cluster_index", right_on = "cluster_index")
def ispublic(self, gr, var = "subject", n = 1):
"""
Return 'public' if a cluster is public, defined as comprising members from multiple individuals
or cell subsets (e.g., CD4/CD8); otherwise return 'private'.
Parameters
----------
gr : group
within pandas DataFrame.groupby
var : str
name of the column whose number of unique values determines whether the group is considered public
n : int
number of unique values of the selected variable that must be exceeded to be considered public
Returns
-------
r : str
'public' or 'private'
"""
r = len(gr[var].value_counts()) > n
if r:
return 'public'
else:
return 'private'
def get_func_stat(self, gr, var, func = np.median, **kwargs):
"""
get summary statistic by applying a func to a group
Parameters
----------
gr : group
within pandas DataFrame.groupby
var : str
name of the column of the group that func is applied to
func : function
function that can operate on a series or list of values specified by var
Returns
-------
r : float or int
"""
r = func(gr[var], **kwargs)
return r
def get_cluster_summary(self, df=None, groupvar = 'cluster_index'):
"""get_cluster_pgen_and_count_summary """
if df is None:
df = self.clone_df.copy()
cluster_summary = list()
assert groupvar in df.columns
assert "pgen" in df.columns
assert "count" in df.columns
for name, group in df.groupby([groupvar]):
public = self.ispublic(group, "subject")
cluster_summary.append({"cluster_index" : name,
"public" : public,
"min_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.min),
"median_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.median),
"max_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.max),
"cluster_count" : group.shape[0],
"seq_count" : self.get_func_stat(gr = group, var = "count", func = np.sum),
"seq_min" : self.get_func_stat(gr = group, var = "count", func = np.min),
"seq_median" : self.get_func_stat(gr = group, var = "count", func = np.median),
"seq_max" : self.get_func_stat(gr = group, var = "count", func = np.max)})
cluster_summary = pd.DataFrame(cluster_summary)
self.cluster_summary = cluster_summary
return cluster_summary
def tsne(self, X = None, n_components=2 , random_state = 310, axis_names = ["tSNE1","tSNE2"]):
warnings.warn("RUNNING sklearn.manifold.TSNE WHICH MAY TAKE A FEW MINUTES")
from sklearn.manifold import TSNE
if X is None:
X = self.paired_tcrdist
X_embedded = TSNE(n_components=n_components, metric = 'precomputed', random_state = random_state).fit_transform(X)
tsne_df = pd.DataFrame(X_embedded, columns = axis_names )
assert(tsne_df.shape[0] == self.clone_df.shape[0])
self.clone_df = pd.concat([self.clone_df, tsne_df], axis = 1)
def mds(self, X = None, n_components=2 , dissimilarity='precomputed', axis_names = ["MDS1","MDS2"]):
warnings.warn("RUNNING sklearn.manifold.MDS WHICH MAY TAKE A FEW MINUTES")
from sklearn.manifold import MDS
if X is None:
X = self.paired_tcrdist
X_embedded_mds = MDS(n_components=n_components, dissimilarity=dissimilarity).fit_transform(X)
mds_df = pd.DataFrame(X_embedded_mds, columns = axis_names)
assert(mds_df.shape[0] == self.clone_df.shape[0])
self.clone_df = pd.concat([self.clone_df, mds_df], axis = 1)
def _validate_organism(self):
if self.organism not in ["human", "mouse"]:
raise ValueError("organism must be 'mouse' or 'human'")
def _validate_chains(self):
"""
raise ValueError if invalid chains are passed to TCRrep __init__
"""
check_chains_arg = ['alpha', 'beta', "gamma", "delta"]
if len([c for c in self.chains if c not in check_chains_arg]) > 0:
raise ValueError('TCRrep chains arg can be one or more of the '
'following {} case-sensitive'.format(check_chains_arg))
def _validate_chain(self, chain):
if chain not in ['alpha', 'beta', "gamma", "delta"]:
raise ValueError('in compute_pairwise() chain must be one of the'
'following: "alpha", "beta", "gamma", "delta"' )
def _validate_cell_df(self):
"""
raise ValueError if cell_df is not a pandas.DataFrame.
"""
if not isinstance(self.cell_df, pd.DataFrame):
raise ValueError('TCRrep argument must be pandas.DataFrame')
# TODO: When known, the validator should also check column names and datatypes
def _initialize_chain_specific_attributes(self):
"""
Initialize pw object and default substitution matrix (smat) based on
chains arguments.
Naming of all objects have a standardized order
region_chain_molecular_object
(cdr3)_(a|b|d|g)_(aa|p)_(pw|smat|hmat)
"""
if "alpha" in self.chains:
self.cdr3_a_aa_smat = 'blosum62'
self.cdr2_a_aa_smat = 'blosum62'
self.cdr1_a_aa_smat = 'blosum62'
self.pmhc_a_aa_smat = 'blosum62'
self.index_cols.append("cdr3_a_aa")
if 'beta' in self.chains:
self.cdr3_b_aa_smat = 'blosum62'
self.cdr2_b_aa_smat = 'blosum62'
self.cdr1_b_aa_smat = 'blosum62'
self.pmhc_b_aa_smat = 'blosum62'
self.index_cols.append("cdr3_b_aa")
if 'gamma' in self.chains:
self.cdr3_g_aa_smat = 'blosum62'
self.cdr2_g_aa_smat = 'blosum62'
self.cdr1_g_aa_smat = 'blosum62'
self.pmhc_g_aa_smat = 'blosum62'
self.index_cols.append("cdr3_g_aa")
if 'delta' in self.chains:
self.cdr3_d_aa_smat = 'blosum62'
self.cdr2_d_aa_smat = 'blosum62'
self.cdr1_d_aa_smat = 'blosum62'
self.pmhc_d_aa_smat = 'blosum62'
self.index_cols.append("cdr3_d_aa")
def _get_smat(self, chain, index_col):
"""
Gets the correct substitution matrix (smat) based on chain and column
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
index_col : string
[cdr3|cdr2|cdr1|pmhc]_[a|b|g|d]_aa_pw
"""
self._validate_chain(chain = chain)
if chain == "alpha":
if index_col.startswith("cdr3_a"):
smat = self.cdr3_a_aa_smat
elif index_col.startswith("cdr2_a"):
smat = self.cdr2_a_aa_smat
elif index_col.startswith("cdr1_a"):
smat = self.cdr1_a_aa_smat
elif index_col.startswith("pmhc_a"):
smat = self.pmhc_a_aa_smat
else:
smat = 'blosum62'
warnings.warn("Using default parasail.blosum62 because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
if chain == "beta":
if index_col.startswith("cdr3_b"):
smat = self.cdr3_b_aa_smat
elif index_col.startswith("cdr2_b"):
smat = self.cdr2_b_aa_smat
elif index_col.startswith("cdr1_b"):
smat = self.cdr1_b_aa_smat
elif index_col.startswith("pmhc_b"):
smat = self.pmhc_b_aa_smat
else:
smat = 'blosum62'
warnings.warn("Using default parasail.blosum62 because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
if chain == "gamma":
if index_col.startswith("cdr3_g"):
smat = self.cdr3_g_aa_smat
elif index_col.startswith("cdr2_g"):
smat = self.cdr2_g_aa_smat
elif index_col.startswith("cdr1_g"):
smat = self.cdr1_g_aa_smat
elif index_col.startswith("pmhc_g"):
smat = self.pmhc_g_aa_smat
else:
smat = 'blosum62'
warnings.warn("Using default parasail.blosum62 because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
if chain == "delta":
if index_col.startswith("cdr3_d"):
smat = self.cdr3_d_aa_smat
elif index_col.startswith("cdr2_d"):
smat = self.cdr2_d_aa_smat
elif index_col.startswith("cdr1_d"):
smat = self.cdr1_d_aa_smat
elif index_col.startswith("pmhc_d"):
smat = self.pmhc_d_aa_smat
else:
smat = 'blosum62'
warnings.warn("Using default parasail.blosum62 because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
return(smat)
def _assign_pw_result(self, pw, chain, index_col):
"""
Assigns pairwise result to TCRrep attribute based on chain and index_col
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
index_col : string
[cdr3|cdr2|cdr1|pmhc]_[a|b|g|d]_aa_pw
"""
self._validate_chain(chain = chain)
if chain == "alpha":
if index_col.startswith("cdr3_a"):
self.cdr3_a_aa_pw = pw
elif index_col.startswith("cdr2_a"):
self.cdr2_a_aa_pw = pw
elif index_col.startswith("cdr1_a"):
self.cdr1_a_aa_pw = pw
elif index_col.startswith("pmhc_a"):
self.pmhc_a_aa_pw = pw
else:
warnings.warn("No assignment for {} because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
elif chain == "beta":
if index_col.startswith("cdr3_b"):
self.cdr3_b_aa_pw = pw
elif index_col.startswith("cdr2_b"):
self.cdr2_b_aa_pw = pw
elif index_col.startswith("cdr1_b"):
self.cdr1_b_aa_pw = pw
elif index_col.startswith("pmhc_b"):
self.pmhc_b_aa_pw = pw
else:
warnings.warn("No assignment for {} because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
elif chain == 'gamma':
if index_col.startswith("cdr3_g"):
self.cdr3_g_aa_pw = pw
elif index_col.startswith("cdr2_g"):
self.cdr2_g_aa_pw = pw
elif index_col.startswith("cdr1_g"):
self.cdr1_g_aa_pw = pw
elif index_col.startswith("pmhc_g"):
self.pmhc_g_aa_pw = pw
else:
warnings.warn("No assignment for {} because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
elif chain == "delta":
if index_col.startswith("cdr3_d"):
self.cdr3_d_aa_pw = pw
elif index_col.startswith("cdr2_d"):
self.cdr2_d_aa_pw = pw
elif index_col.startswith("cdr1_d"):
self.cdr1_d_aa_pw = pw
elif index_col.startswith("pmhc_d"):
self.pmhc_d_aa_pw = pw
else:
warnings.warn("No assignment for {} because chain: '{}' does not matches region: '{}'".format(index_col, chain, index_col))
def _drop_smats(self):
"""
Need to drop ctypes if you are to pickle or copy this instance
"""
smats = [ k for k in self.__dir__() if k.endswith("aa_smat") ]
for k in smats:
self.__dict__[k] = None
def _pickle(self, filename):
self._drop_smats()
pickle.dump(self, open(filename , "wb") )
warnings.warn("all smats dropped because they are C objects that can't be pickled. reassign with _initialize_chain_specific_attributes()")
def _tcrdist_legacy_method_alpha_beta(self, processes = 1):
"""
Runs the legacy tcrdist pairwise comparison
Arguments
---------
processes : int
Notes
-----
# CALCULATE tcrdist distance metric. Here we show all the manual steps to
# implement the original Dash et al. tcrdist approach.
# To do this we calculate distance for each CDR separately, and
# we use the metric "tcrdist_cdr3" for the cdr3 and "tcrdist_cdr1"
# everywhere else
"""
if "gamma" in self.chains or "delta" in self.chains:
raise ValueError("TCRrep.chains contains `gamma`. You might want "\
"TCRrep._tcrdist_legacy_method_gamma_delta")
self.compute_pairwise_all(chain = "alpha", # <11
metric = 'tcrdist_cdr3',
compute_specific_region = 'cdr3_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr1_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr2_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'pmhc_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "beta", # 12
metric = 'tcrdist_cdr3',
#user_function = tcrdist_metric_align_cdr3s_false,
compute_specific_region = 'cdr3_b_aa',
processes = processes)
self.compute_pairwise_all(chain = "beta", # 12
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr1_b_aa',
processes = processes)
self.compute_pairwise_all(chain = "beta", # 12
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr2_b_aa',
processes = processes)
self.compute_pairwise_all(chain = "beta", # 12
metric = "tcrdist_cdr1",
compute_specific_region = 'pmhc_b_aa',
processes = processes)
distA = self.compute_paired_tcrdist(replacement_weights= {'cdr3_a_aa_pw': 1,
'cdr2_a_aa_pw': 1,
'cdr1_a_aa_pw': 1,
'pmhc_a_aa_pw': 1,
'cdr3_b_aa_pw': 0,
'cdr2_b_aa_pw': 0,
'cdr1_b_aa_pw': 0,
'pmhc_b_aa_pw': 0},
chains = ["alpha", "beta"])['paired_tcrdist'].copy()
distB = self.compute_paired_tcrdist(replacement_weights= {'cdr3_a_aa_pw': 0,
'cdr2_a_aa_pw': 0,
'cdr1_a_aa_pw': 0,
'pmhc_a_aa_pw': 0,
'cdr3_b_aa_pw': 1,
'cdr2_b_aa_pw': 1,
'cdr1_b_aa_pw': 1,
'pmhc_b_aa_pw': 1},
chains = ["alpha", "beta"])['paired_tcrdist'].copy()
# Calling tr.compute_paired_tcrdist() computes the
# final paired chain TCR-distance which is stored as
# tr.paired_tcrdist, which we confirm is simply the sum of distA and distB
self.compute_paired_tcrdist(chains = self.chains)
assert np.all(((distA + distB) - self.paired_tcrdist) == 0)
self.pw_alpha = distA
self.pw_beta = distB
# tr.paired_tcrdist and distA, distB are np arrays, but we will want to work with them as pandas DataFrames
self.dist_a = pd.DataFrame(distA, index = self.clone_df.clone_id, columns = self.clone_df.clone_id)
self.dist_b = pd.DataFrame(distB, index = self.clone_df.clone_id, columns = self.clone_df.clone_id)
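# Hedged usage sketch (assumes `tr` is a TCRrep instance with chains = ["alpha", "beta"]
# and a populated clone_df, as elsewhere in this class):
#   tr._tcrdist_legacy_method_alpha_beta(processes = 2)
#   # tr.pw_alpha / tr.pw_beta hold the per-chain distances, and
#   # tr.dist_a / tr.dist_b hold the same values as clone_id-indexed DataFrames.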
def _tcrdist_legacy_method_alpha(self, processes = 1):
"""
Runs the legacy tcrdist pairwise comparison
Arguments
---------
processes : int
Notes
-----
# CALCULATE tcrdist distance metric. Here we show all the manual steps to
# implement the original Dash et al. tcrdist approach.
# To do this we calculate distance for each CDR separately, and
# we use the metric "tcrdist_cdr3" for the cdr3 and "tcrdist_cdr1"
# everywhere else
"""
if "gamma" in self.chains or "delta" in self.chains:
raise ValueError("TCRrep.chains contains `gamma`. You might want "\
"TCRrep._tcrdist_legacy_method_gamma_delta")
self.compute_pairwise_all(chain = "alpha", # <11
metric = 'tcrdist_cdr3',
compute_specific_region = 'cdr3_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr1_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'cdr2_a_aa',
processes = processes)
self.compute_pairwise_all(chain = "alpha", # 11
metric = "tcrdist_cdr1",
compute_specific_region = 'pmhc_a_aa',
processes = processes)
distA = self.compute_paired_tcrdist(replacement_weights= {'cdr3_a_aa_pw': 1,
'cdr2_a_aa_pw': 1,
'cdr1_a_aa_pw': 1,
'pmhc_a_aa_pw': 1},
chains = ["alpha"])['paired_tcrdist'].copy()
# Calling tr.compute_paired_tcrdist() computes the
# final paired chain TCR-distance which is stored as
# tr.paired_tcrdist, which we confirm is simply equal to distA
self.compute_paired_tcrdist(chains = self.chains)
assert np.all((distA - self.paired_tcrdist) == 0)
# tr.paired_tcrdist and distA are np arrays, but we will want to work with them as pandas DataFrames
self.dist_a =
|
pd.DataFrame(distA, index = self.clone_df.clone_id, columns = self.clone_df.clone_id)
|
pandas.DataFrame
|
#%% Imports and file loading
from pathlib import Path
import pandas as pd
import networkx as nx
import numpy as np
from graspy.plot import gridplot
from src.data import load_networkx
data_path = Path(
"./maggot_models/data/raw/Maggot-Brain-Connectome/4-color-matrices_Brain"
)
data_date = "2019-09-18-v2"
graph_types = ["axon-axon", "axon-dendrite", "dendrite-axon", "dendrite-dendrite"]
meta_data_file = "brain_meta-data"
input_counts_file = "input_counts"
output_path = Path("maggot_models/data/processed/2019-09-18-v2")
meta_data_path = data_path / data_date / (meta_data_file + ".csv")
meta_data_df =
|
pd.read_csv(meta_data_path, index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from math import sqrt
from dramkit.gentools import power
from dramkit.gentools import isnull
from dramkit.gentools import cal_pct
from dramkit.gentools import x_div_y
from dramkit.gentools import check_l_allin_l0
from dramkit.gentools import get_update_kwargs
from dramkit.gentools import con_count_ignore
from dramkit.gentools import replace_repeat_func_iter
from dramkit.datetimetools import diff_days_date
from dramkit.logtools.utils_logger import logger_show
from dramkit.plottools.plot_common import plot_series
from dramkit.plottools.plot_common import plot_series_conlabel
#%%
def signal_merge(data, sig1_col, sig2_col, merge_type=1):
'''
Merge two signals into one signal.
Parameters
----------
data : pandas.DataFrame
Input data; must contain the columns named by ``sig1_col`` and ``sig2_col``.
sig1_col, sig2_col : str
Signal columns, where -1 means buy (go long) and 1 means sell (go short).
merge_type : int
How the two signals are merged:
- 1: either signal firing counts as a valid signal
- 2: trade signals are derived from the summed implied holdings of the two signals (the returned signal does not handle reversing into an opposite position)
- 3: a trade signal is generated only when the two signals agree in direction (the returned signal does not handle reversing into an opposite position)
:returns: `pd.Series` - the merged signal
'''
df = data.reindex(columns=[sig1_col, sig2_col])
df.rename(columns={sig1_col: 'sig1', sig2_col: 'sig2'},
inplace=True)
if merge_type == 1:
df['sig'] = df['sig1'] + df['sig2']
df['sig'] = df['sig'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 2:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
elif merge_type == 3:
df['hold1'] = df['sig1'].replace(0, np.nan)
df['hold1'] = df['hold1'].fillna(method='ffill').fillna(0)
df['hold2'] = df['sig2'].replace(0, np.nan)
df['hold2'] = df['hold2'].fillna(method='ffill').fillna(0)
df['hold'] = df['hold1'] + df['hold2']
df['hold'] = df['hold'].apply(lambda x: 1 if x == 2 else \
(-1 if x == -2 else 0))
df['trade'] = df['hold'].diff()
df['sig'] = df['trade'].apply(lambda x: -1 if x < 0 else \
(1 if x > 0 else 0))
return df['sig']
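# Illustrative sketch (not part of dramkit; `_demo_signal_merge` is a hypothetical
# helper) showing the three merge modes of signal_merge on a tiny hand-made frame.
def _demo_signal_merge():
    sigs = pd.DataFrame({'s1': [0, -1, 0, 0, 1],
                         's2': [0, 0, -1, 0, 1]})
    merged_any = signal_merge(sigs, 's1', 's2', merge_type=1)   # either signal counts
    merged_hold = signal_merge(sigs, 's1', 's2', merge_type=2)  # summed implied holdings
    merged_both = signal_merge(sigs, 's1', 's2', merge_type=3)  # both must agree in direction
    return merged_any, merged_hold, merged_both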
#%%
def cal_cost_add(hold_vol, hold_cost, add_vol, add_price):
'''
| Average holding cost after adding to a position.
| hold_vol is the position size before adding, hold_cost is the average holding cost before adding, add_vol is the added size.
'''
holdCost = hold_vol * hold_cost
totCost = holdCost + add_vol * add_price
return totCost / (hold_vol + add_vol)
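# Quick hand check of cal_cost_add: holding 100 units at an average cost of 10.0
# and adding 50 units at 13.0 gives (100*10.0 + 50*13.0) / 150 = 11.0, i.e.
# cal_cost_add(100, 10.0, 50, 13.0) == 11.0.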
def get_mean_cost(trade_records, dirt_col, price_col, vol_col):
'''
Compute the holding cost at each step from a trade record.
Parameters
----------
trade_records : pd.DataFrame
Trade records; must contain the columns named by ``dirt_col``, ``price_col`` and ``vol_col``.
dirt_col : str
Trade direction column, 1 for buy (long) and -1 for sell (short).
price_col : str
Fill price column.
vol_col : str
Fill volume column.
:returns: `pd.DataFrame` - trade_records with the added columns 'holdVol', 'holdCost' and 'meanCost'
'''
df = trade_records.copy()
ori_idx = df.index
df.index = range(0, df.shape[0])
vol_col_ = vol_col + '_'
df[vol_col_] = df[dirt_col] * df[vol_col]
df['holdVol'] = df[vol_col_].cumsum().round(4)
df.loc[df.index[0], 'holdCost'] = df[price_col].iloc[0] * df[vol_col_].iloc[0]
df.loc[df.index[0], 'meanCost'] = df[price_col].iloc[0]
for k in range(1, df.shape[0]):
holdVol_pre = df['holdVol'].iloc[k-1]
holdCost_pre = df['holdCost'].iloc[k-1]
holdVol = df['holdVol'].iloc[k]
tradeVol = df[vol_col_].iloc[k]
if tradeVol == 0:
holdCost, meanCost = holdCost_pre, df['meanCost'].iloc[k-1]
elif holdVol == 0: # position closed
holdCost, meanCost = 0, 0
elif holdVol_pre >= 0 and holdVol > holdVol_pre: # open or add to a long
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
elif holdVol_pre >= 0 and holdVol > 0 and holdVol < holdVol_pre: # reduce a long
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
elif holdVol_pre >= 0 and holdVol < 0: # close the long and reverse into a short
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
elif holdVol_pre <= 0 and holdVol < holdVol_pre: # open or add to a short
tradeVal = df[vol_col_].iloc[k] * df[price_col].iloc[k]
holdCost = holdCost_pre + tradeVal
meanCost = holdCost / holdVol
elif holdVol_pre <= 0 and holdVol < 0 and holdVol > holdVol_pre: # reduce a short
meanCost = df['meanCost'].iloc[k-1]
holdCost = meanCost * holdVol
elif holdVol_pre <= 0 and holdVol > 0: # close the short and reverse into a long
meanCost = df[price_col].iloc[k]
holdCost = holdVol * meanCost
df.loc[df.index[k], 'holdCost'] = holdCost
df.loc[df.index[k], 'meanCost'] = meanCost
df.index = ori_idx
return df
#%%
def cal_gain_con_futures(price_open, price_now, n, player,
fee=0.1/100, lever=100,
n_future2target=0.001):
'''
Profit/loss calculation for perpetual futures, e.g. Huobi BTC contracts.
Parameters
----------
price_open : float
Opening price.
price_now : float
Current price.
n : int
Number of contracts.
player : str
Whether the position is short or long.
fee : float
Fee rate.
lever : int
Leverage.
n_future2target : float
Amount of the underlying per contract.
Returns
-------
gain_lever : float
Profit/loss amount.
gain_pct : float
Profit/loss ratio.
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open * n_future2target / lever
price_now_ = price_now * n_future2target / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
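# Hedged example (made-up numbers, not part of dramkit): profit/loss of a 10-contract
# long opened at 10000 and marked at 10500 with the default fee, leverage and
# contract size used above.
def _demo_gain_con_futures():
    gain, gain_pct = cal_gain_con_futures(10000, 10500, 10, 'buy')
    return gain, gain_pct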
def cal_gain_con_futures2(price_open, price_now, n, player,
fee=0.1/100, lever=100):
'''
Profit/loss calculation for perpetual futures, e.g. Binance ETH contracts.
Parameters
----------
price_open : float
Opening price.
price_now : float
Current price.
n : int
Quantity (in units of the underlying).
player : str
Whether the position is short or long.
fee : float
Fee rate.
lever : int
Leverage.
Returns
-------
gain_lever : float
Profit/loss amount.
gain_pct : float
Profit/loss ratio.
'''
if n == 0:
return 0, 0
b_name = ['buyer', 'Buyer', 'b', 'B', 'buy', 'Buy']
s_name = ['seller', 'Seller', 'seler', 'Seler', 's', 'S', 'sell',
'Sell', 'sel', 'Sel']
price_cost_ = price_open / lever
price_now_ = price_now / lever
if player in b_name:
Cost = price_cost_ * n * (1+fee)
Get = price_now_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Cost
elif player in s_name:
Cost = price_now_ * n * (1+fee)
Get = price_cost_ * n * (1-fee)
gain = Get - Cost
gain_pct = gain / Get
gain_lever = gain * lever
return gain_lever, gain_pct
#%%
def cal_expect_return(hit_prob, gain_loss_ratio):
'''Expected return given the hit rate and the gain/loss ratio.'''
return hit_prob*gain_loss_ratio - (1-hit_prob)
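# Worked example: a 40% hit rate with a 2:1 gain/loss ratio gives an expectancy of
# 0.4*2 - (1 - 0.4) = 0.2, i.e. cal_expect_return(0.4, 2) == 0.2 (a positive edge).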
def cal_gain_pct_log(price_cost, price, pct_cost0=1):
'''
| Log return.
| price_cost is the cost basis.
| price is the current price.
| pct_cost0 is the value returned when the cost price_cost is 0.'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost == 0:
return pct_cost0
elif price_cost > 0:
return np.log(price) - np.log(price_cost)
else:
raise ValueError('price_cost必须大于等于0!')
def cal_gain_pct(price_cost, price, pct_cost0=1):
'''
| Percentage return.
| price_cost is the cost basis.
| price is the current price.
| pct_cost0 is the value returned when the cost price_cost is 0.
Note
----
By convention the cost of the long (rights) side is positive (e.g. a buy at 100 gives price_cost=100)
and the cost of the short (obligation) side is negative (e.g. a sell at 100 gives price_cost=-100).
'''
if isnull(price_cost) or isnull(price):
return np.nan
if price_cost > 0:
return price / price_cost - 1
elif price_cost < 0:
return 1 - price / price_cost
else:
return pct_cost0
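# Hand-checked examples: cal_gain_pct(100, 110) -> 0.1 and
# cal_gain_pct_log(100, 110) -> log(1.1) (about 0.0953); both return pct_cost0
# when the cost is 0.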
def cal_gain_pcts(price_series, gain_type='pct',
pct_cost0=1, logger=None):
'''
| Per-period returns of an asset value series price_series (`pd.Series`).
| gain_type:
| 'pct': ordinary percentage returns
| 'log': log returns (cannot be used when price_series contains values <= 0)
| 'dif': returns as successive differences (for cumulative net-value series)
| pct_cost0 is the return to report when the cost is 0.
'''
if (price_series <= 0).sum() > 0:
gain_type = 'pct'
logger_show('Values <= 0 found; percentage returns will be used instead of log returns!',
logger, 'warning')
if gain_type == 'pct':
df = pd.DataFrame({'price_now': price_series})
df['price_cost'] = df['price_now'].shift(1)
df['pct'] = df[['price_cost', 'price_now']].apply(lambda x:
cal_gain_pct(x['price_cost'], x['price_now'],
pct_cost0=pct_cost0), axis=1)
return df['pct']
elif gain_type == 'log':
return price_series.apply(np.log).diff()
elif gain_type == 'dif':
return price_series.diff()
else:
raise ValueError('未识别的`gain_type`,请检查!')
#%%
def cal_beta(values_target, values_base, gain_type='pct', pct_cost0=1):
'''
| Beta coefficient.
| values_target and values_base are the target value series and the benchmark value series.
| gain_type and pct_cost0 are as in :func:`dramkit.fintools.utils_gains.cal_gain_pcts`.
| References:
| https://www.joinquant.com/help/api/help#api:风险指标
| https://blog.csdn.net/thfyshz/article/details/83443783
'''
values_target = pd.Series(values_target)
values_base = pd.Series(values_base)
pcts_target = cal_gain_pcts(values_target, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_base = cal_gain_pcts(values_base, gain_type=gain_type, pct_cost0=pct_cost0)
pcts_target = pcts_target.iloc[1:]
pcts_base = pcts_base.iloc[1:]
return np.cov(pcts_target, pcts_base)[0][1] / np.var(pcts_base, ddof=1)
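# Hedged sketch (made-up series, not part of dramkit): beta of a target net-value
# series against a benchmark, both passed as plain lists.
def _demo_beta():
    target = [1.00, 1.03, 1.02, 1.05, 1.08]
    base = [1.00, 1.02, 1.01, 1.03, 1.05]
    return cal_beta(target, base)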
def cal_alpha_beta(values_target, values_base, r0=3.0/100, nn=252,
gain_type='pct', rtype='exp', pct_cost0=1, logger=None):
'''
| Alpha and beta coefficients.
| See :func:`cal_beta` and :func:`cal_returns_period` for the parameters.
'''
r = cal_returns_period(values_target, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
r_base = cal_returns_period(values_base, gain_type=gain_type, rtype=rtype,
nn=nn, pct_cost0=pct_cost0, logger=logger)
beta = cal_beta(values_target, values_base,
gain_type=gain_type, pct_cost0=pct_cost0)
return r - (r0 + beta*(r_base-r0)), beta
def cal_alpha_by_beta_and_r(r, r_base, beta, r0=3.0/100):
'''
| Alpha from annualised returns and beta.
| r is the strategy's annualised return, r_base the benchmark's annualised return, r0 the risk-free rate and beta the strategy's beta.
'''
return r - (r0 + beta*(r_base-r0))
#%%
def cal_return_period_by_gain_pct(gain_pct, n, nn=250, rtype='exp',
gain_pct_type='pct'):
'''
Given a total return gain_pct, compute the periodised (e.g. annualised) return.
Parameters
----------
gain_pct : float
The given total return.
n : int
Number of periods over which gain_pct was earned.
nn : int
| Number of periods in one full cycle, e.g.
| daily data, annualised return: nn is usually 252 (trading days per year)
| daily data, monthly return: nn is usually 21 (trading days per month)
| minute data, annualised return: nn is usually 252*240 (trading minutes per year)
rtype : str
Periodisation method: exponential ('exp') or simple average ('mean').
gain_pct_type : str
| How the total return gain_pct was computed, either 'pct' or 'log'.
| Defaults to percentage returns; with log returns the periodised return can only use the averaging method, not the exponential one.
.. hint::
| Percentage returns:
| compound (exponential) formula: 1 + R = (1 + r) ^ (n / nn) --> r = (1 + R) ^ (nn / n) - 1
| simple (average) formula: 1 + R = 1 + r * (n / nn) --> r = R * nn / n
| Log returns:
| R = r * (n / nn) --> r = R * nn / n (annualising log returns can only use the averaging method)
Returns
-------
r : float
Periodised return, whose period is determined by nn.
References
----------
https://zhuanlan.zhihu.com/p/112211063
'''
if gain_pct_type in ['log', 'ln', 'lg']:
rtype = 'mean' # log returns can only be periodised with the averaging method
if rtype == 'exp':
r = power(1 + gain_pct, nn / n) - 1
elif rtype == 'mean':
r = nn * gain_pct / n
return r
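# Worked example (assuming dramkit.gentools.power behaves like the ** operator):
# a 20% total return earned over 126 daily bars, annualised with nn=252, gives
# (1.2)**(252/126) - 1 = 0.44 with rtype='exp' and 0.2*252/126 = 0.4 with rtype='mean'.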
def cal_ext_return_period_by_gain_pct(gain_pct, gain_pct_base, n,
nn=250, rtype='exp',
gain_pct_type='pct',
ext_type=1):
'''
| Given a return and a benchmark return, compute the periodised excess return.
| rtype is the periodisation method: 'exp', 'mean' or 'log'.
| ext_type selects how the excess return is computed:
| 1: periodise each return separately, then subtract
| 2: subtract first, then periodise the difference
| 3: rebuild the two actual net values first, then periodise the return relative to the benchmark net value
| The other parameters have the same meaning as in :func:`cal_return_period_by_gain_pct`.
| References:
| https://xueqiu.com/1930958059/167803003?page=1
'''
if rtype == 'log':
ext_type = 3
if ext_type == 1:
p1 = cal_return_period_by_gain_pct(gain_pct, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
p2 = cal_return_period_by_gain_pct(gain_pct_base, n, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return p1 - p2
elif ext_type == 2:
p = cal_return_period_by_gain_pct(gain_pct-gain_pct_base, n, nn=nn,
rtype=rtype, gain_pct_type=gain_pct_type)
return p
if ext_type == 3:
if gain_pct_type in ['log', 'ln', 'lg']:
p = np.exp(gain_pct)
p_base = np.exp(gain_pct_base)
elif gain_pct_type == 'pct':
p = 1 + gain_pct
p_base = 1 + gain_pct_base
if rtype == 'exp':
return power(p / p_base, nn / n) - 1
elif rtype == 'mean':
return (p / p_base - 1) * nn / n
elif rtype == 'log':
return (np.log(p) - np.log(p_base)) * nn / n
else:
raise ValueError('未识别的ext_type参数,请检查!')
def cal_ext_return_period(values, values_base, gain_type='pct', rtype='exp',
nn=250, pct_cost0=1, ext_type=1, logger=None):
'''
| Excess return computed from a price/value series values and a benchmark series values_base.
| For pct_cost0 see :func:`cal_gain_pct` and :func:`cal_gain_pct_log`.
| For the other parameters see :func:`cal_ext_return_period_by_gain_pct`.
'''
values, values_base = np.array(values), np.array(values_base)
n1, n0 = len(values), len(values_base)
if n1 != n0:
raise ValueError('The two series have different lengths, please check!')
if gain_type == 'log':
if (values[0] <= 0 or values[-1] <= 0) or \
(values_base[0] <= 0 or values_base[-1] <= 0):
logger_show('Found a negative start or end value; using percentage returns instead of log returns!',
logger, 'warning')
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
p1 = cal_gain_pct_log(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct_log(values_base[0], values_base[-1], pct_cost0=pct_cost0)
rtype = 'mean' # annualising with log returns can only use the averaging method
gain_pct_type = 'log'
elif gain_type == 'pct':
p1 = cal_gain_pct(values[0], values[-1], pct_cost0=pct_cost0)
p0 = cal_gain_pct(values_base[0], values_base[-1], pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
p1 = values[-1] - values[0]
p0 = values_base[-1] - values_base[0]
rtype = 'mean'
gain_pct_type = 'pct'
else:
raise ValueError('Unrecognized `gain_type`, please check!')
extr = cal_ext_return_period_by_gain_pct(p1, p0, n1, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type,
ext_type=ext_type)
return extr
def cal_returns_period(price_series, gain_type='pct', rtype='exp',
nn=252, pct_cost0=1, logger=None):
'''
Periodised (e.g. annualised) return.
Parameters
----------
price_series : pd.Series, np.array, list
Asset value series (log returns cannot be used if it contains negative values).
gain_type : str
| How returns are computed:
| 'pct': ordinary percentage returns
| 'log': log returns (cannot be used when price_series contains values <= 0)
| 'dif': returns as successive differences (for cumulative net-value series)
rtype : str
| Periodisation method: exponential ('exp') or simple average ('mean')
| (annualising with log returns can only use the averaging method).
nn : int
| Number of periods in one full cycle, e.g.
| daily data, annualised return: nn is usually 252 (trading days per year)
| daily data, monthly return: nn is usually 21 (trading days per month)
| minute data, annualised return: nn is usually 252*240 (trading minutes per year)
pct_cost0 : float
The return to report when the cost is 0, see :func:`cal_gain_pct` and :func:`cal_gain_pct_log`.
Returns
-------
r : float
Periodised return, whose period is determined by nn.
See Also
--------
:func:`cal_return_period_by_gain_pct`
'''
price_series = np.array(price_series)
n_ = len(price_series)
if gain_type == 'log':
if price_series[0] <= 0 or price_series[-1] <= 0:
logger_show('Found a negative start or end value; using percentage returns instead of log returns!',
logger, 'warning')
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
else:
gain_pct = cal_gain_pct_log(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
rtype = 'mean' # annualising with log returns can only use the averaging method
gain_pct_type = 'log'
elif gain_type == 'pct':
gain_pct = cal_gain_pct(price_series[0], price_series[-1],
pct_cost0=pct_cost0)
gain_pct_type = 'pct'
elif gain_type == 'dif':
gain_pct = price_series[-1] - price_series[0]
gain_pct_type = 'pct'
rtype = 'mean'
else:
raise ValueError('Unrecognized `gain_type`, please check!')
r = cal_return_period_by_gain_pct(gain_pct, n_, nn=nn, rtype=rtype,
gain_pct_type=gain_pct_type)
return r
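# Hedged sketch (made-up net values, not part of dramkit): annualising the return
# of a short daily series with the exponential method.
def _demo_returns_period():
    nav = [1.00, 1.01, 0.99, 1.02, 1.03]
    return cal_returns_period(nav, gain_type='pct', rtype='exp', nn=252)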
def cal_returns_period_mean(price_series, gain_type='pct', nn=252,
pct_cost0=1, logger=None):
'''
| Periodised return computed by directly averaging the per-period returns.
| price_series is the asset value series, a pd.Series, list or np.array (log returns cannot be used if it contains negative values).
| gain_type and pct_cost0 are as in :func:`cal_gain_pcts`.
| nn is the number of periods in one full cycle, e.g.
| daily data, annualised return: nn is usually 252 (trading days per year)
| daily data, monthly return: nn is usually 21 (trading days per month)
| minute data, annualised return: nn is usually 252*240 (trading minutes per year)
'''
price_series = pd.Series(price_series)
price_series.name = 'series'
df = pd.DataFrame(price_series)
# per-period returns
df['gain_pct'] = cal_gain_pcts(price_series, gain_type=gain_type,
pct_cost0=pct_cost0, logger=logger)
return nn * df['gain_pct'].mean()
def cal_volatility(price_series, gain_type='pct', nn=252,
pct_cost0=0, logger=None):
'''
| Periodised (e.g. annualised) volatility of the price series price_series.
| price_series is the asset value series, a pd.Series, list or np.array (log returns cannot be used if it contains negative values).
| gain_type and pct_cost0 are as in :func:`cal_gain_pcts`.
| nn is the number of periods in one full cycle, e.g.
| daily data, annualised figures: nn is usually 252 (trading days per year)
| daily data, monthly figures: nn is usually 21 (trading days per month)
| minute data, annualised figures: nn is usually 252*240 (trading minutes per year)
| Returns the return volatility, whose period is determined by nn.
| References:
| https://wiki.mbalib.com/wiki/%E5%8E%86%E5%8F%B2%E6%B3%A2%E5%8A%A8%E7%8E%87
'''
price_series = pd.Series(price_series)
price_series.name = 'series'
df = pd.DataFrame(price_series)
# per-period returns
df['gain_pct'] = cal_gain_pcts(price_series, gain_type=gain_type,
pct_cost0=pct_cost0, logger=logger)
# volatility
r = df['gain_pct'].std(ddof=1) * sqrt(nn) # ddof=1 means the standard deviation uses n-1 in the denominator
return r
def cal_sharpe(values, r=3/100, nn=252, gain_type='pct',
ann_rtype='exp', pct_cost0=1, logger=None):
'''
| Sharpe ratio: compute the expected return and the volatility first, then the Sharpe ratio.
| r is the risk-free rate.
| For the remaining parameters see :func:`cal_returns_period` and :func:`cal_volatility`.
'''
return_ann = cal_returns_period(values, gain_type=gain_type,
rtype=ann_rtype, nn=nn,
pct_cost0=pct_cost0, logger=logger)
volatility = cal_volatility(values, gain_type=gain_type,
nn=nn, pct_cost0=pct_cost0,
logger=logger)
sharpe = (return_ann - r) / volatility
return sharpe
def cal_sharpe2(values, r=3/100, nn=252, gain_type='pct',
pct_cost0=1, logger=None):
'''
Sharpe ratio.
Parameters
----------
values : pd.Series
Asset value series.
r : float
Risk-free rate.
nn : int
| Number of values periods contained in the period of the risk-free rate r, e.g.
| daily values with an annualised risk-free rate r: nn is usually 252 (trading days per year)
| daily values with a monthly risk-free rate r: nn is usually 21 (trading days per month)
| minute values with an annualised risk-free rate r: nn is usually 252*240 (trading minutes per year)
gain_type : str
| How returns are computed:
| 'pct': ordinary percentage returns
| 'log': log returns (cannot be used when the series contains values <= 0)
| 'dif': returns as successive differences (for cumulative net-value series)
References
----------
- https://www.joinquant.com/help/api/help?name=api#风险指标
- https://www.zhihu.com/question/348938505/answer/1848898074
- https://blog.csdn.net/thfyshz/article/details/83443783
- https://www.jianshu.com/p/363aa2dd3441 (the Sharpe calculation there appears to be wrong)
'''
df = pd.DataFrame({'values': values})
df['gains'] = cal_gain_pcts(df['values'], gain_type=gain_type,
pct_cost0=pct_cost0, logger=logger) # per-period return series
df['gains_ex'] = df['gains'] - r/nn # excess returns
return sqrt(nn) * df['gains_ex'].mean() / df['gains_ex'].std()
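# Hedged comparison sketch (made-up series): cal_sharpe annualises the return and the
# volatility separately, while cal_sharpe2 works on per-period excess returns, so the
# two need not agree exactly on the same data.
def _demo_sharpe():
    nav = pd.Series([1.00, 1.01, 1.005, 1.02, 1.03, 1.025])
    return cal_sharpe(nav, r=0.03, nn=252), cal_sharpe2(nav, r=0.03, nn=252)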
def get_maxdown(values, return_idx=True, abs_val=False):
'''
Maximum drawdown.
Parameters
----------
values : list, np.array, pd.Series
Asset value series.
return_idx : bool
Whether to return the start and end positions of the maximum drawdown interval; if False, the positions are returned as None.
abs_val : bool
If True, the maximum drawdown is measured as an absolute loss rather than a loss ratio.
Returns
-------
maxdown : float
Maximum drawdown magnitude (positive value).
start_end_idxs : tuple
Start and end positions of the maximum drawdown (start_idx, end_idx); (None, None) if return_idx is False.
References
----------
- https://www.cnblogs.com/xunziji/p/6760019.html
- https://blog.csdn.net/changyan_123/article/details/80994170
'''
n = len(values)
data = np.array(values)
if not return_idx:
maxdown, tmp_max = 0, -np.inf
for k in range(1, n):
tmp_max = max(tmp_max, data[k-1])
if not abs_val:
maxdown = min(maxdown, data[k] / tmp_max - 1)
else:
maxdown = min(maxdown, data[k] - tmp_max)
start_end_idxs = (None, None)
return -maxdown, start_end_idxs
else:
Cmax, Cmax_idxs = np.zeros(n-1), [0 for _ in range(n-1)]
tmp_max = -np.inf
tmp_idx = 0
for k in range(1, n):
if data[k-1] > tmp_max:
tmp_max = data[k-1]
tmp_idx = k-1
Cmax[k-1] = tmp_max
Cmax_idxs[k-1] = tmp_idx
maxdown = 0.0
start_idx, end_idx = 0, 0
for k in range(1, n):
if not abs_val:
tmp = data[k] / Cmax[k-1] - 1
else:
tmp = data[k] - Cmax[k-1]
if tmp < maxdown:
maxdown = tmp
start_idx, end_idx = Cmax_idxs[k-1], k
start_end_idxs = (start_idx, end_idx)
return -maxdown, start_end_idxs
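# Worked example (hand checked against the code above): for [1.0, 1.2, 0.9, 1.1, 0.8]
# the worst peak-to-trough move runs from 1.2 down to 0.8, a drawdown of 1 - 0.8/1.2 = 1/3,
# so get_maxdown([1.0, 1.2, 0.9, 1.1, 0.8]) returns (0.333..., (1, 4)).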
def get_maxdown_all(values, abs_val=False):
'''Maximum drawdown up to each point of the interval.'''
n = len(values)
data = np.array(values)
maxdown_all= [0.0]
maxdown, tmp_max = 0, -np.inf
for k in range(1, n):
tmp_max = max(tmp_max, data[k-1])
if not abs_val:
maxdown = min(maxdown, data[k] / tmp_max - 1)
maxdown_all.append(maxdown)
else:
maxdown = min(maxdown, data[k] - tmp_max)
maxdown_all.append(maxdown)
return np.array(maxdown_all)
def get_maxdown_dy(values, abs_val=False):
'''Dynamic maximum drawdown (drawdown from the running high before each point to the current value).'''
data = pd.DataFrame({'values': values})
data['cummax'] = data['values'].cummax()
if not abs_val:
data['dyMaxDown'] = data['values'] / data['cummax'] - 1
else:
data['dyMaxDown'] = data['values'] - data['cummax']
return np.array(data['dyMaxDown'])
def get_maxup(values, return_idx=True, abs_val=False):
'''
Maximum run-up (the counterpart of the maximum drawdown
:func:`dramkit.fintools.utils_gains.get_maxdown`, i.e. the maximum drawdown of a short position).
'''
n = len(values)
data = np.array(values)
if not return_idx:
maxup, tmp_min = 0, np.inf
for k in range(1, n):
tmp_min = min(tmp_min, data[k-1])
if not abs_val:
maxup = max(maxup, data[k] / tmp_min - 1)
else:
maxup = max(maxup, data[k] - tmp_min)
return maxup, (None, None)
else:
Cmin, Cmin_idxs = np.zeros(n-1), [0 for _ in range(n-1)]
tmp_min = np.inf
tmp_idx = 0
for k in range(1, n):
if data[k-1] < tmp_min:
tmp_min = data[k-1]
tmp_idx = k-1
Cmin[k-1] = tmp_min
Cmin_idxs[k-1] = tmp_idx
maxup = 0.0
start_idx, end_idx = 0, 0
for k in range(1, n):
if not abs_val:
tmp = data[k] / Cmin[k-1] - 1
else:
tmp = data[k] - Cmin[k-1]
if tmp > maxup:
maxup = tmp
start_idx, end_idx = Cmin_idxs[k-1], k
return maxup, (start_idx, end_idx)
def get_maxdown_pd(series, abs_val=False):
'''
Maximum drawdown computed with pandas.
Parameters
----------
series : pd.Series, np.array, list
Asset value series.
abs_val : bool
If True, the maximum drawdown is measured as an absolute loss rather than a loss ratio.
Returns
-------
maxdown : float
Maximum drawdown magnitude (positive value).
start_end_idxs : tuple
Start and end index labels of the maximum drawdown: (start_idx, end_idx).
start_end_iloc : tuple
Start and end integer positions of the maximum drawdown: (start_iloc, end_iloc).
'''
df =
|
pd.DataFrame(series)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0], dtype='int64')]
assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns int64
expected = np.argsort(a, kind='mergesort')
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns int64
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert libalgos.Infinity() == libalgos.Infinity()
assert not libalgos.Infinity() != libalgos.Infinity()
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
assert libalgos.NegInfinity() == libalgos.NegInfinity()
assert not libalgos.NegInfinity() != libalgos.NegInfinity()
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
assert not Inf > np.nan
assert not Inf >= np.nan
assert not Inf < np.nan
assert not Inf <= np.nan
assert not Inf == np.nan
assert Inf != np.nan
assert not NegInf > np.nan
assert not NegInf >= np.nan
assert not NegInf < np.nan
assert not NegInf <= np.nan
assert not NegInf == np.nan
assert NegInf != np.nan
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c =
|
Categorical([1, 'a', 'a'])
|
pandas.Categorical
|
import sys
import csv
import pandas as pd
import ctdcal.sbe_reader as sbe_rd
import ctdcal.sbe_equations_dict as sbe_eq
import gsw
DEBUG = False
#lookup table for sensor data
###DOUBLE CHECK TYPE IS CORRECT###
short_lookup = {
'55':{'short_name': 'CTDTMP', 'long_name':'SBE 3+ Temperature', 'units': 'ITS-90', 'type': 'float64'},
'45':{'short_name': 'CTDPRS', 'long_name':'SBE 9+ Pressure', 'units': 'DBAR', 'type': 'float64'},
'3':{'short_name': 'CTDCOND', 'long_name':'SBE 4 Conductivity', 'units': 'MSPCM', 'type':'float64'},
'38':{'short_name': 'CTDOXY', 'long_name':'SBE 43 Oxygen', 'units': 'MLPL', 'type':'float64'},
#'38':{'short_name': 'CTDOXYVOLTS', 'long_name':'SBE 43 Oxygen Volts', 'units': '0-5VDC', 'type':'float64'},
'11':{'short_name': 'FLUOR', 'long_name':'Seapoint Fluorometer', 'units': '0-5VDC', 'type':'float64'},
'27':{'short_name': 'FREE', 'long_name':'empty', 'units':'NA', 'type':'NA'},
'0':{'short_name': 'ALT', 'long_name':'Altitude', 'units':'M', 'type':'float64'},
'71':{'short_name': 'CTDXMISS', 'long_name':'CStar', 'units': '0-5VDC', 'type':'float64'},
'61':{'short_name': 'U_DEF', 'long_name':'user defined', 'units':'0-5VDC', 'type':'float64'},
'1000':{'short_name': 'CTDSAL', 'long_name':'Salinity (C1 T1)', 'units':'PSU', 'type':'float64'},
'20':{'short_name': 'CTDFLUOR', 'long_name':'WetlabECO_AFL_FL_Sensor', 'units':'0-5VDC', 'type':'float64'}, #check short_name later
'42':{'short_name':'PAR', 'long_name':'PAR/Irradiance, Biospherical/Licor', 'units':'0-5VDC', 'type':'float64'},
'51':{'short_name':'REF_PAR', 'long_name':'Surface PAR/Irradiance, Biospherical/Licor', 'units':'0-5VDC', 'type':'float64'},
'70':{'short_name': 'CTDBACKSCATTER', 'long_name': 'WetlabECO_BB_Sensor', 'units':'0-5VDC', 'type':'float64'}
}
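# Hedged helper added for illustration (not part of the original module): resolve a
# raw SBE sensor ID from the table above to its output column name, falling back to
# a default for IDs that are not present in short_lookup.
def _example_sensor_short_name(sensor_id, default='UNKNOWN'):
    return short_lookup.get(str(sensor_id), {}).get('short_name', default)
# _example_sensor_short_name('55') would return 'CTDTMP' per the table above.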
def debugPrint(*args, **kwargs):
if DEBUG:
errPrint(*args, **kwargs)
def errPrint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def convertFromFiles(hex_file, xmlcon_file, debug=False):
"""Handler to convert engineering data to sci units automatically.
Takes the full path and filename of the .hex and .XMLCON as arguments.
Optionally takes a boolean debug flag to specify whether or not to display
verbose messages to stderr
"""
global DEBUG
DEBUG = debug
sbeReader = sbe_rd.SBEReader.from_paths(hex_file, xmlcon_file)
return convertFromSBEReader(sbeReader, DEBUG)
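# Hedged usage sketch (added for illustration; the .hex/.XMLCON paths below are
# hypothetical placeholders, not files shipped with this module): convertFromFiles()
# is the documented entry point and returns the converted scan data.
def _example_convert_cast(debug=False):
    hex_file = '/data/casts/example_cast.hex'        # assumed path
    xmlcon_file = '/data/casts/example_cast.XMLCON'  # assumed path
    return convertFromFiles(hex_file, xmlcon_file, debug=debug)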
def convertFromSBEReader(sbeReader, debug=False):
"""Handler to convert engineering data to sci units automatically.
Takes SBEReader object that is already connected to the .hex and .XMLCON files.
Optionally takes a boolean debug flag to specify whether or not to display
verbose messages to stderr
"""
global DEBUG
DEBUG = debug
# Retrieve parsed scans
rawData = sbeReader.parsed_scans
# Convert raw data to dataframe
raw_df = pd.DataFrame(rawData)
raw_df.index.name = 'index'
raw_df = raw_df.apply(pd.to_numeric, errors="ignore")
#debugPrint("Raw Data Types:", raw_df.dtypes)
#debugPrint("Raw Data:", raw_df.head)
# Retrieve Config data
rawConfig = sbeReader.parsed_config()
    # The metadata field needs to be processed separately and then joined with the converted_df
debugPrint("Building meta data dataframe... ", end='')
metaArray = [line.split(',') for line in sbeReader._parse_scans_meta().tolist()]
metaArrayheaders = sbeReader._breakdown_header()
meta_df =
|
pd.DataFrame(metaArray)
|
pandas.DataFrame
|
"""
This file loads the data from the data directory and shows you how.
Feel free to change the contents of this file!
Do ensure these functions remain functional:
- get_business(city, business_id)
- get_reviews(city, business_id=None, user_id=None, n=10)
- get_user(username)
"""
import state
import os
import json
import time
import numpy
import pandas
import random
DATA_DIR = "yelp-all"
CITIES = {}
BUSINESSES = {}
REVIEWS = {}
USERS = {}
UTILITY = []
SIMILARITY = []
UTILITY_CATEGORIES = []
SIMILARITY_CATEGORIES = []
# - - - - - - - - - - - - - - - - load functions - - - - - - - - - - - - - - - #
def load_cities(state_abbr=None):
"""
Finds all cities (all directory names) in ./DATA_DIR
Returns a list of city names
"""
if state_abbr:
return state.process_cities(DATA_DIR, state_abbr)
return os.listdir(DATA_DIR)
def load(cities, data_filename, to_remove=[]):
"""
Given a list of city names,
for each city extract all data from ./DATA_DIR/<city>/<data_filename>.json
Returns a dictionary of the form:
{
<city1>: [<entry1>, <entry2>, ...],
<city2>: [<entry1>, <entry2>, ...],
...
}
"""
data = {}
for city in cities:
city_data = []
with open(f"{DATA_DIR}/{city}/{data_filename}.json", "r") as f:
for line in f:
l = json.loads(line)
for key in to_remove:
l.pop(key, None)
city_data.append(l)
data[city] = city_data
return data
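# Hedged usage sketch (added for illustration; the "review" filename and the keys
# being stripped are assumptions about the Yelp dump layout, not guarantees): load()
# returns {"<city>": [<entry>, ...], ...}, so a typical call pairs it with
# load_cities() and drops bulky fields up front.
def _example_load_review_counts():
    cities = load_cities()
    reviews = load(cities, "review", to_remove=["text"])
    return {city: len(entries) for city, entries in reviews.items()}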
# - - - - - - - - - - - - - - - helper functions - - - - - - - - - - - - - - - #
def check(business, treshold):
return business['review_count'] >= treshold
def trim(reviews, to_delete):
return [r for r in reviews if not r['business_id'] in to_delete]
def mem_usage(panda):
if isinstance(panda, pandas.DataFrame):
usage_b = panda.memory_usage(deep=True).sum()
else:
# we assume if not a df it's a series
usage_b = panda.memory_usage(deep=True)
# convert bytes to megabytes
usage_mb = usage_b / 1024 ** 2
return "{:03.2f} MB".format(usage_mb)
def to_pandas(CITIES, SET):
set_list = []
for city in CITIES:
set_frame = pandas.DataFrame.from_dict(SET[city]).assign(city=city)
set_list.append(set_frame)
# append all datapoints to one DataFrame
return pandas.concat(set_list, ignore_index=True, sort=True)
def optimize(frame, types):
# convert columns to optimal types
frame = frame.astype(types)
# optimize float and int sizes
floats = frame.select_dtypes(include=['float'])
converted_float = floats.apply(pandas.to_numeric, downcast='float')
frame[converted_float.columns] = converted_float
ints = frame.select_dtypes(include=['int'])
converted_int = ints.apply(pandas.to_numeric, downcast='unsigned')
frame[converted_int.columns] = converted_int
# return finished DataFrame
return frame
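# Hedged usage sketch (added for illustration; the category columns named here are
# assumptions about the business frame, not part of the original code): cast selected
# columns, let optimize() downcast the numeric dtypes, and compare memory footprints
# with mem_usage() before and after.
def _example_optimize_frame(frame):
    before = mem_usage(frame)
    frame = optimize(frame, {"city": "category", "state": "category"})  # assumed columns
    return before, mem_usage(frame), frame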
def cosine_similarity(business1, business2):
""" Calculate cosine similarity between two businesses. """
# select for users that have rated both businesses
index1 = numpy.argwhere(~pandas.isnull(business1))
index2 = numpy.argwhere(~pandas.isnull(business2))
selected = numpy.intersect1d(index1, index2)
if not selected.any():
return 0
# get the ratings
ratings1 = business1[selected]
ratings2 = business2[selected]
# calculate cosine similarity
numerator = (ratings1 * ratings2).sum()
denumerator = numpy.sqrt((ratings1 ** 2).sum()) * numpy.sqrt((ratings2 ** 2).sum())
return numerator / denumerator if denumerator != 0 else 0
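# Hedged worked example (added for illustration): two businesses rated by overlapping
# users; the NaN rating is dropped by the index intersection above, and identical
# rating patterns give a similarity of 1.0. Note that the `selected.any()` guard
# treats a lone shared position 0 as "no overlap", which callers may want to keep in mind.
def _example_cosine_similarity():
    b1 = pandas.Series([5.0, 3.0, numpy.nan, 4.0])
    b2 = pandas.Series([5.0, 3.0, 2.0, 4.0])
    return cosine_similarity(b1, b2)  # expected to be ~1.0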
def calculate_similarity(utility):
""" Creates similarity matrix based on cosine similarity. """
from scipy.spatial.distance import pdist, squareform
matrix = squareform(pdist(utility, cosine_similarity))
numpy.fill_diagonal(matrix, 1)
return pandas.DataFrame(matrix, columns=utility.index, index=utility.index)
def split_categories(row):
cat = row['categories']
if not cat:
return
|
pandas.Series([row['business_id']] + [])
|
pandas.Series
|
"""
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
        bins. If set `duplicates=drop`, `bins` will drop non-unique bins. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
    Discovers the same bins, but assigns them specific labels. Notice that
the returned Categorical's categories are `labels` and is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
    Use the `drop` option when bins are not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.datetime64
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.timedelta64
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
    Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if
|
is_datetime64tz_dtype(dtype)
|
pandas.core.dtypes.common.is_datetime64tz_dtype
|
""" Universal Sentence Encoding loaded as a lambda function into a Keras model
https://www.dlology.com/blog/keras-meets-universal-sentence-encoder-transfer-learning-for-text-data/
## QUESTION_CATEGORIES
ABBR - 'abbreviation': expression abbreviated, etc.
DESC - 'description and abstract concepts': manner of an action, description of sth. etc.
ENTY - 'entities': animals, colors, events, food, etc.
HUM - 'human beings': a group or organization of persons, an individual, etc.
LOC - 'locations': cities, countries, etc.
NUM - 'numeric values': postcodes, dates, speed,temperature, etc
"""
import re
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from keras import layers
from keras import Model
from keras import backend as K
# from qa_datasets import load_trec_trainset
# Import the Universal Sentence Encoder's TF Hub module
use_encode = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-large/3")
def encode_texts(texts=["That band rocks!", "That song is really cool."], use_encode=use_encode):
texts = [texts] if isinstance(texts, str) else texts
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
usevectors = session.run(use_encode(texts))
return usevectors
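# Hedged usage sketch (added for illustration): each sentence is encoded to a
# fixed-length 512-dimensional vector (matching usevector_shape used below), and the
# module's embeddings are approximately unit-normalized, so an inner product acts as
# a rough cosine-similarity score.
def _example_sentence_similarity():
    usevectors = encode_texts(["That band rocks!", "That song is really cool."])
    return float(np.inner(usevectors[0], usevectors[1]))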
def get_dataframe(filename='train_5500.txt'):
lines = open(filename, 'r').read().splitlines()
data = []
for i in range(0, len(lines)):
label = lines[i].split(' ')[0]
label = label.split(":")[0]
text = ' '.join(lines[i].split(' ')[1:])
text = re.sub(r'[^A-Za-z0-9 ,\?\'\"-._\+\!/\`@=;:]+', ' ', text)
data.append([label, text])
df = pd.DataFrame(data, columns=['label', 'text'])
df = df[~df.label.isnull()]
df = df[df.label.str.len().astype(bool)]
df['label'] = df.label.astype('category')
return df
def use_lambda(x):
return use_encode(tf.squeeze(tf.cast(x, tf.string)), signature="default", as_dict=True)["default"]
def normalize_trainset(df_train='train_5500.txt'):
df_train = get_dataframe(df_train) if isinstance(df_train, str) else df_train
QA_CATEGORIES = df_train.label.cat.categories.tolist()
# df_train = load_trec_trainset()
# print(df_train.head())
print(df_train.head())
train_text = df_train['text'].tolist()
train_text = np.array(train_text, dtype=object)[:, np.newaxis]
train_label = np.asarray(pd.get_dummies(df_train.label), dtype=np.int8)
return train_text, train_label
def build_use_classifier(num_classes=7):
usevector_shape = (512,)
input_text = layers.Input(shape=(1,), dtype=tf.string)
usevector = layers.Lambda(use_lambda, output_shape=usevector_shape)(input_text)
dense = layers.Dense(256, activation='relu')(usevector)
pred = layers.Dense(num_classes, activation='softmax')(dense)
model = Model(inputs=[input_text], outputs=pred)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def train_model(model, train_texts=None, train_labels=None, filename='model.h5'):
with tf.Session() as session:
K.set_session(session)
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
history = model.fit(train_texts, train_labels,
# validation_data=(test_text, test_label),
epochs=10,
batch_size=32)
model.save_weights(filename)
print(f'Saved model to {filename}.')
return history
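# Hedged end-to-end sketch (added for illustration; 'train_5500.txt' is the default
# training file assumed elsewhere in this script): normalize the TREC-style labels,
# build the USE classifier sized to the label set, then fit it with the
# session-aware training loop above.
def _example_train_pipeline(filename='train_5500.txt'):
    train_texts, train_labels = normalize_trainset(filename)
    model = build_use_classifier(num_classes=train_labels.shape[1])
    return train_model(model, train_texts=train_texts, train_labels=train_labels)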
def test_model(model='model.h5', texts=None, categories=None):
texts = np.array(texts, dtype=object)[:, np.newaxis]
with tf.Session() as session:
K.set_session(session)
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
if isinstance(model, str):
filename = model
model = build_use_classifier(num_classes=len(categories))
print(f'Loading model from {filename}')
model.load_weights(filename)
predictions = model.predict(texts, batch_size=32)
predict_logits = predictions.argmax(axis=1)
predicted_labels = [categories[logit] for logit in predict_logits]
print(predicted_labels)
df =
|
pd.DataFrame(predictions)
|
pandas.DataFrame
|
import re
import os
from glob import glob
from astropy.io import fits
import pandas as pd
import numpy as np
from progressbar import ProgressBar
bosz_bibtex = """
@ARTICLE{2017AJ....153..234B,
author = {{<NAME>. and {M{\'e}sz{\'a}ros}, S. and {Fleming}, S.~W. and
{Gordon}, K.~D. and {Koekemoer}, A.~M. and {Kov{\'a}cs}, J.},
title = "{A New Stellar Atmosphere Grid and Comparisons with HST/STIS CALSPEC Flux Distributions}",
journal = {\aj},
archivePrefix = "arXiv",
eprint = {1704.00653},
primaryClass = "astro-ph.SR",
keywords = {stars: atmospheres, stars: fundamental parameters, techniques: spectroscopic},
year = 2017,
month = may,
volume = 153,
eid = {234},
pages = {234},
doi = {10.3847/1538-3881/aa6ba9},
adsurl = {http://adsabs.harvard.edu/abs/2017AJ....153..234B},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
"""
bosz_meta = {'bibtex':bosz_bibtex,
'parameters':['teff', 'logg', 'mh', 'ch', 'alpha', 'rot', 'micro'],
'wavelength_unit':'Angstrom',
'wavelength_type':'vacuum',
'flux_unit': 'erg/s/cm^2/angstrom'}
def make_raw_index():
"""
Read all Phoenix files and generate a raw index with filename association.
Returns
-------
bosz_index : pd.DataFrame
"""
all_fnames = glob('ascii/insbroad_300000/*/*/*/*.asc.bz2')
nfiles = len(all_fnames)
mh_arr = np.zeros(nfiles)
ch_arr = np.zeros(nfiles)
alpha_arr = np.zeros(nfiles)
teff_arr = np.zeros(nfiles)
logg_arr = np.zeros(nfiles)
micro_arr = np.zeros(nfiles)
rot_arr = np.zeros(nfiles)
res_arr = np.zeros(nfiles)
    pattern = re.compile(r'a(mp|mm)(\d+)(cp|cm)(\d+)+(op|om)(\d+)t(\d+)g(\d+)v(\d+)modrt(\d+)b(\d+)')
    pattern_dir = re.compile(r'metal_(.....)\/carbon_(.....)\/alpha_(.....)')
for i in np.arange(nfiles):
filename = all_fnames[i]
base = filename.split('.')[0]
s = pattern.search(filename)
s2 = pattern_dir.search(filename)
mm,mh,cm,ch,om,alpha,teff,logg,micro,rot,res = s.group(1,2,3,4,5,6,7,8,9,10,11)
mh,ch,alpha = s2.group(1,2,3) # use the directory names for more accurate grid points
logg = logg[0]+'.'+logg[1:]
micro = micro[0]+'.'+micro[1:]
mh_arr[i] = float(mh)
ch_arr[i] = float(ch)
alpha_arr[i] = float(alpha)
teff_arr[i] = float(teff)
logg_arr[i] = float(logg)
micro_arr[i] = float(micro)
rot_arr[i] = float(rot)
res_arr[i] = float(res)
return pd.DataFrame({'mh':mh_arr,'ch':ch_arr,'alpha':alpha_arr,'teff':teff_arr,
'logg':logg_arr,'micro':micro_arr,'rot':rot_arr,
'res':res_arr,'filename':all_fnames})
def make_grid_info(fname):
"""
Make the HDF5 Grid Info file
Parameters
----------
fname: str
"""
raw_index = make_raw_index()
wavelength = np.loadtxt(raw_index.loc[0, 'filename'], usecols=(0,), unpack=True)
with pd.HDFStore(fname) as fh:
fh['index'] = raw_index
fh['wavelength'] =
|
pd.DataFrame(wavelength)
|
pandas.DataFrame
|
"""
Tests of Tax-Calculator utility functions.
"""
# CODING-STYLE CHECKS:
# pycodestyle test_utils.py
# pylint --disable=locally-disabled test_utils.py
#
# pylint: disable=missing-docstring,no-member,protected-access,too-many-lines
from __future__ import print_function
import os
import math
import random
import numpy as np
import pandas as pd
import pytest
# pylint: disable=import-error
from taxcalc import Policy, Records, Behavior, Calculator
from taxcalc.utils import (DIST_VARIABLES,
DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,
DIFF_VARIABLES,
DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,
SOI_AGI_BINS,
create_distribution_table, create_difference_table,
weighted_count_lt_zero, weighted_count_gt_zero,
weighted_count, weighted_sum, weighted_mean,
wage_weighted, agi_weighted,
expanded_income_weighted,
add_income_table_row_variable,
add_quantile_table_row_variable,
mtr_graph_data, atr_graph_data, dec_graph_data,
xtr_graph_plot, write_graph_file,
read_egg_csv, read_egg_json, delete_file,
bootstrap_se_ci,
certainty_equivalent,
ce_aftertax_expanded_income,
nonsmall_diffs,
quantity_response)
DATA = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[3.0, 6, 'b']]
WEIGHT_DATA = [[1.0, 2.0, 10.0],
[2.0, 4.0, 20.0],
[3.0, 6.0, 30.0]]
DATA_FLOAT = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[0.0000000001, 3, 'a'],
[-0.0000000001, 1, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[0.0000000001, 3, 'b'],
[-0.0000000001, 1, 'b'],
[3.0, 6, 'b']]
def test_validity_of_name_lists():
assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
Records.read_var_info()
assert set(DIST_VARIABLES).issubset(Records.CALCULATED_VARS | {'s006'})
extra_vars_set = set(['num_returns_StandardDed',
'num_returns_ItemDed',
'num_returns_AMT'])
assert (set(DIST_TABLE_COLUMNS) - set(DIST_VARIABLES)) == extra_vars_set
def test_create_tables(cps_subsample):
# pylint: disable=too-many-statements,too-many-branches
# create a current-law Policy object and Calculator object calc1
rec = Records.cps_constructor(data=cps_subsample)
pol = Policy()
calc1 = Calculator(policy=pol, records=rec)
calc1.calc_all()
# create a policy-reform Policy object and Calculator object calc2
reform = {2013: {'_II_rt1': [0.15]}}
pol.implement_reform(reform)
calc2 = Calculator(policy=pol, records=rec)
calc2.calc_all()
test_failure = False
# test creating various difference tables
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'standard_income_bins', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.20,
-0.67,
-0.78,
-0.71,
-0.82,
-0.79,
-0.73,
-0.64,
-0.23,
-0.09,
-0.06,
-0.58]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xbin', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'weighted_deciles', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'tot_change'
expected = [0,
0,
121721713,
1799074733,
2655187813,
3306079845,
4468286112,
5576666034,
7188935504,
8314048550,
10398339206,
9129031991,
52957371499,
5726291219,
2821882221,
580858551]
if not np.allclose(diff[tabcol].values, expected,
atol=0.51, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'share_of_change'
expected = [0.00,
0.00,
0.23,
3.40,
5.01,
6.24,
8.44,
10.53,
13.57,
15.70,
19.64,
17.24,
100.00,
10.81,
5.33,
1.10]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.15,
-0.76,
-0.78,
-0.75,
-0.79,
-0.79,
-0.79,
-0.72,
-0.68,
-0.28,
-0.58,
-0.53,
-0.23,
-0.06]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.15,
-0.76,
-0.78,
-0.75,
-0.79,
-0.79,
-0.79,
-0.72,
-0.68,
-0.28,
-0.58,
-0.53,
-0.23,
-0.06]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
# test creating various distribution tables
dist, _ = calc2.distribution_tables(None, 'weighted_deciles')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0,
0,
-2439074403,
-1234901725,
-609273185,
2687658386,
19501356849,
29465049377,
48681577048,
88747972386,
163479377840,
709224809867,
1057504552440,
153548408569,
219064860852,
336611540446]
if not np.allclose(dist[tabcol].values, expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'num_returns_ItemDed'
expected = [0,
0,
326236,
1253241,
2240460,
2828475,
4741957,
5510030,
6883022,
8358806,
10667610,
12037635,
54847474,
5893249,
4820479,
1323906]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'expanded_income'
expected = [0,
0,
87249858210,
258005174639,
369648687648,
482950933444,
637031080899,
799835240295,
1047137967700,
1349212863519,
1849316366473,
4236199144621,
11116587317446,
1362651371493,
1589763961227,
1283783811901]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'aftertax_income'
expected = [0,
0,
82063918307,
234849286479,
336461183613,
435772857489,
559917984490,
697963511720,
906200715535,
1150438396510,
1516372357769,
3226734653812,
9146774865725,
1082675191375,
1250757557050,
893301905386]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
dist, _ = calc2.distribution_tables(None, 'standard_income_bins')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0,
0,
-822217116,
-2113487293,
-1785384383,
4299002729,
21451400591,
62343670148,
93389591704,
293234582500,
292465924986,
100158506284,
194882962290,
1057504552440]
if not np.allclose(dist[tabcol], expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'num_returns_ItemDed'
expected = [0,
0,
60455,
1302001,
2927384,
3350721,
4499431,
10181119,
8996491,
16350238,
6326459,
541189,
311987,
54847474]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
if test_failure:
assert 1 == 2
def test_diff_count_precision():
"""
Estimate bootstrap standard error and confidence interval for count
statistics ('tax_cut' and 'tax_inc') in difference table generated
using puf.csv input data taking no account of tbi privacy fuzzing and
assuming all filing units in each bin have the same weight. These
assumptions imply that the estimates produced here are likely to
over-estimate the precision of the count statistics.
Background information on unweighted number of filing units by bin:
DECILE BINS:
0 16268
1 14897
2 13620
3 15760
4 16426
5 18070
6 18348
7 19352
8 21051
9 61733 <--- largest unweighted bin count
A 215525
STANDARD BINS:
0 7081 <--- negative income bin is dropped in TaxBrain display
1 19355
2 22722
3 20098
4 17088
5 14515
6 24760
7 15875
8 25225
9 15123
10 10570 <--- smallest unweighted bin count
11 23113 <--- second largest unweighted WEBAPP bin count
A 215525
Background information on Trump2017.json reform used in TaxBrain run 16649:
STANDARD bin 10 ($500-1000 thousand) has weighted count of 1179 thousand;
weighted count of units with tax increase is 32 thousand.
So, the mean weight for all units in STANDARD bin 10 is 111.5421 and the
unweighted number with a tax increase is 287 assuming all units in that
bin have the same weight. (Note that 287 * 111.5421 is about 32,012.58,
which rounds to the 32 thousand shown in the TaxBrain difference table.)
STANDARD bin 11 ($1000+ thousand) has weighted count of 636 thousand;
weighted count of units with tax increase is 27 thousand.
So, the mean weight for all units in STANDARD bin 11 is about 27.517 and
the unweighted number with a tax increase is 981 assuming all units in
that bin have the same weight. (Note that 981 * 27.517 is about 26,994.18,
which rounds to the 27 thousand shown in the TaxBrain difference table.)
"""
dump = False # setting to True implies results printed and test fails
seed = 123456789
bs_samples = 1000
alpha = 0.025 # implies 95% confidence interval
# compute stderr and confidence interval for STANDARD bin 10 increase count
data_list = [111.5421] * 287 + [0.0] * (10570 - 287)
assert len(data_list) == 10570
data = np.array(data_list)
assert (data > 0).sum() == 287
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 32) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN10: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 1.90) - 1) < 0.0008
# NOTE: a se of 1.90 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 10 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 10 if the point
# estimates both had se = 1.90, then the difference in the point
    #       estimates has a se = 2.687.  This means that the difference
# would have to be over 5 thousand in order for there to be high
# confidence that the difference was different from zero in a
# statistically significant manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 28.33) - 1) < 0.0012
assert abs((cihi / 35.81) - 1) < 0.0012
# compute stderr and confidence interval for STANDARD bin 11 increase count
data_list = [27.517] * 981 + [0.0] * (23113 - 981)
assert len(data_list) == 23113
data = np.array(data_list)
assert (data > 0).sum() == 981
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 27) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN11: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 0.85) - 1) < 0.0040
# NOTE: a se of 0.85 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 11 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 11 if point estimates
    #       both had se = 0.85, then the difference in the point estimates
    #       has a se = 1.20.  This means that the difference would have to be
# over 2.5 thousand in order for there to be high confidence that the
# difference was different from zero in a statistically significant
# manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 25.37) - 1) < 0.0012
assert abs((cihi / 28.65) - 1) < 0.0012
# fail if doing dump
assert not dump
def test_weighted_count_lt_zero():
df1 = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
grped = df1.groupby('label')
diffs = grped.apply(weighted_count_lt_zero, 'tax_diff')
exp = pd.Series(data=[4, 0], index=['a', 'b'])
exp.index.name = 'label'
pd.util.testing.assert_series_equal(exp, diffs)
df2 = pd.DataFrame(data=DATA_FLOAT, columns=['tax_diff', 's006', 'label'])
grped = df2.groupby('label')
diffs = grped.apply(weighted_count_lt_zero, 'tax_diff')
exp =
|
pd.Series(data=[4, 0], index=['a', 'b'])
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
'''
'''
import time
import pandas as pd
import datarobot as dr
from datarobot.models.modeljob import wait_for_async_model_creation
import numpy as np
import re
import os
from datarobot.errors import JobAlreadyRequested
token_id = ""
ts_setting = {"project_name":"fake_job_posting_210123","filename":"../Data/fake_job_postings.csv", \
"project_id": "60089b3d23aace3eea1810d0","model_id":"", \
"feature_list": "Informative Features","features":[],"set":"validation" , \
"AUC":"Weighted AUC", "LogLoss":"Weighted LogLoss", \
"downsampling": 36,"holdout_pct": 20,"validation_pct":16,"target":"fraudulent" }
parameter_name = ['stop_words','stemmer','num_ngram',"use_idf","pos_tagging"]
value = [1,"porter",[1,2,3,4],1,1]
param_df = pd.DataFrame(list(zip(parameter_name, value)),
columns =['parameter_name', 'value'])
dr.Client(token=token_id, endpoint='https://app.datarobot.com/api/v2')
def check_if_number(st):
    tp = re.search(r"\d+", st)
if tp:
return int(tp.group())
else:
return np.nan
def get_min_max_salary (text):
'''
Get the min and max from the salary_range
:param text: string
:return: the min and max of a salary_range
'''
if type(text) == str:
        if re.search(r"-", text):
tp = text.split("-")
min_salary = check_if_number(tp[0].strip())
max_salary = check_if_number(tp[1].strip())
return min_salary,max_salary
else:
return np.nan,np.nan
else:
return np.nan, np.nan
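# Usage sketch (illustrative inputs, not taken from the dataset):
#   get_min_max_salary("40000-60000")  -> (40000, 60000)
#   get_min_max_salary(np.nan)         -> (nan, nan)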
def cleaned_location(text):
'''
Extract country, and country_and state from location
:param text: string with country, state, city
:return:
'''
country_state = ""
st = str(text)
if type(st) is str:
tp = re.search("[a-zA-Z]{2,}\s?\,(\s*[a-zA-Z0-9]+|\s)",st)
if tp:
country_state = tp.group().strip()
country = st.strip()[0:2]
else:
return "",""
return country,country_state
else:
return "",""
def create_binary_cat_for_education(text):
if pd.isnull(text) or pd.isna(text):
return "no"
elif text == "unspecified":
return "no"
else:
return "yes"
def PrepareDataSet():
'''
Prepare the dataset for fake_job_postings by adding new features.
:return: enriched original dataset with new features
'''
fake_jobs_df = pd.read_csv(ts_setting["filename"])
    fake_jobs_df["min_salary"] = np.nan
    fake_jobs_df["max_salary"] = np.nan
    fake_jobs_df["salary_diff"] = np.nan
fake_jobs_df["min_salary"],fake_jobs_df["max_salary"] = zip(*fake_jobs_df["salary_range"].apply(get_min_max_salary))
fake_jobs_df["min_salary"] = pd.to_numeric(fake_jobs_df["min_salary"])
fake_jobs_df["max_salary"] = pd.to_numeric(fake_jobs_df["max_salary"])
fake_jobs_df["education_flag"] = [create_binary_cat_for_education(x) for x in fake_jobs_df["required_education"]]
fake_jobs_df["salary_range"] = fake_jobs_df.max_salary - fake_jobs_df.min_salary
fake_jobs_df["salary_diff"] = fake_jobs_df["salary_range"]/fake_jobs_df["min_salary"]
return fake_jobs_df
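# Usage sketch (assumes the CSV referenced by ts_setting["filename"] exists):
#   enriched_df = PrepareDataSet()
#   enriched_df[["min_salary", "max_salary", "salary_diff"]].head()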
def start_project_with_settings(fake_jobs_df):
'''
Run a project for fake_jobs_df
:param fake_jobs_df: already enriched dataset
:return: project
'''
global ts_setting
advanced_options = dr.AdvancedOptions(
response_cap=0.7,
blueprint_threshold=2,
smart_downsampled=True, majority_downsampling_rate=ts_setting["downsampling"])
partition = dr.StratifiedTVH(ts_setting["holdout_pct"],ts_setting["validation_pct"], seed=0)
pandas_dataset = dr.Dataset.create_from_in_memory_data(data_frame=fake_jobs_df.drop(columns = ["job_id"]))
project = pandas_dataset.create_project(project_name = ts_setting["project_name"])
project.set_target(target= ts_setting["target"],mode = dr.enums.AUTOPILOT_MODE.QUICK,
partitioning_method=partition,
advanced_options = advanced_options,
worker_count = -1)
project.unlock_holdout()
project.wait_for_autopilot(verbosity=dr.VERBOSITY_LEVEL.SILENT)
return project
'''
From the project, find the features that DataRobot identified as text features
'''
def get_text_features(project):
'''
get text features
:param project: DataRobot Project
:return: list of features of type text
'''
raw = [feat_list for feat_list in project.get_featurelists()\
if feat_list.name == ts_setting["feature_list"]][0]
text_features = [
feat
for feat in raw.features if dr.Feature.get(project.id, feat).feature_type == "Text"
]
return text_features
#Get all the models for a given text field
def get_1_model_performance(model_p,text_feature,num_modified):
'''
    Extract a model's metrics
    :param model_p: model of interest
    :param text_feature: the text feature the model was built on
:param num_modified: number of parameters modified
:return: performance of type dict
'''
global ts_setting
performance = {}
try:
roc = model_p.get_roc_curve(ts_setting["set"])
threshold = roc.get_best_f1_threshold()
metrics = roc.estimate_threshold(threshold)
performance = {"model_id":model_p.id,"text_feature":text_feature,"AUC":model_p.metrics[ts_setting["AUC"]][ts_setting["set"]], \
"sample_pct":model_p.sample_pct,
"LogLoss":model_p.metrics[ts_setting["LogLoss"]][ts_setting["set"]],
'f1_score':metrics['f1_score'],"sample_pct":model_p.sample_pct,\
'true_negative_rate': metrics['true_negative_rate'],
'false_positive_rate':metrics['false_positive_rate'],
'true_positive_rate':metrics['true_positive_rate'],\
'positive_predictive_value':metrics['positive_predictive_value'],\
'negative_predictive_value':metrics['negative_predictive_value'],\
'threshold':metrics['threshold'],'parameters_modified': num_modified}
return performance
    except Exception:
performance = {"model_id": model_p.id, "text_feature": text_feature,
"AUC": 0, \
"sample_pct": model_p.sample_pct,
"LogLoss": 1,
'f1_score': 0, "sample_pct": model_p.sample_pct, \
'true_negative_rate': 0,
'false_positive_rate': 0,
'true_positive_rate': 0, \
'positive_predictive_value': 0, \
'negative_predictive_value': 0, \
'threshold': 0, 'parameters_modified': num_modified}
return performance
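# Usage sketch (assumes `model` is a datarobot Model from this project and
# "description" is one of its text features):
#   perf = get_1_model_performance(model, "description", 0)
#   perf["AUC"], perf["LogLoss"]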
#Get all the models for a given text field
#This function will have 2 uses: First, it will be used to find the best AutoTuned model for the
#text features, and then it will be used to compare the best model before the pre-processing and
#after the pre-processing. Keep only models that used less than 100% of the dataset.
def models_performance_for_text(text_feature,project):
'''
extract all models built only for text features
    :param text_feature: a single feature of type text
    :param project: DataRobot project
    :return: all models trained on less than 100% of the data and only on this text feature (Auto-Tuned Word N-gram)
'''
    models_desc = project.get_models(
search_params={
'name': text_feature
})
df= pd.DataFrame()
for model_p in models_desc:
tmp_df = get_1_model_performance(model_p,text_feature,0)
if tmp_df:
if tmp_df["sample_pct"] < 100.00:
df = df.append(tmp_df, ignore_index=True)
return df
def get_best_models_before_text(project):
'''
    get the best model for each text feature. This function calls get_text_features and models_performance_for_text
:param project: DataRobot project
:return: best models id and logloss metric
'''
text_features= get_text_features(project)
models_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from redshift_upload import upload, testing_utilities
import pandas
import pytest
table_name = (
"unit_" + __file__.replace("\\", "/").split("/")[-1].split(".")[0]
) # we would just use __name__, but we don't want to run into __main__ if called directly
@pytest.fixture(autouse=True)
def setup_and_teardown():
testing_utilities.drop_tables(table_name)
yield # this pauses the function for the tests to run
testing_utilities.drop_tables(table_name)
df1 =
|
pandas.DataFrame([{"a": "hi"}, {"a": "hi"}] * 10)
|
pandas.DataFrame
|
from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from sid.msm import _flatten_index
from sid.msm import _harmonize_input
from sid.msm import _is_diagonal
from sid.msm import get_diag_weighting_matrix
from sid.msm import get_flat_moments
from sid.msm import get_msm_func
def dummy_simulate(_params): # noqa: U101
return
|
pd.Series([1, 2])
|
pandas.Series
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#----------------
#---on series
#create a list
labels = ['a', 'b', 'c']
#create another list
mylist = [10,20,30]
#create a pandas series
my_series =
|
pd.Series(data=mylist)
|
pandas.Series
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_markers: region,endregion
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Data Mining Project 2 Spring semester 2019-2020</center>
# ## <center>Παναγιώτης Ευαγγελίου   1115201500039</center>
# ## <center>Γεώργιος Μαραγκοζάκης   1115201500089</center>
# ___
# ### Do all the necessary imports for this notebook
# region
# data processing
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from nltk.corpus import stopwords as nltkStopwords
from string import punctuation, digits
import re
from nltk import word_tokenize
from nltk.stem import PorterStemmer
# visualization
from wordcloud import WordCloud
from IPython.display import Image
from IPython.display import display
from itertools import cycle
import matplotlib.patches as mpatches
# classification
from sklearn.model_selection import KFold, cross_validate
from sklearn import svm, preprocessing
from sklearn.metrics import classification_report, make_scorer, accuracy_score, \
precision_score, recall_score, f1_score, roc_curve, auc,\
roc_auc_score, plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
import scipy
from collections import Counter
import gensim
import random
from operator import add
# vectorization
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# clustering
from nltk.cluster import KMeansClusterer, cosine_distance
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
# for data exploration
import os
import numpy as np
# endregion
# ## __Dataset Preprocessing__
# - ### *Make tsv files from all the txt files*
# region
myCategoriesFolder = ['business','entertainment','politics', 'sport', 'tech']
dataPathDir = './fulltext/data/'
myDataSetDf = pd.DataFrame(columns=['ID', 'TITLE', 'CONTENT', 'CATEGORY'])
id_count = 0
for category in myCategoriesFolder:
specificPath = dataPathDir + category + '/'
    # iterate over all the files in this category's folder
for fileName in os.listdir(specificPath):
# we need to check only .txt files
if fileName.endswith(".txt"):
thisTxt = open(os.path.join(specificPath, fileName),"r")
thisTxtTitle = thisTxt.readline()
            # get rid of '\n' at the end of the title line
thisTxtTitle = thisTxtTitle.replace('\n', '')
thisTxtContent = thisTxt.readlines()
# get rid of empty lines '\n'
thisTxtContent = list(filter(lambda a: a != '\n', thisTxtContent))
            # get rid of '\n' at the end of each line
thisTxtContent = [period.replace('\n', '') for period in thisTxtContent]
# convert list of lines into a single string line
thisTxtContent = ' '.join(thisTxtContent)
myDataSetDf = myDataSetDf.append({'ID': id_count, 'TITLE': thisTxtTitle, 'CONTENT': thisTxtContent, 'CATEGORY': category.upper()}, ignore_index=True)
thisTxt.close()
id_count += 1
display(myDataSetDf)
# endregion
# ## __Make wordcloud for each category__
def makeWordCloud(myText, saveLocationPath, myMaxWords=100, myMask=None, myStopWords=None):
'''Default function for generating wordcloud'''
wc = WordCloud(background_color="white", mask=myMask, max_words=myMaxWords,
stopwords=myStopWords, contour_width=3, contour_color='steelblue',
width=600, height=600)
# generate word cloud
wc.generate(myText)
# store to file
wc.to_file(saveLocationPath)
return saveLocationPath
def columnToText(myDfColumn):
wholeColumnText = ''
for text in myDfColumn:
wholeColumnText = wholeColumnText + ' ' + text
return wholeColumnText
stopWords = ENGLISH_STOP_WORDS
myAdditionalStopWords = ['say','said', 'new', 'need', 'year']
stopWords = (stopWords.union(myAdditionalStopWords))
# - ### *Business Wordcloud*
# region
makeWordCloud(saveLocationPath="businessWordCloud.png", myText=columnToText(myDataSetDf[myDataSetDf['CATEGORY'] == "BUSINESS"]['CONTENT']), myStopWords=stopWords)
Image('businessWordCloud.png')
# endregion
# - ### *Entertainment Wordcloud*
# region
makeWordCloud(saveLocationPath="entertainmentWordCloud.png", myText=columnToText(myDataSetDf[myDataSetDf['CATEGORY'] == "ENTERTAINMENT"]['CONTENT']), myStopWords=stopWords)
Image('entertainmentWordCloud.png')
# endregion
# - ### *Politics Wordcloud*
# region
makeWordCloud(saveLocationPath="politicsWordCloud.png", myText=columnToText(myDataSetDf[myDataSetDf['CATEGORY'] == "POLITICS"]['CONTENT']), myStopWords=stopWords)
Image('politicsWordCloud.png')
# endregion
# - ### *Sport Wordcloud*
# region
makeWordCloud(saveLocationPath="sportWordCloud.png", myText=columnToText(myDataSetDf[myDataSetDf['CATEGORY'] == "SPORT"]['CONTENT']), myStopWords=stopWords)
Image('sportWordCloud.png')
# endregion
# - ### *Tech Wordcloud*
# region
makeWordCloud(saveLocationPath="techWordCloud.png", myText=columnToText(myDataSetDf[myDataSetDf['CATEGORY'] == "TECH"]['CONTENT']), myStopWords=stopWords)
Image('techWordCloud.png')
# endregion
# ## __Classification__
def scoresReportCv(clf, trainX, trainY):
"""
Printing scores using cross_val_score
"""
print('----Report for 10-fold Cross Validation----')
scoring = {'Accuracy' : make_scorer(accuracy_score),
'Precision' : make_scorer(precision_score, average='weighted'),
'Recall' : make_scorer(recall_score, average='weighted'),
'F1' : make_scorer(f1_score, average='weighted')}
scores = cross_validate(clf, trainX, trainY, cv=10, scoring=scoring)
print ('Precision \t %0.2f' % (scores['test_Precision'].mean()))
print ('Recalls \t %0.2f' % (scores['test_Recall'].mean()))
print ('F-Measure \t %0.2f' % (scores['test_F1'].mean()))
print("Accuracy: \t %0.2f (+/- %0.2f)" % (scores['test_Accuracy'].mean(), scores['test_Accuracy'].std() * 2))
def makeRocPlot(labelTest, predictions, labelEncoder, mySubplots = None):
# Binarize the output
labelsAsNumber = [i for i in range(0,len(labelEncoder.classes_))]
labelTest = label_binarize(labelTest, classes=labelsAsNumber)
n_classes = labelTest.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(labelTest[:, i], predictions[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(labelTest.ravel(), predictions.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
lw = 2
if mySubplots is not None: # subplots for 10-CV
# Plot all ROC curves
mySubplots.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
mySubplots.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'forestgreen', 'maroon'])
for i, color in zip(range(n_classes), colors):
mySubplots.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(labelEncoder.classes_[i], roc_auc[i]))
mySubplots.plot([0, 1], [0, 1], 'k--', lw=lw)
mySubplots.axis(xmin=0.0,xmax=1.0, ymin=0.0, ymax=1.05)
mySubplots.set_xlabel('False Positive Rate')
mySubplots.set_ylabel('True Positive Rate')
mySubplots.legend(loc="lower right")
else:
# Plot all ROC curves
plt.figure(figsize=(12, 12))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'forestgreen', 'maroon'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(labelEncoder.classes_[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC plot of all classes')
plt.legend(loc="lower right")
def makeRocPlotsCV (clf, trainX, trainY, labelEncoder):
# make rocPlots for each fold in 10 CV
f, axs = plt.subplots(5, 2)
f.set_figheight(30)
f.set_figwidth(30)
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=10)
i = 0
z = 0
k = 1
for train, test in cv.split(trainX, trainY):
y_score = clf.fit(trainX[train], trainY[train]).predict_proba(trainX[test])
makeRocPlot(trainY[test], y_score, labelEncoder, axs[i, z])
axs[i, z].set_title('Roc Plot for fold - {0}'.format(k))
k += 1
if z == 1:
i += 1
z = 0
else:
z = 1
plt.show()
# - #### Classification using SVM classifier
def SvmClassification(trainX, trainY, testX, testY, labelEncoder):
"""
Classify the text using the SVM classifier of scikit-learn
"""
clf = svm.SVC(kernel='linear', C=1, probability=True)
# use 10-fold Cross Validation
scoresReportCv(clf, trainX, trainY)
print('----Roc Plots for 10-fold Cross Validation----')
makeRocPlotsCV (clf, trainX, trainY, labelEncoder)
# fit train set
clf.fit(trainX, trainY)
# Predict test set
predY = clf.predict(testX)
# Classification_report
print('\n----Report for predictions on test dataset----')
print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
print('\n----ROC plot for predictions on test dataset----')
y_score = clf.predict_proba(testX)
makeRocPlot(testY, y_score, labelEncoder)
plt.show()
return accuracy_score(testY, predY)
# we will use GridSearchCV with SVM only once, for demonstration, as it is slow
def SvmClassificationGridSearchCVDemo(trainX, trainY, testX, testY, labelEncoder):
"""
Classify the text using the SVM classifier of scikit-learn with gridSearchCV
"""
parameters = {'kernel':('linear', 'rbf'), 'C':[0.1, 1, 10], 'gamma':('scale', 'auto')}
svc = svm.SVC(probability=True)
clf = GridSearchCV(svc, parameters, n_jobs = -1)
# fit train set
clf.fit(trainX, trainY)
# Predict test set
predY = clf.predict(testX)
# Classification_report
print('\n----Report for predictions on test dataset with GridSearchCV----')
print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
print('\n----ROC plot for predictions on test dataset with GridSearchCV----')
y_score = clf.predict_proba(testX)
makeRocPlot(testY, y_score, labelEncoder)
plt.show()
# - #### Classification using Random Forests classifier
def RandomForestClassification(trainX, trainY, testX, testY, labelEncoder):
"""
Classify the text using the Random Forest classifier of scikit-learn
"""
clf = RandomForestClassifier()
# use 10-fold Cross Validation
scoresReportCv(clf, trainX, trainY)
print('----Roc Plots for 10-fold Cross Validation----')
makeRocPlotsCV (clf, trainX, trainY, labelEncoder)
# fit train set
clf.fit(trainX, trainY)
# Predict test set
predY = clf.predict(testX)
# Classification_report
print('\n----Report for predictions on test dataset----')
print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
print('\n----ROC plot for predictions on test dataset----')
y_score = clf.predict_proba(testX)
makeRocPlot(testY, y_score, labelEncoder)
plt.show()
return accuracy_score(testY, predY)
# - #### Classification using Naive Bayes classifier
def NaiveBayesClassification(trainX, trainY, testX, testY, labelEncoder):
"""
Classify the text using the Naive Bayes classifier of scikit-learn
"""
clf = GaussianNB()
trainX = trainX.toarray()
# use 10-fold Cross Validation
scoresReportCv(clf, trainX, trainY)
print('----Roc Plots for 10-fold Cross Validation----')
makeRocPlotsCV (clf, trainX, trainY, labelEncoder)
# fit train set
clf.fit(trainX, trainY)
# Predict test set
testX = testX.toarray()
predY = clf.predict(testX)
# Classification_report
print('\n----Report for predictions on test dataset----')
print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
print('\n----ROC plot for predictions on test dataset----')
y_score = clf.predict_proba(testX)
makeRocPlot(testY, y_score, labelEncoder)
plt.show()
return accuracy_score(testY, predY)
# - #### Classification using K-Nearest Neighbor classifier
# Our implementation is based on this link https://towardsdatascience.com/k-nearest-neighbor-classifier-from-scratch-in-python-698e3de97063
# and this link https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/
# region
# calculate the Euclidean distance between two 1d-arrays
def distance(instance1, instance2):
return scipy.spatial.distance.euclidean(instance1, instance2)
def get_neighbors(training_set,
labels,
test_instance,
k,
distance=distance):
"""
    get_neighbors calculates a list of the k nearest neighbors
of an instance 'test_instance'.
The list neighbors contains 3-tuples with
(index, dist, label)
where
index is the index from the training_set,
dist is the distance between the test_instance and the
instance training_set[index]
distance is a reference to a function used to calculate the
distances
"""
distances = []
for index in range(len(training_set)):
dist = distance(test_instance, training_set[index])
distances.append((training_set[index], dist, labels[index]))
distances.sort(key=lambda x: x[1])
neighbors = distances[:k]
return neighbors
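# Usage sketch (toy data; each returned tuple is (instance, distance, label)):
#   get_neighbors(np.array([[0, 0], [1, 1], [5, 5]]), [0, 0, 1],
#                 np.array([0.9, 0.9]), k=2)
#   -> [(array([1, 1]), 0.141..., 0), (array([0, 0]), 1.272..., 0)]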
# The function 'vote' returns the most common class. (Majority Voting)
def vote(neighbors):
class_counter = Counter()
for neighbor in neighbors:
class_counter[neighbor[2]] += 1
return class_counter.most_common(1)[0][0]
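# Usage sketch: with neighbors [(x, 0.1, 2), (y, 0.2, 2), (z, 0.3, 1)]
# (tuples of (instance, distance, label)), vote(...) returns label 2.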
# ‘vote_prob’ is a function like ‘vote’ but returns the probability for all classes (like clf.predict_proba())
def vote_prob(neighbors):
class_counter = Counter()
for neighbor in neighbors:
class_counter[neighbor[2]] += 1
labels, votes = zip(*class_counter.most_common())
probabilityArray = votesToProbability(class_counter.most_common(), sum(votes))
return probabilityArray
def votesToProbability(tuplesList, totalVotes):
# tuplesList is of form [(num1,num2), (num1,num2), ...] where num1 is the label and num2 is the number of votes for this label
labelVotesDict = dict(tuplesList)
numOfClasses = 5
probabilityArray = []
for i in range(0,numOfClasses):
if i in labelVotesDict: # calculate probability
probabilityArray.append(labelVotesDict[i] / totalVotes)
else: # this label doesn't exist in the dictionary so its probability is 0
probabilityArray.append(0)
return np.asarray(probabilityArray)
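# Usage sketch: votesToProbability([(2, 3), (0, 1)], 4)
#   -> array([0.25, 0.  , 0.75, 0.  , 0.  ])   (numOfClasses is fixed at 5)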
# Make a prediction with neighbors
def predict_classification(training_set, labels, test_instance, k, distance=distance):
neighbors = get_neighbors(training_set, labels, test_instance, k, distance=distance)
prediction = vote(neighbors)
return prediction
# Make a prediction probability with neighbors
def predict_proba_classification(training_set, labels, test_instance, k, distance=distance):
neighbors = get_neighbors(training_set, labels, test_instance, k, distance=distance)
prediction_proba = vote_prob(neighbors)
return prediction_proba
# kNN Algorithm
def k_nearest_neighbors(trainX, trainY, testX, num_neighbors):
predictions = list()
for row in testX:
output = predict_classification(trainX, trainY, row, num_neighbors, distance=distance )
predictions.append(output)
return(predictions)
# kNN Algorithm probability predictions
def k_nearest_neighbors_proba(trainX, trainY, testX, num_neighbors):
predictions_proba = list()
for row in testX:
output = predict_proba_classification(trainX, trainY, row, num_neighbors, distance=distance )
predictions_proba.append(output)
return(predictions_proba)
# Evaluate an algorithm using a cross validation split
# Specific evaluation for our knn algorithm
def evaluate_algorithm(trainX, trainY, n_folds, labelEncoder):
# make rocPlots for each fold in 10 CV
f, axs = plt.subplots(5, 2)
f.set_figheight(30)
f.set_figwidth(30)
scoresAccuracy = list()
scoresPrecision = list()
scoresRecall = list()
scoresF1 = list()
    cv = StratifiedKFold(n_splits=n_folds)
i = 0
z = 0
k = 1
for train, test in cv.split(trainX, trainY):
predictions = k_nearest_neighbors(trainX[train], trainY[train], trainX[test], 100)
predY = np.asarray(predictions)
scoresAccuracy.append(accuracy_score(trainY[test], predY))
scoresPrecision.append(precision_score(trainY[test], predY, average='weighted'))
scoresRecall.append(recall_score(trainY[test], predY, average='weighted'))
scoresF1.append(f1_score(trainY[test], predY, average='weighted'))
# make roc plot for this fold
predictions_proba = k_nearest_neighbors_proba(trainX[train], trainY[train], trainX[test], 100)
predY_proba = np.asarray(predictions_proba)
makeRocPlot(trainY[test], predY_proba, labelEncoder, axs[i, z])
axs[i, z].set_title('Roc Plot for fold - {0}'.format(k))
k += 1
if z == 1:
i += 1
z = 0
else:
z = 1
plt.show()
scores = {'Accuracy':scoresAccuracy, 'Precision':scoresPrecision, 'Recall':scoresRecall, 'F1':scoresF1}
return scores
def KnnClassification(trainX, trainY, testX, testY, labelEncoder):
"""
Classify the text using the KNN classifier we implemented
"""
trainXarray = trainX.toarray()
testXarray = testX.toarray()
print('\n----10 Fold Cross Validation Evaluation----')
# evaluate algorithm
n_folds = 10
scores = evaluate_algorithm(trainXarray, trainY, n_folds, labelEncoder)
print ('Precision \t %0.2f' % (sum(scores['Precision'])/float(len(scores['Precision']))))
print ('Recalls \t %0.2f' % (sum(scores['Recall'])/float(len(scores['Recall']))))
print ('F-Measure \t %0.2f' % (sum(scores['F1'])/float(len(scores['F1']))))
print('Accuracy: \t %0.2f' % (sum(scores['Accuracy'])/float(len(scores['Accuracy']))))
# Classification_report
predictions = k_nearest_neighbors(trainXarray, trainY, testXarray, 100)
predY = np.asarray(predictions)
print('\n----Report for predictions on test dataset----')
print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
predictions_proba = k_nearest_neighbors_proba(trainXarray, trainY, testXarray, 100)
predY_proba = np.asarray(predictions_proba)
print('\n----ROC plot for predictions on test dataset----')
makeRocPlot(testY, predY_proba, labelEncoder)
plt.show()
return accuracy_score(testY, predY)
# endregion
# - ### *Split DataSet into TrainData and TestData*
# region
trainDataSet, testDataSet = train_test_split(myDataSetDf, test_size=0.2, stratify=myDataSetDf['CATEGORY'])
# reset index
trainDataSet.reset_index(drop=True, inplace=True)
testDataSet.reset_index(drop=True, inplace=True)
# save to tsv files
trainDataSet.to_csv('train_set.tsv', sep = '\t')
# save test_set categories
testDataSetCategories = testDataSet[['CATEGORY']].copy()
testDataSetCategories.to_csv('test_set_categories.tsv', sep = '\t')
testDataSet = testDataSet.drop('CATEGORY', axis=1)
testDataSet.to_csv('test_set.tsv', sep = '\t')
# endregion
# Prepare train and test data that we will need below
# region
# build label encoder for categories
le = preprocessing.LabelEncoder()
le.fit(trainDataSet["CATEGORY"])
# transform categories into numbers
trainY = le.transform(trainDataSet["CATEGORY"])
testY = le.transform(testDataSetCategories["CATEGORY"])
accuracyDict = dict()
# endregion
# ## __Vectorization__
# Let's do classification using 2 different ways of vectorization
# region language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {
# return false;
# }
# endregion
# - #### Bag-of-words vectorization
# region
bowVectorizer = CountVectorizer(max_features=1000)
trainX = bowVectorizer.fit_transform(trainDataSet['CONTENT'])
testX = bowVectorizer.transform(testDataSet['CONTENT'])
print('-------------SVM Classification with BOW Vectorization-------------')
accuracyDict["BOW-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------SVM Classification with BOW Vectorization and GridSearchCV for demonstration-------------')
SvmClassificationGridSearchCVDemo(trainX, trainY, testX, testY, le)
print('\n-------------Random Forests Classification with BOW Vectorization-------------')
accuracyDict["BOW-RandomForests"] = RandomForestClassification(trainX, trainY, testX, testY, le)
print('\n-------------Naive Bayes Classification with BOW Vectorization-------------')
accuracyDict["BOW-NB"] = NaiveBayesClassification(trainX, trainY, testX, testY, le)
print('\n-------------K Nearest Neighbor Classification with BOW Vectorization-------------')
accuracyDict["BOW-knn"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# - #### Tf-idf vectorization
# region
tfIdfVectorizer = TfidfVectorizer(max_features=1000)
trainX = tfIdfVectorizer.fit_transform(trainDataSet['CONTENT'])
testX = tfIdfVectorizer.transform(testDataSet['CONTENT'])
print('-------------SVM Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('\n-------------Random Forests Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-RandomForests"] = RandomForestClassification(trainX, trainY, testX, testY, le)
print('\n-------------Naive Bayes Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-NB"] = NaiveBayesClassification(trainX, trainY, testX, testY, le)
print('\n-------------K Nearest Neighbor Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-knn"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# #### Results Summary
# region
resultsData = {r'Vectorizer \ Classifier': ['BOW', 'Tfidf'],
'SVM': [accuracyDict["BOW-SVM"], accuracyDict["TfIdf-SVM"]],
'Random Forest': [accuracyDict["BOW-RandomForests"], accuracyDict["TfIdf-RandomForests"]],
'Naive Bayes': [accuracyDict["BOW-NB"], accuracyDict["TfIdf-NB"]],
'K Nearest Neighbor': [accuracyDict["BOW-knn"], accuracyDict["TfIdf-knn"]]}
resultsDataFrame =
|
pd.DataFrame(data=resultsData)
|
pandas.DataFrame
|
"""
Copyright (C) 2020 by the Georgia Tech Research Institute (GTRI)
This software may be modified and distributed under the terms of
the BSD 3-Clause license. See the LICENSE file for details.
"""
import json
import tempfile
import unittest
import uuid
from pathlib import Path
import pandas as pd
from model_processing.graph_creation import Evaluator, Manager, MDTranslator
from model_processing.graph_objects import DiEdge, PropertyDiGraph, Vertex
from . import DATA_DIRECTORY, OUTPUT_DIRECTORY, PATTERNS
class TestManager(unittest.TestCase):
def setUp(self):
pass
def test_ids_assigned_in_change(self):
manager = Manager(
excel_path=[
(
DATA_DIRECTORY
/ "Composition Example 2 Model Baseline.xlsx"
),
(DATA_DIRECTORY / "Composition Example 2 Model Changed.xlsx"),
],
json_path=[(PATTERNS / "Composition.json")],
)
eval_base = manager.evaluators[0]
eval_change = manager.evaluators[1]
eval_base.rename_df_columns()
eval_base.add_missing_columns()
eval_base.to_property_di_graph()
eval_change.rename_df_columns()
eval_change.add_missing_columns()
eval_change.to_property_di_graph()
self.assertTrue(
set(eval_base.translator.uml_id.keys()).issubset(
set(eval_change.translator.uml_id.keys())
)
)
for key in eval_base.translator.uml_id.keys():
if key != "count":
assert (
eval_base.translator.uml_id[key]
== eval_change.translator.uml_id[key]
)
def test_get_json_data(self):
manager = Manager(
excel_path=[
DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
],
json_path=[PATTERNS / "Composition.json"],
)
expected_keys = [
"Columns to Navigation Map",
"Pattern Graph Edges",
"Root Node",
"Vertex MetaTypes",
"Vertex Settings",
"Vertex Stereotypes",
]
assert expected_keys == list(manager.json_data[0].keys())
def test_create_evaluators(self):
manager = Manager(
excel_path=[
DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
],
json_path=[PATTERNS / "Composition.json"],
)
# weak test: create_evaluators() run during init
self.assertEqual(2, len(manager.evaluators))
for eval in manager.evaluators:
self.assertIsInstance(eval, Evaluator)
def test_get_pattern_graph_diff(self):
manager = Manager(
excel_path=[
DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
],
json_path=[PATTERNS / "Composition.json"],
)
# Create the actual graph object because get_pattern_graph_diff
# employs the graph object properties
        # With 2 different original edges of the same type we can induce a
        # match based on a rename as well as an unstable pair.
og_eval = manager.evaluators[0]
og_graph = PropertyDiGraph()
og_eval.prop_di_graph = og_graph
ch_eval = manager.evaluators[1]
ch_graph = PropertyDiGraph()
ch_eval.prop_di_graph = ch_graph
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
orig_edge = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
renm_source = DiEdge(
source=Vertex(
name="Subaru",
id="_001",
original_name="Car",
original_id="_001",
node_types=["Atomic Thing"],
),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
orig_edge2 = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="Vehicle", id="_003"),
edge_attribute="type",
)
unstab_edge1 = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="Not Car", id="_100"),
edge_attribute="type",
)
unstab_edge2 = DiEdge(
source=Vertex(name="Cup", id="_101"),
target=Vertex(name="Vehicle", id="_003"),
edge_attribute="type",
)
added_edge = DiEdge(
source=Vertex(
name="New Source",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
target=Vertex(
name="New Target",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
edge_attribute="newEdge",
)
del_edge = DiEdge(
source=Vertex(name="Old Source", id="_010"),
target=Vertex(name="Old Target", id="_011"),
edge_attribute="oldEdge",
)
original_edges = [orig_edge, orig_edge2, del_edge]
change_edge = [
renm_source,
unstab_edge1,
unstab_edge2,
added_edge,
]
orig_attrs = [
{"diedge": edge, "edge_attribute": edge.edge_attribute}
for edge in original_edges
]
change_attrs = [
{"diedge": edge, "edge_attribute": edge.edge_attribute}
for edge in change_edge
]
for edge in zip(original_edges, orig_attrs):
og_graph.add_node(
edge[0].source.name,
**{edge[0].source.name: edge[0].source}
)
og_graph.add_node(
edge[0].target.name,
**{edge[0].target.name: edge[0].target}
)
og_graph.add_edge(
edge[0].source.name, edge[0].target.name, **edge[1]
)
for edge in zip(change_edge, change_attrs):
ch_graph.add_node(
edge[0].source.name,
**{edge[0].source.name: edge[0].source}
)
ch_graph.add_node(
edge[0].target.name,
**{edge[0].target.name: edge[0].target}
)
ch_graph.add_edge(
edge[0].source.name, edge[0].target.name, **edge[1]
)
ch_dict = manager.get_pattern_graph_diff(out_directory=tmpdir)
ch_dict = ch_dict["0-1"]
changes = ch_dict["Changes"]
add = changes["Added"] # a list
deld = changes["Deleted"] # a list
unstab = ch_dict["Unstable Pairs"] # DiEdge: [DiEdge ...]
unstab[orig_edge2] = set(unstab[orig_edge2])
change = changes[orig_edge]
assert change[0] == renm_source
# TODO: Find new edges if type is not found in original and if
# the edge is composed of at least one new model element.
assert add == [added_edge, added_edge]
assert deld == [del_edge]
assert unstab == {
orig_edge2: {unstab_edge1, renm_source, unstab_edge2}
}
def test_changes_to_excel(self):
manager = Manager(
excel_path=[
DATA_DIRECTORY / "Composition Example.xlsx" for i in range(1)
],
json_path=[PATTERNS / "Composition.json"],
)
og_edge = DiEdge(
source=Vertex(name="green"),
target=Vertex(name="apple"),
edge_attribute="fruit",
)
change_edge = DiEdge(
source=Vertex(name="gala"),
target=Vertex(name="apple"),
edge_attribute="fruit",
)
added_edge = DiEdge(
source=Vertex(name="blueberry"),
target=Vertex(name="berry"),
edge_attribute="bush",
)
deleted_edge = DiEdge(
source=Vertex(name="yellow"),
target=Vertex(name="delicious"),
edge_attribute="apple",
)
unstable_key = DiEdge(
source=Vertex(name="tomato"),
target=Vertex(name="fruit"),
edge_attribute="fruit",
)
unstable_one = DiEdge(
source=Vertex(name="tomato"),
target=Vertex(name="vegetable"),
edge_attribute="fruit",
)
unstable_two = DiEdge(
source=Vertex(name="tomahto"),
target=Vertex(name="fruit"),
edge_attribute="fruit",
)
fake_datas = {
"0-1": {
"Changes": {
"Added": [added_edge],
"Deleted": [deleted_edge],
og_edge: [change_edge],
},
"Unstable Pairs": {
unstable_key: [unstable_one, unstable_two]
},
}
}
manager.evaluator_change_dict = fake_datas
with tempfile.TemporaryDirectory() as tmpdir:
outdir = Path(tmpdir)
manager.changes_to_excel(out_directory=outdir)
created_file_name = list(outdir.glob("*.xlsx"))[0]
created_file = OUTPUT_DIRECTORY / created_file_name
created_df = pd.read_excel(created_file)
created_dict = created_df.to_dict()
expected_data = {
"Edit 1": ["('green', 'apple', 'fruit')"],
"Edit 2": ["('gala', 'apple', 'fruit')"],
"Unstable Matches Original": [
"('tomato', 'fruit', 'fruit')",
"('tomato', 'fruit', 'fruit')",
],
"Unstable Matches Change": [
"('tomato', 'vegetable', 'fruit')",
"('tomahto', 'fruit', 'fruit')",
],
"Added": ["('blueberry', 'berry', 'bush')"],
"Deleted": ["('yellow', 'delicious', 'apple')"],
}
expected_df = pd.DataFrame(
data=dict(
[(k, pd.Series(v)) for k, v in expected_data.items()]
)
)
expected_dict = expected_df.to_dict()
self.assertDictEqual(expected_dict, created_dict)
self.assertTrue(expected_df.equals(created_df))
def test_graph_difference_to_json(self):
manager = Manager(
excel_path=[
DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
],
json_path=[PATTERNS / "Composition.json"],
)
tr = manager.translator[0]
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
orig_edge = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
renm_source = DiEdge(
source=Vertex(
name="Subaru",
id="_001",
original_name="Car",
original_id="_001",
node_types=["Atomic Thing"],
),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
orig_edge2 = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="Vehicle", id="_003"),
edge_attribute="type",
)
renm_target = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(
name="vehicle",
id="_003",
original_name="Vehicle",
original_id="_003",
node_types=["Composite Thing"],
),
edge_attribute="type",
)
orig_edge3 = DiEdge(
source=Vertex(name="subaru", id="_004"),
target=Vertex(name="Vehicle", id="_005"),
edge_attribute="type",
)
renm_both = DiEdge(
source=Vertex(
name="Subaru",
id="_004",
original_name="subaru",
original_id="_004",
node_types=["composite owner"],
),
target=Vertex(
name="vehicle",
id="_005",
original_name="Vehicle",
original_id="_005",
node_types=["Atomic Thing"],
),
edge_attribute="type",
)
orig_edge4 = DiEdge(
source=Vertex(name="subaru", id="_004"),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
new_source = DiEdge(
source=Vertex(
name="Subaru",
id=uuid.uuid4(),
node_types=["Composite Thing"],
),
target=Vertex(name="car", id="_002"),
edge_attribute="type",
)
orig_edge5 = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(name="Vehicle", id="_005"),
edge_attribute="type",
)
new_target = DiEdge(
source=Vertex(name="Car", id="_001"),
target=Vertex(
name="vehicle",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
edge_attribute="type",
)
orig_edge6 = DiEdge(
source=Vertex(name="Car", id="_007"),
target=Vertex(name="Vehicle", id="_005"),
edge_attribute="type",
)
new_sub = Vertex(
name="Subaru", id=uuid.uuid4(), node_types=["Composite Thing"]
)
sub_cons = {
"successors": [
{
"source": "Subaru",
"target": "Car",
"edge_attribute": "type",
}
]
}
new_sub.successors = sub_cons
new_both = DiEdge(
source=Vertex(
name="Subaru",
id=uuid.uuid4(),
node_types=["Composite Thing"],
),
target=Vertex(
name="vehicle",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
edge_attribute="type",
)
added_edge = DiEdge(
source=Vertex(
name="New Source",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
target=Vertex(
name="New Target",
id=uuid.uuid4(),
node_types=["Atomic Thing"],
),
edge_attribute="newEdge",
)
del_edge = DiEdge(
source=Vertex(name="Old Source", id="_010"),
target=Vertex(name="Old Target", id="_011"),
edge_attribute="oldEdge",
)
change_dict = {
orig_edge: [renm_source],
orig_edge2: [renm_target],
orig_edge3: [renm_both],
orig_edge4: [new_source],
orig_edge5: [new_target],
orig_edge6: [new_both],
"Added": [added_edge],
"Deleted": [del_edge],
}
changes = manager.graph_difference_to_json(
change_dict=change_dict,
evaluators="0-1",
translator=tr,
out_directory=tmpdir,
)
rename = 0
replace = 0
create = 0
delete = 0
fall_through_ops = []
for item in changes:
op = item["ops"][0]["op"]
if op == "create":
create += 1
elif op == "replace":
replace += 1
elif op == "rename":
rename += 1
elif op == "delete":
delete += 1
else:
fall_through_ops.append(op)
# expect 4 node Renames
# expect 7 edge replaces (1 is from add edge)
# expect 6 node creates
# expect 1 delete
assert (
rename == 4 and replace == 7 and create == 6 and delete == 1
)
assert not fall_through_ops
def tearDown(self):
pass
class TestEvaluator(unittest.TestCase):
# TODO: Make sure all additional graph objects that are desired are
# created by the graph creation logic.
# TODO: Test the PROCESS of some of these functions.
def setUp(self):
data = (PATTERNS / "Composition.json").read_text()
data = json.loads(data)
self.translator = MDTranslator(
json_path=(PATTERNS / "Composition.json"), json_data=data
)
self.evaluator = Evaluator(
excel_file=DATA_DIRECTORY / "Composition Example.xlsx",
translator=self.translator,
)
data_dict = {
"Component": [
"Car",
"Car",
"Car",
"Car",
"Car",
"Car",
"Car",
"Wheel",
"Wheel",
"Wheel",
"Engine",
"Engine",
"Engine",
"Engine",
"Engine",
"Engine",
],
"Position": [
"engine",
"chassis",
"driveshaft",
"front passenger",
"front driver",
"rear passenger",
"rear driver",
"hub",
"tire",
"lug nut",
"one",
"two",
"three",
"four",
"drive output",
"mount",
],
"Part": [
"Engine",
"Chassis",
"Driveshaft",
"Wheel",
"Wheel",
"Wheel",
"Wheel",
"Hub",
"Tire",
"Lug Nut",
"Cylinder",
"Cylinder",
"Cylinder",
"Cylinder",
"Drive Output",
"Mount",
],
}
self.evaluator.df =
|
pd.DataFrame(data=data_dict)
|
pandas.DataFrame
|