# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from tests.fixtures import DataTestCase
import mock
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
class RelevantFeatureAugmenterTestCase(DataTestCase):
def setUp(self):
self.test_df = self.create_test_data_sample()
fc_parameters = {"length": None}
self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
"b": fc_parameters.copy()}
def test_not_fitted(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
self.assertRaises(RuntimeError, augmenter.transform, X)
def test_no_timeseries(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
y = pd.Series()
self.assertRaises(RuntimeError, augmenter.fit, X, y)
def test_nothing_relevant(self):
augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
column_value="val", column_id="id", column_sort="sort",
column_kind="kind")
y = pd.Series({10: 1, 500: 0})
X = pd.DataFrame(index=[10, 500])
augmenter.set_timeseries_container(self.test_df)
augmenter.fit(X, y)
transformed_X = augmenter.transform(X.copy())
self.assertEqual(list(transformed_X.columns), [])
self.assertEqual(list(transformed_X.index), list(X.index))
def test_evaluate_only_added_features_true(self):
"""
The boolean flag `evaluate_only_extracted_features` makes sure that only the time-series-based features are
filtered. This unit test checks that behaviour.
"""
augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
filter_only_tsfresh_features=True,
column_value="val", column_id="id", column_sort="sort", column_kind="kind")
y = pd.Series({10: 1, 500: 0})
# %%
import re
import pandas as pd
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
train = pd.read_csv("labeledTrainData.tsv", header=0,  # read in the labeled training set
delimiter="\t", quoting=3)
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", quoting=3)  # read in the test set
unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0,  # read in the unlabeled training set
delimiter="\t", quoting=3)
# Confirm the number of reviews that were read
print("Read %d labeled train reviews, %d labeled test reviews, "
"and %d unlabeled reviews\n" % (train["review"].size,
test["review"].size, unlabeled_train["review"].size))
# %%
# Review preprocessing
def review_to_wordlist(review, remove_stopwords=False):
# Whether stop words are removed is controlled by remove_stopwords;
# the main job here is stripping HTML markup and non-letter characters.
# The function converts a review into a sequence of words, optionally removing stop words, and returns them.
# 1. Remove HTML
review_text = BeautifulSoup(review, features="html.parser").get_text()
#
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]", " ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
words = set(words)  # note: this also deduplicates the words and discards their order
#
# 5. Return a list of words
return words
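# Illustrative example (not part of the original script): with remove_stopwords=True,
# review_to_wordlist("<b>A great, great film!</b>") strips the HTML tag and punctuation,
# lowercases and splits the text, drops the stop word "a", and (because of the set() call
# above) returns the deduplicated words {"great", "film"}.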
# The punkt tokenizer for sentence splitting must be available; download it once with nltk.download("punkt")
import nltk.data
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Define a function to split a review into parsed sentences
def review_to_sentences(review, tokenizer, remove_stopwords=False):
# Split the review into parsed sentences; returns a list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append(review_to_wordlist(raw_sentence,
remove_stopwords))
# Return the list of sentences (each sentence is a list of words, so this returns a list of lists)
return sentences
# %%
sentences = [] # Initialize an empty list of sentences
# Both the labeled and the unlabeled training sets are used for training.
# The two loops below split the reviews into sentences.
print("Parsing sentences from training set")
for review in train["review"]:
sentences += review_to_sentences(review, tokenizer)
print("Parsing sentences from unlabeled set")
for review in unlabeled_train["review"]:
sentences += review_to_sentences(review, tokenizer)
# This takes a while
# %%
# Import the built-in logging module and configure it so that Word2Vec creates nice output messages
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
# Set values for various parameters
num_features = 300  # Word vector dimensionality
min_word_count = 40  # Minimum word count
num_workers = 16  # Number of threads to run in parallel
context = 10  # Context window size
downsampling = 1e-3  # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
from gensim.models import word2vec
print("Training model...")
model = word2vec.Word2Vec(sentences, workers=num_workers,
size=num_features, min_count=min_word_count,
window=context, sample=downsampling)
# %%
# If no further training of the model is planned, calling init_sims makes it much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and save the model for later use.
# You can load it later using Word2Vec.load().
model_name = "300features_40minwords_10context"
model.save(model_name)
# %%
# Load the model that we created in Part 2
from gensim.models import Word2Vec
model = Word2Vec.load("300features_40minwords_10context")
# type(model.syn0)
# model.syn0.shape
print(type(model.wv.vectors))
print(model.wv.vectors.shape)
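# Quick sanity check (illustrative addition, assuming the gensim 3.x API already used in
# this script): inspect a few nearest neighbours to confirm the embeddings look reasonable.
if "movie" in model.wv.vocab:
    print(model.wv.most_similar("movie", topn=5))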
# %%
import numpy as np # Make sure that numpy is imported
print("Read %d labeled train reviews, %d labeled test reviews, " \
"and %d unlabeled reviews\n" % (train["review"].size,
test["review"].size, unlabeled_train["review"].size))
num_features = 300  # 300 features
clean_train_reviews = []
for review in train["review"]:
clean_train_reviews.append(review_to_wordlist(review, remove_stopwords=True))
print("Creating average feature vecs for test reviews")
clean_test_reviews = []
for review in test["review"]:
clean_test_reviews.append(review_to_wordlist(review, remove_stopwords=True))
# %%
from sklearn.cluster import KMeans
import time
start = time.time() # Start time
# Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
# average of 5 words per cluster
word_vectors = model.wv.vectors
num_clusters = int((word_vectors.shape[0] / 5))
# Initialize a k-means object and use it to extract centroids
kmeans_clustering = KMeans(n_clusters=num_clusters)
idx = kmeans_clustering.fit_predict(word_vectors)
# Cluster the word2vec word vectors; word_vectors has shape (16490, 300):
# 16490 vocabulary words, each represented by 300 features.
# The output is the cluster index assigned to each word (a 1-D array).
# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print("Time taken for K Means clustering: ", elapsed, "seconds.")
word_centroid_map = dict(zip(model.wv.index2word, idx))
# Zip the words with their cluster indices and convert the resulting pairs into a dict (word -> cluster)
# %%
# For the first 10 clusters
for cluster in range(0, 10):
#
# Print the cluster number
print("\nCluster %d" % cluster)
#
# Find all of the words for that cluster number, and print them out
words = []
mapvalue = list(word_centroid_map.values())
for i in range(0, len(mapvalue)):
if mapvalue[i] == cluster:
words.append(list(word_centroid_map.keys())[i])
print(words)
# %%
def create_bag_of_centroids(wordlist, word_centroid_map):
#
# Number of cluster centroids
num_centroids = max(word_centroid_map.values()) + 1
#
# Pre-allocate the bag of centroids vector (for speed)
bag_of_centroids = np.zeros(num_centroids, dtype="float32")
# Create a zero array with one counter per cluster
#
# For every word in the review, increment the bag_of_centroids entry at that word's cluster index
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
#
# Return the "bag of centroids"
return bag_of_centroids
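# Illustrative sanity check (hypothetical two-cluster map, not taken from the trained model):
_example_map = {"good": 0, "bad": 1}
_example_vec = create_bag_of_centroids(["good", "good", "bad"], _example_map)
# _example_vec is array([2., 1.], dtype=float32): one count per cluster occurrence.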
# %%
# Pre-allocate an array for the training set bags of centroids (for speed)
train_centroids = np.zeros((train["review"].size, num_clusters), dtype="float32")
# Transform the training set reviews into bags of centroids
counter = 0
for review in clean_train_reviews:  # clean_train_reviews is a list of reviews; count the clusters of each review's words
train_centroids[counter] = create_bag_of_centroids(review, word_centroid_map)
# each review is passed to create_bag_of_centroids as the wordlist
counter += 1
# Repeat for test reviews
test_centroids = np.zeros((test["review"].size, num_clusters),
dtype="float32")
counter = 0
for review in clean_test_reviews:
test_centroids[counter] = create_bag_of_centroids(review, word_centroid_map)
counter += 1
# Fit a random forest and extract predictions
from sklearn.ensemble import RandomForestClassifier
# Classify the reviews with a random forest of 100 decision trees
forest = RandomForestClassifier(n_estimators=100)
# Fitting the forest may take a few minutes
print("Fitting a random forest to labeled training data...")
forest = forest.fit(train_centroids, train["sentiment"])
result = forest.predict(test_centroids)
# Write the test results
output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert result[0] == expected_intervals
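# Note (added for clarity, inferred from the expected values above): each interval tuple
# appears to be (start, end, mean, std) of the [0, 1) slice assigned to a category;
# 'foo' occurs 3/6 times, so it gets [0, 0.5) with mean 0.25 and std 0.5 / 6.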
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
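# Note (inferred from the expected values above): without clipping, _normalize appears to
# wrap values into [0, 1) modulo 1 (e.g. -0.43 -> 0.57, 1.5 -> 0.5), whereas clip=True
# clamps out-of-range values to the [0, 1] boundaries.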
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 13:30:31 2020
@author: User
"""
import sys
import datetime as dt
from collections import Counter
import pprint
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import os
from platform import system
import glob
import cycler
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from bs4 import BeautifulSoup
import re
from scipy.stats import linregress
# from sklearn import linear_model
import scipy.signal
import itertools
from itertools import chain, repeat
import logging
import datetime as dt
from pathlib import Path
# import h5py
from multiprocessing import Pool, cpu_count
# import timeit
# import time
matplotlib.rcParams.update({"font.size": 16})
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
try:
import statsmodels.formula.api as smf
import statsmodels.api as sm
import seaborn as sns
except Exception as e:
print("No modules: %s" % e)
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.experiments.EIS.models import Model_Collection
import post_helper
import merger
# import EC
# sys.path.append(list(FH_path.rglob('*.py')))
# import FH_path.joinpath('FindExpFolder.py')
# import FindExpFolder.py
# from FileHelper import FindExpFolder
# from FindExpFolder import *
# from .experiments import EIS
# from .runEC import run_PAR_DW
from elchempy.runEC.EC_logging_config import start_logging
# logger = start_logging(__name__)
else:
# print('\n\n***** run_PAR_DW *****')
print(f"File: {__file__}, Name:{__name__}, Package:{__package__}")
# FH_path = Path(__file__).parent.parent.parent
# sys.path.append(str(FH_path))
# import FileHelper
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.runEC.EC_logging_config import start_logging
from elchempy.PostEC import post_helper, merger
from elchempy.experiments.EIS.models import Model_Collection
# logger = start_logging(__name__)
_logger = logging.getLogger(__name__)
_logger.setLevel(20)
EvRHE = "E_AppV_RHE"
class PostEC:
AllColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"E_Applied_VRHE",
"j A/cm2",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
DropColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
KeepColls = [
"E_AppV_RHE",
"jmAcm-2",
"Jcorr",
"J_N2_scan",
"Jkin_max",
"Jkin_min",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
# SampleCodes = FindExpFolder.LoadSampleCode()
# FindExpFolder('VERSASTAT').SampleCodeLst
# PostDestDir.mkdir(parents=True,exist_ok=True)
# ExpPARovv = EC_loadOVV()
# OnlyRecentMissingOVV = runEC.MainPrepareList()
# ExpPARovv = ExpPARovv.iloc[100:120]
OutParsID = pd.DataFrame()
# Go1, Go2, Go3 = True, False, False
# Go1, Go2, Go3 = False, True, False
Go1, Go2, Go3 = False, False, True
# KL_coeff = KL_coefficients()
EvRHE_List = [
0,
0.1,
0.2,
0.3,
0.4,
0.45,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.9,
1,
]
def __init__(self):
self.DestDir = FindExpFolder("VERSASTAT").PostDir
@staticmethod
def StartLogging(level_log="INFO"):
# level_log = kwargs['level']
log_fn = FindExpFolder("VERSASTAT").PostDir.joinpath("PostEC_logger.log")
logging.basicConfig(
filename=log_fn,
filemode="w",
level=level_log,
format="%(asctime)s %(levelname)s, %(lineno)d: %(message)s",
)
logging.warning("Started logging for PostEC script...")
def applyParallel(dfGrouped, func):
with Pool(cpu_count() - 1) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return ret_list
def check_status(file, verbose=False):
"""Check status will return (status,extra) of filename"""
PAR_file_test = Path(str(file)).stem
match = [
re.search("(?<!VERS|Vers)(AST|postAST|pAST)", str(a))
for a in PAR_file_test.split("_")
]
if any(match):
status = "EoL"
extra = [
a
for a in PAR_file_test.split("_")
if [i for i in match if i][0][0] in a
]
if verbose:
print(file, status, *extra)
return status, extra[0]
# if any([re.search(r'', i) for i in str(Path(str(file)).stem.split('_'))]):
else:
return "BoL", 0
# status =
# extra = [0]
# return status,extra
def postEC_Status(files, verbose=False):
# files = ['N2_HER_1500rpm_JOS6_pAST-sHA_285_#3_Disc_Parstat']
if len(files) > 1:
status_lst, extra_lst = [], []
for file in files:
status, extra = PostEC.check_status(file)
status_lst.append(status)
extra_lst.append(extra)
return status_lst, extra_lst
else:
return PostEC.check_status(files)
def OLD_PostOrganizeFolders(TakeRecentList=True):
postOVV = []
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PAR_version = FileOperations.version
RunOVV_fn_opts = list(
FindExpFolder("VERSASTAT").DestDir.rglob(
"RunOVV_v{0}.xlsx".format(PAR_version)
)
)
RunOVV_fn = [i for i in RunOVV_fn_opts if not "_Conflict" in i.stem][0]
if RunOVV_fn.is_file() and TakeRecentList == True:
OvvFromFile = pd.read_excel(RunOVV_fn, index_col=[0])
status, extra = PostEC.postEC_Status(OvvFromFile.PAR_file.values)
OvvFromFile = OvvFromFile.assign(
**{
"Date_PAR_EXP": OvvFromFile.PAR_date - OvvFromFile.EXP_date,
"Status": status,
"Extra": extra,
}
)
OnlyRecentMissingOVV = OvvFromFile
# OvvFromFile['Date_PAR_EXP'] = OvvFromFile.PAR_date-OvvFromFile.EXP_date
# OvvFromFile['Status'] = OvvFromFile.PAR_file.values
print("EC OVV loaded from file:{0}".format(RunOVV_fn))
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, ["Dest_dir", "EXP_dir", "PAR_file"]
)
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
# CS_parts_pOVV = FileOperations.find_CS_parts(OnlyRecentMissingOVV.Dest_dir.iloc[0])
# chLst =[]
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in OnlyRecentMissingOVV.Dest_dir.values]
# OnlyRecentMissingOVV['Dest_dir'] = chLst
# else:
# pass
postOVVlst, outLst = [], []
postOVVcols = [
"DestFilename",
"SampleID",
"Status",
"Status_extra",
"Electrolyte",
"Gas",
"RPM",
"Scanrate",
"EXP_date",
"Type_Exp",
"SourceFilename",
"Exp_dir",
]
# postOVVout = PostEC.FromListgrp(group)
# postOVVlst = PostEC.applyParallel(OnlyRecentMissingOVV.groupby('Dest_dir'),PostEC.FromListgrp)
# postOVVlst = [outLst.append(PostEC.FromListgrp(i)) for i in OnlyRecentMissingOVV.groupby('Dest_dir')]
# for i in OnlyRecentMissingOVV.groupby('Dest_dir'):
# PostEC.FromListgrp(i)
# try:
# postOVVout = pd.DataFrame(postOVVlst,columns=)
# except Exception as e:
# postOVVout = pd.DataFrame(postOVVlst)
# for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir']):
# PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])
# pass
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir'])]
postOVVout = pd.concat(
[pd.DataFrame(i, columns=postOVVcols) for i in outLst],
sort=False,
ignore_index=True,
)
postOVVout.to_excel(PostDestDir.joinpath("postEC_Organized.xlsx"))
return postOVVout
class EnterExitLog:
def __init__(self, funcName):
self.funcName = funcName
def __enter__(self):
_logger.info(f"Started: {self.funcName}")
self.init_time = dt.datetime.now()
return self
def __exit__(self, type, value, tb):
self.end_time = dt.datetime.now()
self.duration = self.end_time - self.init_time
_logger.info(f"Finished: {self.funcName} in {self.duration} seconds")
def func_timer_decorator(func):
def func_wrapper(*args, **kwargs):
with EnterExitLog(func.__name__):
return func(*args, **kwargs)
return func_wrapper
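# Illustrative usage (sketch, not from the original module; reload_pars is a hypothetical
# function name): decorating a function logs its start, end and duration via EnterExitLog.
#
#     @func_timer_decorator
#     def reload_pars():
#         ...
#
# Calling reload_pars() then emits "Started: reload_pars" and "Finished: reload_pars in ...".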
def get_daily_pickle(exp_type=""):
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(daily_pkl_options, key=lambda x: x.stat().st_ctime)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
}
)
if "EIS" in exp_type:
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_BRUTE_{system()}.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_RAW_WB_{system()}.pkl.compress"
),
}
)
return _result
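# Illustrative sketch of the mapping returned by e.g. get_daily_pickle("EIS_pars"), based on
# the keys assembled above (exact values depend on the local folder layout):
# {"today": date, "daily_path": Path, "_exists": bool, "daily_options": [Path, ...],
#  "daily_path_RAW": Path, "_raw_exists": bool, "daily_options_RAW": [Path, ...],
#  "daily_path_BRUTE": Path, "daily_path_RAW_WB": Path}  # the last two only for EIS types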
def _collect_test():
tt = CollectLoadPars(load_type="fast")
class CollectLoadPars:
def __init__(self, load_type="fast"):
self.load_type = load_type
self.load_pars()
self.collect_dict()
def load_pars(self):
_BaseLoad = BaseLoadPars()
_kws = {"EC_index": _BaseLoad.EC_index, "SampleCodes": _BaseLoad.SampleCodes}
if "fast" in self.load_type:
_kws.update(**{"reload": False, "reload_raw": False})
self.EIS_load = EIS_LoadPars(**_kws)
self.ORR_load = ORR_LoadPars(**_kws)
self.N2_load = N2_LoadPars(**_kws)
def collect_dict(self):
_load_attrs = [i for i in self.__dict__.keys() if i.endswith("_load")]
_collect = {}
for _load_pars in _load_attrs:
_pars_name = f'{_load_pars.split("_")[0]}_pars'
if hasattr(getattr(self, _load_pars), _pars_name):
_pars = getattr(getattr(self, _load_pars), _pars_name)
_collect.update({_pars_name: _pars})
self.pars_collection = _collect
class BaseLoadPars:
_required_funcs = [
"make_raw_pars_from_scratch",
"edit_raw_columns",
"search_pars_files",
"read_in_pars_files",
"extra_stuff_delegator",
]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
reload=False,
reload_raw=False,
):
self.exp_type = exp_type
self._auto_set_exp_type()
self.EC_index = EC_index
self.SampleCodes = SampleCodes
self._check_class_req_functions()
self.check_EC_index()
self.set_OVV_exp_type()
self._reload = reload
self._reload_raw = reload_raw
self.get_daily_pickle()
if self.exp_type:
self.load_delegator()
def _auto_set_exp_type(self):
_cls_name = self.__class__.__name__
if "_" in _cls_name:
_cls_exp_type = _cls_name.split("_")[0]
_exp_type = f"{_cls_exp_type}_pars"
self.exp_type = _exp_type
def check_EC_index(self):
if self.EC_index.empty:
EC_index = ECRunOVV(load=1).EC_index
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
self.EC_index = EC_index
if self.SampleCodes.empty:
SampleCodes = FindExpFolder().LoadSampleCode()
self.SampleCodes = SampleCodes
# SampleCodesChar().load
def set_OVV_exp_type(self):
if not self.EC_index.empty and self.exp_type:
PAR_exp_uniq = self.EC_index.PAR_exp.unique()
PAR_match = [
parexp
for parexp in PAR_exp_uniq
if self.exp_type.split("_")[0] in parexp
]
self.exp_type_match = PAR_match
# if PAR_match:
EC_index_exp = self.EC_index.loc[self.EC_index.PAR_exp.isin(PAR_match)]
self.EC_index_exp = EC_index_exp
if EC_index_exp.empty:
_logger.error(f'set_OVV_exp_type "{self.__class__.__name__}" empty')
self.EC_index_exp_destdirs = EC_index_exp.Dest_dir.unique()
def get_daily_pickle(self):
exp_type = self.exp_type
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(
daily_pkl_options, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
if not daily_pkl_options and not self._reload_raw:
self._reload_raw = True
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
_pickle_path_RAW_read_in = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{exp_type}_{system()}_RAW_read_in.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
"pkl_path_RAW_read_in": _pickle_path_RAW_read_in,
}
)
if "EIS" in exp_type:
daily_pkl_options_RAW_WB = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW_WB.pkl.compress"
)
)
daily_pkl_options_RAW_WB = sorted(
daily_pkl_options_RAW_WB, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_BRUTE.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder(
"VERSASTAT"
).PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW_WB.pkl.compress"
),
"daily_options_RAW_WB": daily_pkl_options_RAW_WB,
}
)
self.daily_pickle_path = _result
def load_delegator(self):
setattr(self, self.exp_type, pd.DataFrame())
if self._reload:
if self._reload_raw:
self.make_raw_pars_from_scratch()
else:
self.read_in_daily_raw()
if hasattr(self, "edit_raw_columns"):
try:
self.edit_raw_columns()
except Exception as e:
_logger.warning(
f'edit_raw_columns in load_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
self.save_daily_pars()
else:
self.read_in_daily_pars()
try:
self.extra_stuff_delegator()
except Exception as e:
_logger.warning(
f'extra_stuff_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
def _check_class_req_functions(self):
for _f in self._required_funcs:
if not hasattr(self, _f) and "BaseLoadPars" not in self.__class__.__name__:
_logger.warning(
f'Class "{self.__class__.__name__}" is missing required func: "{_f}"'
)
def save_daily_pars(self):
pars = getattr(self, self.exp_type)
pars.to_pickle(self.daily_pickle_path["daily_path"])
_logger.info(
f'{self.exp_type} len({len(pars)}) OVV to daily pickle: {self.daily_pickle_path.get("daily_path")}'
)
def read_in_daily_pars(self):
if self.daily_pickle_path.get("daily_options"):
_pars_fp = self.daily_pickle_path.get("daily_options")[-1]
_logger.info(
f"start read_in_daily_pars {self.exp_type} pars OVV from daily {_pars_fp} "
)
_pars = pd.read_pickle(_pars_fp)
try:
_pars = FileOperations.ChangeRoot_DF(_pars, [], coltype="string")
setattr(self, self.exp_type, _pars)
_logger.info(f"Loaded {self.exp_type} pars OVV from daily {_pars_fp} ")
except Exception as e:
_pars = pd.DataFrame()
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp} {e} "
)
else:
_pars = pd.DataFrame()
_pars_fp = "options empty list"
if _pars.empty:
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp}: empty "
)
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", _pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW_read_in.to_pickle(_raw_read_fp)
def read_in_daily_raw(self):
_raw_fp = self.daily_pickle_path.get("daily_options_RAW")[-1]
_pars_RAW = pd.read_pickle(_raw_fp)
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
if not "level_0" in _pars_RAW.columns:
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(f"Loaded raw df {self.exp_type} from daily {_raw_fp} ")
def save_daily_raw(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.to_pickle(self.daily_pickle_path.get("daily_path_RAW"))
_logger.info(
f'{self.exp_type} OVV to daily pickle: {self.daily_pickle_path.get("daily_path_RAW")}'
)
def set_gen_raw_fls(self):
_par_files = [
list(self.search_pars_files(d)) for d in self.EC_index_exp_destdirs
]
self._par_files = _par_files
if not _par_files:
_logger.warning(f"{self.exp_type} set_gen_raw_fls: list empty ")
self._par_fls_gen = (a for i in self._par_files for a in i)
@func_timer_decorator
def generate_raw_df(self):
if not hasattr(self, "_par_fls_gen"):
self.set_gen_raw_fls()
_pars_lst = list(self.read_in_pars_files(self._par_fls_gen))
try:
_pars_RAW = pd.concat(_pars_lst, sort=False)
except Exception as e:
_pars_RAW = pd.DataFrame()
_logger.warning(f"{self.exp_type} generate_raw_df: {e}")
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
@staticmethod
def get_source_meta(filepath):
i = filepath
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_meta_res = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
return _meta_res
def extra_stuff_delegator(self):
_extra_funcs = [i for i in self.__dict__.keys() if i.startswith("_extra")]
for _func in _extra_funcs:
try:
func = getattr(self, _func)
func()
# self._extra_plotting()
except Exception as e:
_logger.info(
f"{self.__class__.__name__} Extra stuff failed because {e}"
)
def _testing():
tt = EIS_LoadPars(reload=False, reload_raw=False)
tt._reload_raw
self = tt
self.load_delegator()
self.make_raw_pars_from_scratch()
class EIS_LoadPars(BaseLoadPars):
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="EIS_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
if i.name.endswith("xlsx"):
_pp = pd.read_excel(i, index_col=[0])
elif i.name.endswith("pkl"):
_pp = pd.read_pickle(i)
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_meta = self.get_source_meta(i)
_pp = _pp.assign(**_meta)
yield _pp
except StopIteration:
return "all done"
print("gen empty")
def search_pars_files(self, _dest_dir):
return Path(_dest_dir.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
self._load_WB_delegator()
self._merge_WB_pars_raw()
self._raw_finish_edit_columns()
self.save_daily_raw()
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
EIS_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
EIS_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_RAW_read_in.to_pickle(_raw_read_fp)
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def _raw_extra_steps(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[a for i in float_cols for a in EIS_pars_all.columns if a.startswith(i)]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(0)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
wrong_fls = [
EIS_pars_all.loc[EIS_pars_all[i].astype(str).str.contains("Parameter")]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _load_WB_delegator(self):
daily_options_WB = self.daily_pickle_path.get("daily_options_RAW_WB")
if daily_options_WB:
_WB_RAW_daily_path = daily_options_WB[-1]
if _WB_RAW_daily_path.exists() and not (self._reload or self._reload_raw):
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
import pandas as pd
from pandas.tseries.offsets import Day
from .pandas_extensions.holiday import Holiday
from .pandas_extensions.korean_holiday import (
KoreanSolarHoliday,
KoreanLunarHoliday,
alternative_holiday,
childrens_day_alternative_holiday,
last_business_day,
)
# Original precomputed KRX holidays
# that had been maintained formerly in exchange_calendar_xkrx.py.
original_precomputed_krx_holidays = pd.to_datetime(
[
"1986-01-01",
"1986-01-02",
"1986-01-03",
"1986-03-10",
"1986-05-05",
"1986-05-16",
"1986-06-06",
"1986-07-17",
"1986-08-15",
"1986-09-18",
"1986-10-01",
"1986-10-03",
"1986-10-09",
"1986-12-25",
"1986-12-29",
"1986-12-30",
"1986-12-31",
"1987-01-01",
"1987-01-02",
"1987-01-29",
"1987-03-10",
"1987-05-05",
"1987-07-17",
"1987-10-01",
"1987-10-07",
"1987-10-08",
"1987-10-09",
"1987-12-25",
"1987-12-28",
"1987-12-29",
"1987-12-30",
"1987-12-31",
"1988-01-01",
"1988-02-18",
"1988-03-01",
"1988-03-10",
"1988-04-05",
"1988-05-05",
"1988-05-23",
"1988-06-06",
"1988-08-15",
"1988-09-26",
"1988-10-03",
"1988-12-27",
"1988-12-28",
"1988-12-29",
"1988-12-30",
"1989-01-02",
"1989-01-03",
"1989-02-06",
"1989-03-01",
"1989-03-10",
"1989-04-05",
"1989-05-05",
"1989-05-12",
"1989-06-06",
"1989-07-17",
"1989-08-15",
"1989-09-14",
"1989-09-15",
"1989-10-12",
"1989-12-25",
"1990-01-01",
"1990-01-02",
"1990-01-26",
"1990-03-01",
"1990-04-05",
"1990-05-02",
"1990-06-06",
"1990-07-17",
"1990-08-15",
"1990-10-01",
"1990-10-02",
"1990-10-03",
"1990-10-04",
"1990-10-09",
"1990-12-25",
"1990-12-27",
"1990-12-28",
"1990-12-31",
"1991-01-02",
"1991-02-14",
"1991-02-15",
"1991-03-01",
"1991-04-05",
"1991-05-21",
"1991-06-06",
"1991-07-17",
"1991-08-15",
"1991-09-23",
"1991-10-03",
"1991-12-25",
"1991-12-27",
"1991-12-30",
"1991-12-31",
"1992-01-01",
"1992-01-02",
"1992-02-03",
"1992-02-04",
"1992-02-05",
"1992-03-10",
"1992-05-05",
"1992-07-17",
"1992-09-10",
"1992-09-11",
"1992-12-25",
"1992-12-28",
"1992-12-29",
"1992-12-30",
"1992-12-31",
"1993-01-01",
"1993-01-22",
"1993-03-01",
"1993-03-10",
"1993-04-05",
"1993-05-05",
"1993-05-28",
"1993-07-07",
"1993-09-29",
"1993-09-30",
"1993-10-01",
"1994-01-03",
"1994-02-09",
"1994-02-10",
"1994-02-11",
"1994-02-14",
"1994-03-01",
"1994-04-05",
"1994-05-05",
"1994-05-18",
"1994-05-20",
"1994-06-06",
"1994-08-15",
"1994-09-19",
"1994-09-20",
"1994-09-21",
"1994-10-03",
"1994-12-29",
"1994-12-30",
"1995-01-02",
"1995-01-30",
"1995-01-31",
"1995-02-01",
"1995-03-01",
"1995-04-05",
"1995-05-01",
"1995-05-05",
"1995-06-06",
"1995-07-17",
"1995-08-15",
"1995-09-08",
"1995-10-03",
"1995-12-25",
"1995-12-29",
"1996-01-01",
"1996-01-02",
"1996-02-19",
"1996-02-20",
"1996-03-01",
"1996-04-05",
"1996-05-01",
"1996-05-24",
"1996-06-06",
"1996-07-17",
"1996-08-15",
"1996-09-26",
"1996-09-27",
"1996-10-03",
"1996-12-25",
"1996-12-30",
"1996-12-31",
"1997-01-01",
"1997-02-07",
"1997-05-01",
"1997-05-05",
"1997-05-14",
"1997-06-06",
"1997-07-17",
"1997-08-15",
"1997-09-15",
"1997-09-16",
"1997-09-17",
"1997-10-03",
"1997-12-18",
"1997-12-25",
"1997-12-30",
"1997-12-31",
"1998-01-01",
"1998-01-02",
"1998-01-27",
"1998-01-28",
"1998-01-29",
"1998-05-01",
"1998-05-05",
"1998-07-17",
"1998-10-05",
"1998-10-06",
"1998-12-25",
"1998-12-29",
"1998-12-30",
"1998-12-31",
"1999-01-01",
"1999-02-15",
"1999-02-16",
"1999-02-17",
"1999-03-01",
"1999-04-05",
"1999-05-05",
"1999-09-23",
"1999-09-24",
"1999-12-29",
"1999-12-30",
"1999-12-31",
"2000-01-03",
"2000-02-04",
"2000-03-01",
"2000-04-05",
"2000-04-13",
"2000-05-01",
"2000-05-05",
"2000-05-11",
"2000-06-06",
"2000-07-17",
"2000-08-15",
"2000-09-11",
"2000-09-12",
"2000-09-13",
"2000-10-03",
"2000-12-25",
"2000-12-27",
"2000-12-28",
"2000-12-29",
"2001-01-01",
"2001-01-23",
"2001-01-24",
"2001-01-25",
"2001-03-01",
"2001-04-05",
"2001-05-01",
"2001-06-06",
"2001-07-17",
"2001-08-15",
"2001-10-01",
"2001-10-02",
"2001-10-03",
"2001-12-25",
"2001-12-31",
"2002-01-01",
"2002-02-11",
"2002-02-12",
"2002-02-13",
"2002-03-01",
"2002-04-05",
"2002-05-01",
"2002-06-06",
"2002-06-13",
"2002-07-01",
"2002-07-17",
"2002-08-15",
"2002-09-20",
"2002-10-03",
"2002-12-19",
"2002-12-25",
"2002-12-31",
"2003-01-01",
"2003-01-31",
"2003-05-01",
"2003-05-05",
"2003-05-08",
"2003-06-06",
"2003-07-17",
"2003-08-15",
"2003-09-10",
"2003-09-11",
"2003-09-12",
"2003-10-03",
"2003-12-25",
"2003-12-31",
"2004-01-01",
"2004-01-21",
"2004-01-22",
"2004-01-23",
"2004-03-01",
"2004-04-05",
"2004-04-15",
"2004-05-05",
"2004-05-26",
"2004-09-27",
"2004-09-28",
"2004-09-29",
"2004-12-31",
"2005-02-08",
"2005-02-09",
"2005-02-10",
"2005-03-01",
"2005-04-05",
"2005-05-05",
"2005-06-06",
"2005-08-15",
"2005-09-19",
"2005-10-03",
"2005-12-30",
"2006-01-30",
"2006-03-01",
"2006-05-01",
"2006-05-05",
"2006-05-31",
"2006-06-06",
"2006-07-17",
"2006-08-15",
"2006-10-03",
"2006-10-05",
"2006-10-06",
"2006-12-25",
"2006-12-29",
"2007-01-01",
"2007-02-19",
"2007-03-01",
"2007-05-01",
"2007-05-24",
"2007-06-06",
"2007-07-17",
"2007-08-15",
"2007-09-24",
"2007-09-25",
"2007-09-26",
"2007-10-03",
"2007-12-19",
"2007-12-25",
"2007-12-31",
"2008-01-01",
"2008-02-06",
"2008-02-07",
"2008-02-08",
"2008-04-09",
"2008-05-01",
"2008-05-05",
"2008-05-12",
"2008-06-06",
"2008-08-15",
"2008-09-15",
"2008-10-03",
"2008-12-25",
"2008-12-31",
"2009-01-01",
"2009-01-26",
"2009-01-27",
"2009-05-01",
"2009-05-05",
"2009-10-02",
"2009-12-25",
"2009-12-31",
"2010-01-01",
"2010-02-15",
"2010-03-01",
"2010-05-05",
"2010-05-21",
"2010-06-02",
"2010-09-21",
"2010-09-22",
"2010-09-23",
"2010-12-31",
"2011-02-02",
"2011-02-03",
"2011-02-04",
"2011-03-01",
"2011-05-05",
"2011-05-10",
"2011-06-06",
"2011-08-15",
"2011-09-12",
"2011-09-13",
"2011-10-03",
"2011-12-30",
"2012-01-23",
"2012-01-24",
"2012-03-01",
"2012-04-11",
"2012-05-01",
"2012-05-28",
"2012-06-06",
"2012-08-15",
"2012-10-01",
"2012-10-03",
"2012-12-19",
"2012-12-25",
"2012-12-31",
"2013-01-01",
"2013-02-11",
"2013-03-01",
"2013-05-01",
"2013-05-17",
"2013-06-06",
"2013-08-15",
"2013-09-18",
"2013-09-19",
"2013-09-20",
"2013-10-03",
"2013-10-09",
"2013-12-25",
"2013-12-31",
"2014-01-01",
"2014-01-30",
"2014-01-31",
"2014-05-01",
"2014-05-05",
"2014-05-06",
"2014-06-04",
"2014-06-06",
"2014-08-15",
"2014-09-08",
"2014-09-09",
"2014-09-10",
"2014-10-03",
"2014-10-09",
"2014-12-25",
"2014-12-31",
"2015-01-01",
"2015-02-18",
"2015-02-19",
"2015-02-20",
"2015-05-01",
"2015-05-05",
"2015-05-25",
"2015-08-14",
"2015-09-28",
"2015-09-29",
"2015-10-09",
"2015-12-25",
"2015-12-31",
"2016-01-01",
"2016-02-08",
"2016-02-09",
"2016-02-10",
"2016-03-01",
"2016-04-13",
"2016-05-05",
"2016-05-06",
"2016-06-06",
"2016-08-15",
"2016-09-14",
"2016-09-15",
"2016-09-16",
"2016-10-03",
"2016-12-30",
"2017-01-27",
"2017-01-30",
"2017-03-01",
"2017-05-01",
"2017-05-03",
"2017-05-05",
"2017-05-09",
"2017-06-06",
"2017-08-15",
"2017-10-02",
"2017-10-03",
"2017-10-04",
"2017-10-05",
"2017-10-06",
"2017-10-09",
"2017-12-25",
"2017-12-29",
"2018-01-01",
"2018-02-15",
"2018-02-16",
"2018-03-01",
"2018-05-01",
"2018-05-07",
"2018-05-22",
"2018-06-06",
"2018-06-13",
"2018-08-15",
"2018-09-24",
"2018-09-25",
"2018-09-26",
"2018-10-03",
"2018-10-09",
"2018-12-25",
"2018-12-31",
"2019-01-01",
"2019-02-04",
"2019-02-05",
"2019-02-06",
"2019-03-01",
"2019-05-01",
"2019-05-06",
"2019-06-06",
"2019-08-15",
"2019-09-12",
"2019-09-13",
"2019-10-03",
"2019-10-09",
"2019-12-25",
"2019-12-31",
"2020-01-01",
"2020-01-24",
"2020-01-27",
"2020-04-15",
"2020-04-30",
"2020-05-01",
"2020-05-05",
"2020-08-17",
"2020-09-30",
"2020-10-01",
"2020-10-02",
"2020-10-09",
"2020-12-25",
"2020-12-31",
"2021-01-01",
"2021-02-11",
"2021-02-12",
"2021-03-01",
"2021-05-05",
"2021-05-19",
"2021-09-20",
"2021-09-21",
"2021-09-22",
"2021-12-31",
]
)
# Automatically generated holidays using /etc/update_xkrx_holidays.py script.
# Note that there are some missing holidays compared to the original holidays.
dumped_precomputed_krx_holidays = pd.to_datetime(
[
"1975-02-12",
"1975-03-10",
"1975-05-05",
"1975-06-06",
"1975-07-17",
"1975-08-15",
"1975-10-03",
"1975-10-09",
"1975-10-24",
"1975-12-25",
"1975-12-29",
"1975-12-30",
"1975-12-31",
"1976-01-01",
"1976-01-02",
"1976-03-01",
"1976-03-10",
"1976-04-05",
"1976-05-05",
"1976-05-06",
"1976-09-08",
"1976-10-01",
"1976-12-29",
"1976-12-30",
"1976-12-31",
"1977-01-03",
"1977-03-01",
"1977-03-10",
"1977-04-05",
"1977-05-05",
"1977-05-25",
"1977-06-06",
"1977-08-15",
"1977-09-27",
"1977-10-03",
"1977-12-26",
"1977-12-27",
"1977-12-28",
"1977-12-29",
"1977-12-30",
"1978-01-02",
"1978-01-03",
"1978-03-01",
"1978-03-10",
"1978-04-05",
"1978-05-05",
"1978-05-18",
"1978-06-06",
"1978-07-17",
"1978-08-15",
"1978-10-03",
"1978-10-09",
"1978-12-12",
"1978-12-25",
"1978-12-26",
"1978-12-27",
"1978-12-28",
"1978-12-29",
"1979-01-01",
"1979-01-02",
"1979-01-03",
"1979-03-01",
"1979-04-05",
"1979-05-03",
"1979-06-06",
"1979-07-17",
"1979-08-15",
"1979-10-01",
"1979-10-03",
"1979-10-05",
"1979-10-09",
"1979-12-21",
"1979-12-25",
"1979-12-26",
"1979-12-27",
"1979-12-28",
"1979-12-31",
"1980-01-01",
"1980-01-02",
"1980-01-03",
"1980-03-10",
"1980-05-05",
"1980-05-21",
"1980-06-06",
"1980-07-17",
"1980-08-15",
"1980-09-01",
"1980-09-23",
"1980-10-01",
"1980-10-03",
"1980-10-09",
"1980-10-22",
"1980-12-25",
"1980-12-26",
"1980-12-29",
"1980-12-30",
"1980-12-31",
"1981-01-01",
"1981-01-02",
"1981-02-11",
"1981-03-03",
"1981-03-10",
"1981-03-25",
"1981-05-05",
"1981-05-11",
"1981-07-17",
"1981-10-01",
"1981-10-09",
"1981-12-25",
"1981-12-28",
"1981-12-29",
"1981-12-30",
"1981-12-31",
"1982-01-01",
"1982-03-01",
"1982-03-10",
"1982-04-05",
"1982-05-05",
"1982-10-01",
"1982-12-27",
"1982-12-28",
"1982-12-29",
"1982-12-30",
"1982-12-31",
"1983-01-03",
"1983-03-01",
"1983-03-10",
"1983-04-05",
"1983-05-05",
"1983-05-20",
"1983-06-06",
"1983-08-15",
"1983-09-21",
"1983-10-03",
"1983-12-26",
"1983-12-27",
"1983-12-28",
"1983-12-29",
"1983-12-30",
"1984-01-02",
"1984-01-03",
"1984-03-01",
"1984-04-05",
"1984-05-08",
"1984-06-06",
"1984-07-17",
"1984-08-15",
"1984-09-10",
"1984-10-01",
"1984-10-03",
"1984-10-09",
"1984-12-25",
"1984-12-26",
"1984-12-27",
"1984-12-28",
"1984-12-31",
"1985-01-01",
"1985-01-02",
"1985-01-03",
"1985-02-12",
"1985-02-20",
"1985-03-01",
"1985-04-05",
"1985-05-27",
"1985-06-06",
"1985-07-17",
"1985-08-15",
"1985-10-01",
"1985-10-03",
"1985-10-09",
"1985-12-25",
"1985-12-27",
"1985-12-30",
"1985-12-31",
"1986-01-01",
"1986-01-02",
"1986-01-03",
"1986-03-10",
"1986-05-05",
"1986-05-16",
"1986-06-06",
"1986-07-17",
"1986-08-15",
"1986-09-18",
"1986-09-19",
"1986-10-01",
"1986-10-03",
"1986-10-09",
"1986-12-25",
"1986-12-29",
"1986-12-30",
"1986-12-31",
"1987-01-01",
"1987-01-02",
"1987-01-29",
"1987-03-10",
"1987-05-05",
"1987-07-17",
"1987-10-01",
"1987-10-07",
"1987-10-08",
"1987-10-09",
"1987-10-27",
"1987-12-16",
"1987-12-25",
"1987-12-28",
"1987-12-29",
"1987-12-30",
"1987-12-31",
"1988-01-01",
"1988-02-18",
"1988-02-25",
"1988-03-01",
"1988-03-10",
"1988-04-05",
"1988-04-26",
"1988-05-05",
"1988-05-23",
"1988-06-06",
"1988-08-15",
"1988-09-26",
"1988-10-03",
"1988-12-27",
"1988-12-28",
"1988-12-29",
"1988-12-30",
"1989-01-02",
"1989-01-03",
"1989-02-06",
"1989-02-07",
"1989-03-01",
"1989-03-10",
"1989-04-05",
"1989-05-05",
"1989-05-12",
"1989-06-06",
"1989-07-17",
"1989-08-15",
"1989-09-13",
"1989-09-14",
"1989-09-15",
"1989-10-02",
"1989-10-03",
"1989-10-09",
"1989-12-25",
"1989-12-27",
"1989-12-28",
"1989-12-29",
"1990-01-01",
"1990-01-02",
"1990-01-26",
"1990-03-01",
"1990-04-05",
"1990-05-02",
"1990-06-06",
"1990-07-17",
"1990-08-15",
"1990-10-01",
"1990-10-02",
"1990-10-03",
"1990-10-04",
"1990-10-09",
"1990-12-25",
"1990-12-27",
"1990-12-28",
"1990-12-31",
"1991-01-01",
"1991-01-02",
"1991-02-14",
"1991-02-15",
"1991-03-01",
"1991-03-26",
"1991-04-05",
"1991-05-21",
"1991-06-06",
"1991-06-20",
"1991-07-17",
"1991-08-15",
"1991-09-23",
"1991-10-03",
"1991-12-25",
"1991-12-27",
"1991-12-30",
"1991-12-31",
"1992-01-01",
"1992-01-02",
"1992-02-03",
"1992-02-04",
"1992-02-05",
"1992-03-10",
"1992-03-24",
"1992-05-05",
"1992-07-17",
"1992-09-10",
"1992-09-11",
"1992-12-18",
"1992-12-25",
"1992-12-29",
"1992-12-30",
"1992-12-31",
"1993-01-01",
"1993-01-22",
"1993-03-01",
"1993-03-10",
"1993-04-05",
"1993-05-05",
"1993-05-28",
"1993-09-29",
"1993-09-30",
"1993-10-01",
"1993-12-29",
"1993-12-30",
"1993-12-31",
"1994-02-09",
"1994-02-10",
"1994-02-11",
"1994-03-01",
"1994-04-05",
"1994-05-05",
"1994-05-18",
"1994-06-06",
"1994-08-15",
"1994-09-19",
"1994-09-20",
"1994-09-21",
"1994-10-03",
"1995-01-02",
"1996-01-01",
"1996-01-02",
"1997-01-01",
"1997-01-02",
"1997-12-29",
"1997-12-30",
"1997-12-31",
"1998-01-01",
"1998-01-02",
"1998-12-29",
"1998-12-30",
"1998-12-31",
"1999-01-01",
"1999-12-29",
"1999-12-30",
"1999-12-31",
"2000-01-03",
"2000-12-27",
"2000-12-28",
"2000-12-29",
"2001-01-01",
"2001-12-31",
"2002-01-01",
"2002-12-31",
"2003-01-01",
"2003-12-31",
"2004-01-01",
"2004-12-31",
"2005-12-30",
"2006-12-29",
"2007-01-01",
"2007-12-31",
"2008-01-01",
"2008-04-09",
"2008-05-05",
"2008-05-12",
"2008-08-15",
"2008-09-15",
"2008-10-03",
"2008-12-25",
"2008-12-31",
"2009-01-01",
"2009-01-26",
"2009-01-27",
"2009-05-01",
"2009-05-05",
"2009-10-02",
"2009-12-25",
"2009-12-31",
"2010-01-01",
"2010-02-15",
"2010-03-01",
"2010-05-05",
"2010-05-21",
"2010-06-02",
"2010-09-21",
"2010-09-22",
"2010-09-23",
"2010-12-31",
"2011-02-02",
"2011-02-03",
"2011-02-04",
"2011-03-01",
"2011-05-05",
"2011-05-10",
"2011-06-06",
"2011-08-15",
"2011-09-12",
"2011-09-13",
"2011-10-03",
"2011-12-30",
"2012-01-23",
"2012-01-24",
"2012-03-01",
"2012-04-11",
"2012-05-01",
"2012-05-28",
"2012-06-06",
"2012-08-15",
"2012-10-01",
"2012-10-03",
"2012-12-19",
"2012-12-25",
"2012-12-31",
"2013-01-01",
"2013-02-11",
"2013-03-01",
"2013-05-01",
"2013-05-17",
"2013-06-06",
"2013-08-15",
"2013-09-18",
"2013-09-19",
"2013-09-20",
"2013-10-03",
"2013-10-09",
"2013-12-25",
"2013-12-31",
"2014-01-01",
"2014-01-30",
"2014-01-31",
"2014-05-01",
"2014-05-05",
"2014-05-06",
"2014-06-04",
"2014-06-06",
"2014-08-15",
"2014-09-08",
"2014-09-09",
"2014-09-10",
"2014-10-03",
"2014-10-09",
"2014-12-25",
"2014-12-31",
"2015-01-01",
"2015-02-18",
"2015-02-19",
"2015-02-20",
"2015-05-01",
"2015-05-05",
"2015-05-25",
"2015-08-14",
"2015-09-28",
"2015-09-29",
"2015-10-09",
"2015-12-25",
"2015-12-31",
"2016-01-01",
"2016-02-08",
"2016-02-09",
"2016-02-10",
"2016-03-01",
"2016-04-13",
"2016-05-05",
"2016-05-06",
"2016-06-06",
"2016-08-15",
"2016-09-14",
"2016-09-15",
"2016-09-16",
"2016-10-03",
"2016-12-30",
"2017-01-27",
"2017-01-30",
"2017-03-01",
"2017-05-01",
"2017-05-03",
"2017-05-05",
"2017-05-09",
"2017-06-06",
"2017-08-15",
"2017-10-02",
"2017-10-03",
"2017-10-04",
"2017-10-05",
"2017-10-06",
"2017-10-09",
"2017-12-25",
"2017-12-29",
"2018-01-01",
"2018-02-15",
"2018-02-16",
"2018-03-01",
"2018-05-01",
"2018-05-07",
"2018-05-22",
"2018-06-06",
"2018-06-13",
"2018-08-15",
"2018-09-24",
"2018-09-25",
"2018-09-26",
"2018-10-03",
"2018-10-09",
"2018-12-25",
"2018-12-31",
"2019-01-01",
"2019-02-04",
"2019-02-05",
"2019-02-06",
"2019-03-01",
"2019-05-01",
"2019-05-06",
"2019-06-06",
"2019-08-15",
"2019-09-12",
"2019-09-13",
"2019-10-03",
"2019-10-09",
"2019-12-25",
"2019-12-31",
"2020-01-01",
"2020-01-24",
"2020-01-27",
"2020-04-15",
"2020-04-30",
"2020-05-01",
"2020-05-05",
"2020-08-17",
"2020-09-30",
"2020-10-01",
"2020-10-02",
"2020-10-09",
"2020-12-25",
"2020-12-31",
"2021-01-01",
"2021-02-11",
"2021-02-12",
"2021-03-01",
"2021-05-05",
"2021-05-19",
"2021-09-20",
"2021-09-21",
"2021-09-22",
"2021-12-31",
]
)
# Merging two holidays to get full precomputed holidays list.
precomputed_krx_holidays = original_precomputed_krx_holidays.union(
dumped_precomputed_krx_holidays
)
# Korean regular holidays
NewYearsDay = KoreanSolarHoliday(
"New Years Day", month=1, day=1
) # New years day previously had 2 additional following holidays
NewYearsDayAfter = KoreanSolarHoliday(
"New Years Day (+1 day)",
month=1,
day=1,
offset=Day(1),
end_date=pd.Timestamp("1998-12-31"),
"""High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import time
from ast import literal_eval
import traceback
import sys
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(
pst, struct_dict, num_reals=100, sigma_range=4, verbose=True, scale_offset=True
):
"""construct a parameter ensemble from a prior covariance matrix
implied by geostatistical structure(s) and parameter bounds.
Args:
pst (`pyemu.Pst`): a control file (or the name of control file). The
parameter bounds in `pst` are used to define the variance of each
parameter group.
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
num_reals (`int`, optional): number of realizations to draw. Default is 100
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is True.
scale_offset (`bool`,optional): flag to apply scale and offset to parameter bounds
when calculating variances - this is passed through to `pyemu.Cov.from_parameter_data()`.
Default is True.
Returns:
`pyemu.ParameterEnsemble`: the realized parameter ensemble.
Note:
parameters are realized by parameter group. The variance of each
parameter group is used to scale the resulting geostatistical
covariance matrix Therefore, the sill of the geostatistical structures
in `struct_dict` should be 1.0
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd)
pe.to_csv("my_pe.csv")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}".format(
type(pst)
)
if verbose:
print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(
pst, sigma_range=sigma_range, scale_offset=scale_offset
)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
keys = list(struct_dict.keys())
keys.sort()
for gs in keys:
items = struct_dict[gs]
if verbose:
print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn(
"using first geostat structure in file {0}".format(gs), PyemuWarning
)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
if not isinstance(items, list):
items = [items]
# items.sort()
for iitem, item in enumerate(items):
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found".format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if "pargp" in df.columns:
if verbose:
print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ["x", "y", "parnme"]:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn(
"the following parameters are not "
+ "in the control file: {0}".format(",".join(missing)),
PyemuWarning,
)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if df.shape[0] == 0:
warnings.warn(
"geostatistical_draws(): empty parameter df at position {0} items for geostruct {1}, skipping...".format(
iitem, gs
)
)
continue
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn(
"all parameters in zone {0} tied and/or fixed, skipping...".format(
zone
),
PyemuWarning,
)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose:
print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose:
print("done")
if verbose:
print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose:
print("scaling full cov by diag var cov")
# cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i, :] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(
pst=pst, cov=cov, num_reals=num_reals, by_groups=False, fill=False
)
# df = pe.iloc[:,:]
par_ens.append(pe._df)
pars_in_cov.update(set(pe.columns))
if verbose:
print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if len(diff) > 0:
name_dict = {name: i for i, name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec, names=diff, isdiagonal=True)
# cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(
pst, cov, num_reals=num_reals, fill=False
)
par_ens.append(pe._df)
par_ens = pd.concat(par_ens, axis=1)
par_ens = pyemu.ParameterEnsemble(pst=pst, df=par_ens)
return par_ens
def geostatistical_prior_builder(
pst, struct_dict, sigma_range=4, verbose=False, scale_offset=False
):
"""construct a full prior covariance matrix using geostastical structures
and parameter bounds information.
Args:
pst (`pyemu.Pst`): a control file instance (or the name of control file)
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is False.
scale_offset (`bool`): a flag to apply scale and offset to parameter upper and lower bounds
before applying log transform. Passed to pyemu.Cov.from_parameter_data(). Default
is False
Returns:
`pyemu.Cov`: a covariance matrix that includes all adjustable parameters in the control
file.
Note:
The covariance of parameters associated with geostatistical structures is defined
as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
sigma_range. Most users will want the sill of the geostruct to sum to 1.0 so that the resulting
covariance matrices have variance proportional to the parameter bounds. Sounds complicated...
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
cov = pyemu.helpers.geostatistical_prior_builder(pst,struct_dict=sd)
cov.to_binary("prior.jcb")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}".format(
type(pst)
)
if verbose:
print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(
pst, sigma_range=sigma_range, scale_offset=scale_offset
)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# full_cov = None
par = pst.parameter_data
for gs, items in struct_dict.items():
if verbose:
print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn(
"using first geostat structure in file {0}".format(gs), PyemuWarning
)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn(
"geostatistical_prior_builder() warning: geostruct sill != 1.0, user beware!"
)
if not isinstance(items, list):
items = [items]
for item in items:
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found".format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
for req in ["x", "y", "parnme"]:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn(
"the following parameters are not "
+ "in the control file: {0}".format(",".join(missing)),
PyemuWarning,
)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn(
"all parameters in zone {0} tied and/or fixed, skipping...".format(
zone
),
PyemuWarning,
)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose:
print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose:
print("done")
# find the variance in the diagonal cov
if verbose:
print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
# if np.std(tpl_var) > 1.0e-6:
# warnings.warn("pars have different ranges" +\
# " , using max range as variance for all pars")
# tpl_var = tpl_var.max()
if verbose:
print("scaling full cov by diag var cov")
cov *= tpl_var
if verbose:
print("test for inversion")
try:
ci = cov.inv
except:
df_zone.to_csv("prior_builder_crash.csv")
raise Exception("error inverting cov {0}".format(cov.row_names[:3]))
if verbose:
print("replace in full cov")
full_cov.replace(cov)
# d = np.diag(full_cov.x)
# idx = np.argwhere(d==0.0)
# for i in idx:
# print(full_cov.names[i])
return full_cov
def _rmse(v1, v2):
"""return root mean squared error between v1 and v2
Args:
v1 (iterable): one vector
v2 (iterable): another vector
Returns:
scalar: root mean squared error of v1,v2
"""
return np.sqrt(np.mean(np.square(v1 - v2)))
def calc_observation_ensemble_quantiles(
ens, pst, quantiles, subset_obsnames=None, subset_obsgroups=None
):
"""Given an observation ensemble, and requested quantiles, this function calculates the requested
quantile point-by-point in the ensemble. This resulting set of values does not, however, correspond
to a single realization in the ensemble. So, this function finds the minimum weighted squared
distance to the quantile and labels it in the ensemble. Also indicates which realizations
correspond to the selected quantiles.
Args:
ens (pandas DataFrame): DataFrame of an observation ensemble (realizations as rows,
observation names as columns)
pst (pyemu.Pst object) - needed to obtain observation weights
quantiles (iterable): quantiles ranging from 0-1.0 for which results requested
subset_obsnames (iterable): list of observation names to include in calculations
subset_obsgroups (iterable): list of observation groups to include in calculations
Returns:
ens (pandas DataFrame): same ens object that was input but with quantile realizations
appended as new rows labelled with 'q<quantile>' for each selected quantile
quantile_idx (dictionary): dictionary with keys being quantiles and values being the
index of the realization closest (in weighted distance) to each quantile
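Example (a minimal sketch - the ensemble csv name is hypothetical and is assumed to hold
realizations as rows and observation names as columns)::
    pst = pyemu.Pst("my.pst")
    ens = pd.read_csv("obs_ensemble.csv", index_col=0)
    ens, quantile_idx = pyemu.helpers.calc_observation_ensemble_quantiles(
        ens, pst, [0.1, 0.5, 0.9])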
"""
# TODO: handle zero weights due to PDC
quantile_idx = {}
# make sure quantiles and subset names and groups are lists
if not isinstance(quantiles, list):
quantiles = list(quantiles)
if not isinstance(subset_obsnames, list) and subset_obsnames is not None:
subset_obsnames = list(subset_obsnames)
if not isinstance(subset_obsgroups, list) and subset_obsgroups is not None:
subset_obsgroups = list(subset_obsgroups)
if "real_name" in ens.columns:
ens = ens.set_index("real_name")
# if 'base' real was lost, then the index is of type int. needs to be string later so set here
ens.index = [str(i) for i in ens.index]
if not isinstance(pst, pyemu.Pst):
raise Exception("pst object must be of type pyemu.Pst")
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and weights line up
if False in np.unique(ens.columns == obs.index):
raise Exception("ens and pst observation names do not align")
# deal with any subsetting of observations that isn't handled through weights
trimnames = obs.index.values
if subset_obsgroups is not None and subset_obsnames is not None:
raise Exception(
"can only specify information in one of subset_obsnames of subset_obsgroups. not both"
)
if subset_obsnames is not None:
trimnames = subset_obsnames
if len(set(trimnames) - set(obs.index.values)) != 0:
raise Exception(
"the following names in subset_obsnames are not in the ensemble:\n"
+ ["{}\n".format(i) for i in (set(trimnames) - set(obs.index.values))]
)
if subset_obsgroups is not None:
if len((set(subset_obsgroups) - set(pst.obs_groups))) != 0:
raise Exception(
"the following groups in subset_obsgroups are not in pst:\n"
+ "".join(
    [
        "{}\n".format(i)
        for i in (set(subset_obsgroups) - set(pst.obs_groups))
    ]
)
)
trimnames = obs.loc[obs.obgnme.isin(subset_obsgroups)].obsnme.tolist()
if len((set(trimnames) - set(obs.index.values))) != 0:
raise Exception(
"the following names in subset_obsnames are not in the ensemble:\n"
+ ["{}\n".format(i) for i in (set(trimnames) - set(obs.index.values))]
)
# trim the data to subsets (or complete )
ens_eval = ens[trimnames].copy()
weights = obs.loc[trimnames].weight.values
for cq in quantiles:
# calculate the point-wise quantile values
qfit = np.quantile(ens_eval, cq, axis=0)
# calculate the weighted distance between all reals and the desired quantile
qreal = np.argmin(
np.linalg.norm([(i - qfit) * weights for i in ens_eval.values], axis=1)
)
quantile_idx["q{}".format(cq)] = qreal
ens = ens.append(ens.iloc[qreal])
idx = ens.index.values
idx[-1] = "q{}".format(cq)
ens.set_index(idx, inplace=True)
return ens, quantile_idx
def calc_rmse_ensemble(ens, pst, bygroups=True, subset_realizations=None):
"""Calculates RMSE (without weights) to quantify fit to observations for ensemble members
Args:
ens (pandas DataFrame): DataFrame of an observation ensemble (realizations as rows,
observation names as columns)
pst (pyemu.Pst object) - needed to obtain observation weights
bygroups (Bool): Flag to summarize by groups or not. Defaults to True.
subset_realizations (iterable, optional): Subset of realizations for which
to report RMSE. Defaults to None which returns all realizations.
Returns:
rmse (pandas DataFrame object): rows are realizations. Columns are groups. Content is RMSE
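Example (a minimal sketch - the ensemble csv name is hypothetical)::
    pst = pyemu.Pst("my.pst")
    ens = pd.read_csv("obs_ensemble.csv", index_col=0)
    rmse = pyemu.helpers.calc_rmse_ensemble(ens, pst, bygroups=True)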
"""
# TODO: handle zero weights due to PDC
# make sure subset_realizations is a list
if not isinstance(subset_realizations, list) and subset_realizations is not None:
subset_realizations = list(subset_realizations)
if "real_name" in ens.columns:
ens = ens.set_index("real_name")
if not isinstance(pst, pyemu.Pst):
raise Exception("pst object must be of type pyemu.Pst")
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and observations line up
if False in np.unique(ens.columns == obs.index):
raise Exception("ens and pst observation names do not align")
rmse = pd.DataFrame(index=ens.index)
if subset_realizations is not None:
rmse = rmse.loc[subset_realizations]
# calculate the rmse total first
rmse["total"] = [_rmse(ens.loc[i], obs.obsval) for i in rmse.index]
# if bygroups, do the groups as columns
if bygroups is True:
for cg in obs.obgnme.unique():
cnames = obs.loc[obs.obgnme == cg].obsnme
rmse[cg] = [
_rmse(ens.loc[i][cnames], obs.loc[cnames].obsval) for i in rmse.index
]
return rmse
def _condition_on_par_knowledge(cov, par_knowledge_dict):
"""experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception(
"par knowledge dict parameters not found: {0}".format(",".join(missing))
)
# build the selection matrix and sigma epsilon
# sel = pyemu.Cov(x=np.identity(cov.shape[0]),names=cov.row_names)
sel = cov.zero2d
sel = cov.to_pearson()
new_cov_diag = pyemu.Cov(x=np.diag(cov.as_2d.diagonal()), names=cov.row_names)
# new_cov_diag = cov.zero2d
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
for _ in range(2):
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
print(new_cov_diag)
return new_cov_diag
def kl_setup(
num_eig,
sr,
struct,
prefixes,
factors_file="kl_factors.dat",
islog=True,
basis_file=None,
tpl_dir=".",
):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Args:
num_eig (`int`): the number of basis vectors to retain in the
reduced basis
sr (`flopy.reference.SpatialReference`): a spatial reference instance
struct (`str`): a PEST-style structure file. Can also be a
`pyemu.geostats.Geostruct` instance.
prefixes ([`str`]): a list of parameter prefixes to generate KL
parameterization for.
factors_file (`str`, optional): name of the PEST-style interpolation
factors file to write (can be processed with FAC2REAL).
Default is "kl_factors.dat".
islog (`bool`, optional): flag to indicate if the parameters are log transformed.
Default is True
basis_file (`str`, optional): the name of the PEST-style binary (e.g. jco)
file to write the reduced basis vectors to. Default is None (not saved).
tpl_dir (`str`, optional): the directory to write the resulting
template files to. Default is "." (current directory).
Returns:
`pandas.DataFrame`: a dataframe of parameter information.
Note:
This is the companion function to `helpers.apply_kl()`
Example::
m = flopy.modflow.Modflow.load("mymodel.nam")
prefixes = ["hk","vka","ss"]
df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",prefixes)
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr, flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct, str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i, j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(
sr.xcentergrid.flatten(), sr.ycentergrid.flatten(), names=names
)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
# trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:, :num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name": eig_names}, index=eig_names)
pp_df.loc[:, "x"] = -1.0 * sr.ncol
pp_df.loc[:, "y"] = -1.0 * sr.nrow
pp_df.loc[:, "zone"] = -999
pp_df.loc[:, "parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"), pp_df)
_eigen_basis_to_factor_file(
sr.nrow, sr.ncol, trunc_basis, factors_file=factors_file, islog=islog
)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir, "{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat", tpl_file, prefix)
shutil.copy2("temp.dat", tpl_file.replace(".tpl", ""))
df.loc[:, "tpl_file"] = tpl_file
df.loc[:, "in_file"] = tpl_file.replace(".tpl", "")
df.loc[:, "prefix"] = prefix
df.loc[:, "pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
# arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:, "parubnd"] = 10.0
df.loc[:, "parlbnd"] = 0.1
return df
# back_array_dict = {}
# f = open(tpl_file,'w')
# f.write("ptf ~\n")
# f.write("name,org_val,new_val\n")
# for name,array in array_dict.items():
# mname = name+"mean"
# f.write("{0},{1:20.8E},~ {2} ~\n".format(mname,0.0,mname))
# #array -= array.mean()
# array_flat = pyemu.Matrix(x=np.atleast_2d(array.flatten()).transpose()
# ,col_names=["flat"],row_names=names,
# isdiagonal=False)
# factors = trunc_basis * array_flat
# enames = ["{0}{1:04d}".format(name,i) for i in range(num_eig)]
# for n,val in zip(enames,factors.x):
# f.write("{0},{1:20.8E},~ {0} ~\n".format(n,val[0]))
# back_array_dict[name] = (factors.T * trunc_basis).x.reshape(array.shape)
# print(array_back)
# print(factors.shape)
#
# return back_array_dict
def _eigen_basis_to_factor_file(nrow, ncol, basis, factors_file, islog=True):
assert nrow * ncol == basis.shape[0]
with open(factors_file, "w") as f:
f.write("junk.dat\n")
f.write("junk.zone.dat\n")
f.write("{0} {1}\n".format(ncol, nrow))
f.write("{0}\n".format(basis.shape[1]))
[f.write(name + "\n") for name in basis.col_names]
t = 0
if islog:
t = 1
for i in range(nrow * ncol):
f.write("{0} {1} {2} {3:8.5e}".format(i + 1, t, basis.shape[1], 0.0))
[
f.write(" {0} {1:12.8g} ".format(i + 1, w))
for i, w in enumerate(basis.x[i, :])
]
f.write("\n")
def kl_apply(par_file, basis_file, par_to_file_dict, arr_shape):
"""Apply a KL parameterization transform from basis factors to model
input arrays.
Args:
par_file (`str`): the csv file to get factor values from. Must contain
the following columns: "name", "new_val", "org_val"
basis_file (`str`): the PEST-style binary file that contains the reduced
basis
par_to_file_dict (`dict`): a mapping from KL parameter prefixes to array
file names.
arr_shape (tuple): a length 2 tuple of number of rows and columns
the resulting arrays should have.
Note:
This is the companion function to kl_setup.
This function should be called during the forward run
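Example (a sketch assuming `kl_setup()` has already been run and that the parameter csv,
basis file and output array file names below are placeholders)::
    par_to_file_dict = {"hk": "hk_layer1.ref"}
    pyemu.helpers.kl_apply("kl_pars.csv", "basis.jco", par_to_file_dict,
                           arr_shape=(100, 100))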
"""
df = pd.read_csv(par_file)
assert "name" in df.columns
assert "org_val" in df.columns
assert "new_val" in df.columns
df.loc[:, "prefix"] = df.name.apply(lambda x: x[:-4])
for prefix in df.prefix.unique():
assert prefix in par_to_file_dict.keys(), "missing prefix:{0}".format(prefix)
basis = pyemu.Matrix.from_binary(basis_file)
assert basis.shape[1] == arr_shape[0] * arr_shape[1]
arr_min = 1.0e-10 # a temp hack
# means = df.loc[df.name.apply(lambda x: x.endswith("mean")),:]
# print(means)
df = df.loc[df.name.apply(lambda x: not x.endswith("mean")), :]
for prefix, filename in par_to_file_dict.items():
factors = pyemu.Matrix.from_dataframe(df.loc[df.prefix == prefix, ["new_val"]])
factors.autoalign = False
basis_prefix = basis[: factors.shape[0], :]
arr = (factors.T * basis_prefix).x.reshape(arr_shape)
# arr += means.loc[means.prefix==prefix,"new_val"].values
arr[arr < arr_min] = arr_min
np.savetxt(filename, arr, fmt="%20.8E")
def zero_order_tikhonov(pst, parbounds=True, par_groups=None, reset=True):
"""setup preferred-value regularization in a pest control file.
Args:
pst (`pyemu.Pst`): the control file instance
parbounds (`bool`, optional): flag to weight the new prior information
equations according to parameter bound width - approx the KL
transform. Default is True
par_groups (`list`): a list of parameter groups to build PI equations for.
If None, all adjustable parameters are used. Default is None
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
Example::
pst = pyemu.Pst("my.pst")
pyemu.helpers.zero_order_tikhonov(pst)
pst.write("my_reg.pst")
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
pt = row["partrans"].lower()
try:
pt = pt.decode()
except:
pass
if pt not in ["tied", "fixed"] and row["pargp"] in par_groups:
pilbl.append(row["parnme"])
weight.append(1.0)
ogp_name = "regul" + row["pargp"]
obgnme.append(ogp_name[:12])
parnme = row["parnme"]
parval1 = row["parval1"]
if pt == "log":
parnme = "log(" + parnme + ")"
parval1 = np.log10(parval1)
eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
equation.append(eq)
if reset:
pst.prior_information = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
else:
pi = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
pst.prior_information = pst.prior_information.append(pi)
if parbounds:
_regweight_from_parbound(pst)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def _regweight_from_parbound(pst):
"""sets regularization weights from parameter bounds
which approximates the KL expansion. Called by
zero_order_tikhonov().
"""
pst.parameter_data.index = pst.parameter_data.parnme
pst.prior_information.index = pst.prior_information.pilbl
for idx, parnme in enumerate(pst.prior_information.pilbl):
if parnme in pst.parameter_data.index:
row = pst.parameter_data.loc[parnme, :]
lbnd, ubnd = row["parlbnd"], row["parubnd"]
if row["partrans"].lower() == "log":
weight = 1.0 / (np.log10(ubnd) - np.log10(lbnd))
else:
weight = 1.0 / (ubnd - lbnd)
pst.prior_information.loc[parnme, "weight"] = weight
else:
print(
"prior information name does not correspond"
+ " to a parameter: "
+ str(parnme)
)
def first_order_pearson_tikhonov(pst, cov, reset=True, abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
Args:
pst (`pyemu.Pst`): the PEST control file
cov (`pyemu.Cov`): a covariance matrix instance with
some or all of the parameters listed in `pst`.
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
abs_drop_tol (`float`, optional): tolerance to control how many pi equations
are written. If the absolute value of the Pearson CC is less than
abs_drop_tol, the prior information equation will not be included in
the control file.
Note:
The weights on the prior information equations are the Pearson
correlation coefficients implied by covariance matrix.
Example::
pst = pyemu.Pst("my.pst")
cov = pyemu.Cov.from_ascii("my.cov")
pyemu.helpers.first_order_pearson_tikhonov(pst,cov)
pst.write("my_reg.pst")
"""
assert isinstance(cov, pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.to_pearson()
# print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x: x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i, iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j, jname in enumerate(cc_mat.row_names[i + 1 :]):
if jname not in sadj_names:
continue
# print(i,iname,i+j+1,jname)
cc = cc_mat.x[i, j + i + 1]
if cc < abs_drop_tol:
continue
pilbl.append("pcc_{0}".format(pi_num))
iiname = str(iname)
if str(ptrans[iname]) == "log":
iiname = "log(" + iname + ")"
jjname = str(jname)
if str(ptrans[jname]) == "log":
jjname = "log(" + jname + ")"
equation.append("1.0 * {0} - 1.0 * {1} = 0.0".format(iiname, jjname))
weight.append(cc)
obgnme.append("regul_cc")
pi_num += 1
df = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
df.index = df.pilbl
if reset:
pst.prior_information = df
else:
pst.prior_information = pst.prior_information.append(df)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def simple_tpl_from_pars(parnames, tplfilename="model.input.tpl"):
"""Make a simple template file from a list of parameter names.
Args:
parnames ([`str`]): list of parameter names to put in the
new template file
tplfilename (`str`): Name of the template file to create. Default
is "model.input.tpl"
Note:
writes a file `tplfilename` with each parameter name in `parnames` on a line
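Example (the parameter names here are purely illustrative)::
    pyemu.helpers.simple_tpl_from_pars(["par1", "par2"], "model.input.tpl")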
"""
with open(tplfilename, "w") as ofp:
ofp.write("ptf ~\n")
[ofp.write("~{0:^12}~\n".format(cname)) for cname in parnames]
def simple_ins_from_obs(obsnames, insfilename="model.output.ins"):
"""write a simple instruction file that reads the values named
in obsnames in order, one per line from a model output file
Args:
obsnames (`str`): list of observation names to put in the
new instruction file
insfilename (`str`): the name of the instruction file to
create. Default is "model.output.ins"
Note:
writes a file `insfilename` with each observation read off
of a single line
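Example (the observation names here are purely illustrative)::
    pyemu.helpers.simple_ins_from_obs(["obs1", "obs2"], "model.output.ins")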
"""
with open(insfilename, "w") as ofp:
ofp.write("pif ~\n")
[ofp.write("!{0}!\n".format(cob)) for cob in obsnames]
def pst_from_parnames_obsnames(
parnames, obsnames, tplfilename="model.input.tpl", insfilename="model.output.ins"
):
"""Creates a Pst object from a list of parameter names and a list of observation names.
Args:
parnames (`str`): list of parameter names
obsnames (`str`): list of observation names
tplfilename (`str`): template filename. Default is "model.input.tpl"
insfilename (`str`): instruction filename. Default is "model.output.ins"
Returns:
`pyemu.Pst`: the generic control file
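Example (a minimal sketch with illustrative parameter and observation names)::
    pst = pyemu.helpers.pst_from_parnames_obsnames(["par1", "par2"], ["obs1", "obs2"])
    pst.write("generic.pst")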
"""
simple_tpl_from_pars(parnames, tplfilename)
simple_ins_from_obs(obsnames, insfilename)
modelinputfilename = tplfilename.replace(".tpl", "")
modeloutputfilename = insfilename.replace(".ins", "")
return pyemu.Pst.from_io_files(
tplfilename, modelinputfilename, insfilename, modeloutputfilename
)
def read_pestpp_runstorage(filename, irun=0, with_metadata=False):
"""read pars and obs from a specific run in a pest++ serialized
run storage file into dataframes.
Args:
filename (`str`): the name of the run storage file
irun (`int`): the run id to process. If 'all', then all runs are
read. Default is 0
with_metadata (`bool`): flag to return run stats and info txt as well
Returns:
tuple containing
- **pandas.DataFrame**: parameter information
- **pandas.DataFrame**: observation information
- **pandas.DataFrame**: optionally run status and info txt.
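Example (a sketch - the run storage file name is hypothetical)::
    par_df, obs_df = pyemu.helpers.read_pestpp_runstorage("pest.rns", irun=0)
    # or read every run plus run status metadata
    par_df, obs_df, meta = pyemu.helpers.read_pestpp_runstorage(
        "pest.rns", irun="all", with_metadata=True)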
"""
header_dtype = np.dtype(
[
("n_runs", np.int64),
("run_size", np.int64),
("p_name_size", np.int64),
("o_name_size", np.int64),
]
)
try:
irun = int(irun)
except:
if irun.lower() == "all":
irun = irun.lower()
else:
raise Exception(
"unrecognized 'irun': should be int or 'all', not '{0}'".format(irun)
)
def status_str(r_status):
if r_status == 0:
return "not completed"
if r_status == 1:
return "completed"
if r_status == -100:
return "canceled"
else:
return "failed"
assert os.path.exists(filename)
f = open(filename, "rb")
header = np.fromfile(f, dtype=header_dtype, count=1)
p_name_size, o_name_size = header["p_name_size"][0], header["o_name_size"][0]
par_names = (
struct.unpack("{0}s".format(p_name_size), f.read(p_name_size))[0]
.strip()
.lower()
.decode()
.split("\0")[:-1]
)
obs_names = (
struct.unpack("{0}s".format(o_name_size), f.read(o_name_size))[0]
.strip()
.lower()
.decode()
.split("\0")[:-1]
)
n_runs, run_size = header["n_runs"][0], header["run_size"][0]
run_start = f.tell()
def _read_run(irun):
f.seek(run_start + (irun * run_size))
r_status = np.fromfile(f, dtype=np.int8, count=1)
info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
par_df.index = par_df.pop("parnme")
obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
obs_df.index = obs_df.pop("obsnme")
return r_status, info_txt, par_df, obs_df
if irun == "all":
par_dfs, obs_dfs = [], []
r_stats, txts = [], []
for irun in range(n_runs):
# print(irun)
r_status, info_txt, par_df, obs_df = _read_run(irun)
par_dfs.append(par_df)
obs_dfs.append(obs_df)
r_stats.append(r_status)
txts.append(info_txt)
par_df = pd.concat(par_dfs, axis=1).T
par_df.index = np.arange(n_runs)
obs_df = pd.concat(obs_dfs, axis=1).T
obs_df.index = np.arange(n_runs)
meta_data = pd.DataFrame({"r_status": r_stats, "info_txt": txts})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
else:
assert irun <= n_runs
r_status, info_txt, par_df, obs_df = _read_run(irun)
meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
f.close()
if with_metadata:
return par_df, obs_df, meta_data
else:
return par_df, obs_df
def jco_from_pestpp_runstorage(rnj_filename, pst_filename):
"""read pars and obs from a pest++ serialized run storage
file (e.g., .rnj) and return jacobian matrix instance
Args:
rnj_filename (`str`): the name of the run storage file
pst_filename (`str`): the name of the pst file
Note:
This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco
file in a subsequent step to avoid memory resource issues associated
with very large problems.
Returns:
`pyemu.Jco`: a jacobian matrix constructed from the run results and
pest control file information.
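Example (a sketch - the file names are hypothetical)::
    jco = pyemu.helpers.jco_from_pestpp_runstorage("my.rnj", "my.pst")
    jco.to_binary("my.jco")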
"""
header_dtype = np.dtype(
[
("n_runs", np.int64),
("run_size", np.int64),
("p_name_size", np.int64),
("o_name_size", np.int64),
]
)
pst = pyemu.Pst(pst_filename)
par = pst.parameter_data
log_pars = set(par.loc[par.partrans == "log", "parnme"].values)
with open(rnj_filename, "rb") as f:
header = np.fromfile(f, dtype=header_dtype, count=1)
try:
base_par, base_obs = read_pestpp_runstorage(rnj_filename, irun=0)
except:
raise Exception("couldn't get base run...")
par = par.loc[base_par.index, :]
li = base_par.index.map(lambda x: par.loc[x, "partrans"] == "log")
base_par.loc[li] = base_par.loc[li].apply(np.log10)
jco_cols = {}
for irun in range(1, int(header["n_runs"])):
par_df, obs_df = read_pestpp_runstorage(rnj_filename, irun=irun)
par_df.loc[li] = par_df.loc[li].apply(np.log10)
obs_diff = base_obs - obs_df
par_diff = base_par - par_df
# check only one non-zero element per col(par)
if len(par_diff[par_diff.parval1 != 0]) > 1:
raise Exception(
"more than one par diff - looks like the file wasn't created during jco filling..."
)
parnme = par_diff[par_diff.parval1 != 0].index[0]
parval = par_diff.parval1.loc[parnme]
# derivatives
jco_col = obs_diff / parval
# some tracking, checks
print("processing par {0}: {1}...".format(irun, parnme))
print(
"%nzsens: {0}%...".format(
(jco_col[abs(jco_col.obsval) > 1e-8].shape[0] / jco_col.shape[0])
* 100.0
)
)
jco_cols[parnme] = jco_col.obsval
jco_cols = pd.DataFrame.from_records(
data=jco_cols, index=list(obs_diff.index.values)
)
jco_cols = pyemu.Jco.from_dataframe(jco_cols)
# write # memory considerations important here for very large matrices - break into chunks...
# jco_fnam = "{0}".format(filename[:-4]+".jco")
# jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None)
return jco_cols
def parse_dir_for_io_files(d, prepend_path=False):
"""find template/input file pairs and instruction file/output file
pairs by extension.
Args:
d (`str`): directory to search for interface files
prepend_path (`bool`, optional): flag to prepend `d` to each file name.
Default is False
Note:
the return values from this function can be passed straight to
`pyemu.Pst.from_io_files()` classmethod constructor. Assumes the
template file names are <input_file>.tpl and instruction file names
are <output_file>.ins.
Returns:
tuple containing
- **[`str`]**: list of template files in d
- **[`str`]**: list of input files in d
- **[`str`]**: list of instruction files in d
- **[`str`]**: list of output files in d
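Example (a sketch - assumes a directory named "template" containing paired *.tpl and *.ins files)::
    tpl_files, in_files, ins_files, out_files = pyemu.helpers.parse_dir_for_io_files(
        "template", prepend_path=True)
    pst = pyemu.Pst.from_io_files(tpl_files, in_files, ins_files, out_files)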
"""
files = os.listdir(d)
tpl_files = [f for f in files if f.endswith(".tpl")]
in_files = [f.replace(".tpl", "") for f in tpl_files]
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins", "") for f in ins_files]
if prepend_path:
tpl_files = [os.path.join(d, item) for item in tpl_files]
in_files = [os.path.join(d, item) for item in in_files]
ins_files = [os.path.join(d, item) for item in ins_files]
out_files = [os.path.join(d, item) for item in out_files]
return tpl_files, in_files, ins_files, out_files
def pst_from_io_files(
tpl_files, in_files, ins_files, out_files, pst_filename=None, pst_path=None
):
"""create a Pst instance from model interface files.
Args:
tpl_files ([`str`]): list of template file names
in_files ([`str`]): list of model input file names (pairs with template files)
ins_files ([`str`]): list of instruction file names
out_files ([`str`]): list of model output file names (pairs with instruction files)
pst_filename (`str`): name of control file to write. If None, no file is written.
Default is None
pst_path (`str`): the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. If python is being run in a directory other than where the control
file will reside, it is useful to pass `pst_path` as `.`. Default is None
Returns:
`Pst`: new control file instance with parameter and observation names
found in `tpl_files` and `ins_files`, respectively.
Note:
calls `pyemu.helpers.pst_from_io_files()`
Assigns generic values for parameter info. Tries to use INSCHEK
to set somewhat meaningful observation values.
All file paths are relative to where python is running.
Example::
tpl_files = ["my.tpl"]
in_files = ["my.in"]
ins_files = ["my.ins"]
out_files = ["my.out"]
pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)
pst.control_data.noptmax = 0
pst.write("my.pst)
"""
par_names = set()
if not isinstance(tpl_files, list):
tpl_files = [tpl_files]
if not isinstance(in_files, list):
in_files = [in_files]
assert len(in_files) == len(tpl_files), "len(in_files) != len(tpl_files)"
for tpl_file in tpl_files:
assert os.path.exists(tpl_file), "template file not found: " + str(tpl_file)
# new_names = [name for name in pyemu.pst_utils.parse_tpl_file(tpl_file) if name not in par_names]
# par_names.extend(new_names)
new_names = pyemu.pst_utils.parse_tpl_file(tpl_file)
par_names.update(new_names)
if not isinstance(ins_files, list):
ins_files = [ins_files]
if not isinstance(out_files, list):
out_files = [out_files]
assert len(ins_files) == len(out_files), "len(ins_files) != len(out_files)"
obs_names = []
for ins_file in ins_files:
assert os.path.exists(ins_file), "instruction file not found: " + str(ins_file)
obs_names.extend(pyemu.pst_utils.parse_ins_file(ins_file))
new_pst = pyemu.pst_utils.generic_pst(list(par_names), list(obs_names))
if "window" in platform.platform().lower() and pst_path == ".":
pst_path = ""
# new_pst.instruction_files = ins_files
# new_pst.output_files = out_files
new_pst.model_output_data = pd.DataFrame(
{"pest_file": ins_files, "model_file": out_files}, index=ins_files
)
# try to run inschek to find the observtion values
# do this here with full paths to files
pyemu.pst_utils.try_process_output_pst(new_pst)
if pst_path is not None:
tpl_files = [
os.path.join(pst_path, os.path.split(tpl_file)[-1])
for tpl_file in tpl_files
]
in_files = [
os.path.join(pst_path, os.path.split(in_file)[-1]) for in_file in in_files
]
# now set the true path location to instruction files and output files
ins_files = [
os.path.join(pst_path, os.path.split(ins_file)[-1])
for ins_file in ins_files
]
out_files = [
os.path.join(pst_path, os.path.split(out_file)[-1])
for out_file in out_files
]
new_pst.model_input_data = pd.DataFrame(
{"pest_file": tpl_files, "model_file": in_files}, index=tpl_files
)
new_pst.model_output_data = pd.DataFrame(
{"pest_file": ins_files, "model_file": out_files}, index=ins_files
)
new_pst.try_parse_name_metadata()
if pst_filename:
new_pst.write(pst_filename)
return new_pst
wildass_guess_par_bounds_dict = {
"hk": [0.01, 100.0],
"vka": [0.1, 10.0],
"sy": [0.25, 1.75],
"ss": [0.1, 10.0],
"cond": [0.01, 100.0],
"flux": [0.25, 1.75],
"rech": [0.9, 1.1],
"stage": [0.9, 1.1],
}
class PstFromFlopyModel(object):
"""a monster helper class to setup a complex PEST interface around
an existing MODFLOW-2005-family model.
Args:
model (`flopy.mbase`): a loaded flopy model instance. If model is an str, it is treated as a
MODFLOW nam file (requires org_model_ws)
new_model_ws (`str`): a directory where the new version of MODFLOW input files and PEST(++)
files will be written
org_model_ws (`str`): directory to existing MODFLOW model files. Required if model argument
is an str. Default is None
pp_props ([[`str`,[`int`]]]): pilot point multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup pilot point multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup pilot point multiplier parameters for recharge for stress
period 1,5,11,and 16.
const_props ([[`str`,[`int`]]]): constant (uniform) multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup constant (uniform) multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup constant (uniform) multiplier parameters for recharge for stress
period 1,5,11,and 16.
temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters.
A nested list of list-type input elements to parameterize using
name, iterable pairs. The iterable is zero-based stress-period indices.
For example, to setup multipliers for WEL flux and for RIV conductance,
temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup
multiplier parameters for well flux for stress periods 1,2 and 3 and
would setup one single river conductance multiplier parameter that is applied
to all stress periods
spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters.
A nested list of list-type elements to parameterize using
names (e.g. [["riv.cond",0],["wel.flux",1] to setup up cell-based parameters for
each list-type element listed. These multiplier parameters are applied across
all stress periods. For this to work, there must be the same number of entries
for all stress periods. If more than one list element of the same type is in a single
cell, only one parameter is used to multiply all lists in the same cell.
grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup grid-based multiplier parameters in every active model cell
for recharge for stress period 1,5,11,and 16.
sfr_pars (`bool`): setup parameters for the stream flow routing modflow package.
If list is passed it defines the parameters to set up.
temporal_sfr_pars (`bool`): flag to include stress-period level spatially-global multiplier
parameters in addition to the spatially-discrete `sfr_pars`. Requires `sfr_pars`
to be passed. Default is False
grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix
elements for grid-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
pp_space (`int`): number of grid cells between pilot points. If None, use the default
in pyemu.pp_utils.setup_pilot_points_grid. Default is None
zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters.
A nested list of zone-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter
covariance matrix for pilot point parameters. If None, a generic
GeoStruct is created using pp_space and grid-spacing information.
Default is None
par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs.
For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would
set the bounds for horizontal hydraulic conductivity to
0.01 and 100.0 and set the bounds for flux parameters to 0.5 and
2.0. For parameters not found in par_bounds_dict,
`pyemu.helpers.wildass_guess_par_bounds_dict` is
used to set somewhat meaningful bounds. Default is None
temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for time-varying list-type multiplier parameters. This GeoStruct
express the time correlation so that the 'a' parameter is the length of
time that boundary condition multiplier parameters are correlated across.
If None, then a generic GeoStruct is created that uses an 'a' parameter
of 3 stress periods. Default is None
spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for spatially-varying list-type multiplier parameters.
If None, a generic GeoStruct is created using an "a" parameter that
is 10 times the max cell size. Default is None.
remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and
new_model_ws exists, an exception is raised. If True and new_model_ws
exists, the directory is destroyed - user beware! Default is False.
k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs.
e.g. {lay: np.2darray} Used to
override using ibound zones for zone-based parameterization. If None,
use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries
can be passed to allow different zones to be defined for different parameters.
e.g. {"upw.hk" {lay: np.2darray}, "extra.rc11" {lay: np.2darray}}
or {"hk" {lay: np.2darray}, "rc11" {lay: np.2darray}}
use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot
point zones. If False, ibound values greater than zero are treated as
a single zone for pilot points. Default is False
obssim_smp_pairs ([[`str`,`str`]]): a list of observed-simulated PEST-type SMP file
pairs to get observations
from and include in the control file. Default is []
external_tpl_in_pairs ([[`str`,`str`]]): a list of existing template file, model input
file pairs to parse parameters
from and include in the control file. Default is []
external_ins_out_pairs ([[`str`,`str`]]): a list of existing instruction file,
model output file pairs to parse
observations from and include in the control file. Default is []
extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script
commands are executed with os.system() within forward_run.py. Default is None.
redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or
allow model output to be directed to the screen (False). Default is True
extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script.
Commands are executed with os.system() within forward_run.py. Default is None.
tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward
run script. Default is [].
model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used,
which is dangerous because of the non-standard binary names
(e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None.
build_prior (`bool`): flag to build prior covariance matrix. Default is True
sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from
the sfr ASCII output file
hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting
HFB pars have parval1 equal to the values in the original file and use the
spatial_list_geostruct to build geostatistical covariates between parameters
kl_props ([[`str`,[`int`]]]): karhunen-loeve based multiplier parameters.
A nested list of KL-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each
KL parameter set. default is 100
kl_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure
to build the prior parameter covariance matrix
elements for KL-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
Note:
Setup up multiplier parameters for an existing MODFLOW model.
Does all kinds of coolness like building a
meaningful prior, assigning somewhat meaningful parameter groups and
bounds, writes a forward_run.py script with all the calls need to
implement multiplier parameters, run MODFLOW and post-process.
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the
system path variable
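Example (a minimal sketch - the model, workspace and property names are hypothetical)::
    m = flopy.modflow.Modflow.load("my.nam", model_ws="original_ws", check=False)
    helper = pyemu.helpers.PstFromFlopyModel(m, new_model_ws="template_ws",
        pp_props=[["upw.hk", [0, 1]]], remove_existing=True)
    helper.pst.write(os.path.join("template_ws", "pest.pst"))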
"""
def __init__(
self,
model,
new_model_ws,
org_model_ws=None,
pp_props=[],
const_props=[],
temporal_bc_props=[],
temporal_list_props=[],
grid_props=[],
grid_geostruct=None,
pp_space=None,
zone_props=[],
pp_geostruct=None,
par_bounds_dict=None,
sfr_pars=False,
temporal_sfr_pars=False,
temporal_list_geostruct=None,
remove_existing=False,
k_zone_dict=None,
mflist_waterbudget=True,
mfhyd=True,
hds_kperk=[],
use_pp_zones=False,
obssim_smp_pairs=None,
external_tpl_in_pairs=None,
external_ins_out_pairs=None,
extra_pre_cmds=None,
extra_model_cmds=None,
extra_post_cmds=None,
redirect_forward_output=True,
tmp_files=None,
model_exe_name=None,
build_prior=True,
sfr_obs=False,
spatial_bc_props=[],
spatial_list_props=[],
spatial_list_geostruct=None,
hfb_pars=False,
kl_props=None,
kl_num_eig=100,
kl_geostruct=None,
):
self.logger = pyemu.logger.Logger("PstFromFlopyModel.log")
self.log = self.logger.log
self.logger.echo = True
self.zn_suffix = "_zn"
self.gr_suffix = "_gr"
self.pp_suffix = "_pp"
self.cn_suffix = "_cn"
self.kl_suffix = "_kl"
self.arr_org = "arr_org"
self.arr_mlt = "arr_mlt"
self.list_org = "list_org"
self.list_mlt = "list_mlt"
self.forward_run_file = "forward_run.py"
self.remove_existing = remove_existing
self.external_tpl_in_pairs = external_tpl_in_pairs
self.external_ins_out_pairs = external_ins_out_pairs
self._setup_model(model, org_model_ws, new_model_ws)
self._add_external()
self.arr_mult_dfs = []
self.par_bounds_dict = par_bounds_dict
self.pp_props = pp_props
self.pp_space = pp_space
self.pp_geostruct = pp_geostruct
self.use_pp_zones = use_pp_zones
self.const_props = const_props
self.grid_props = grid_props
self.grid_geostruct = grid_geostruct
self.zone_props = zone_props
self.kl_props = kl_props
self.kl_geostruct = kl_geostruct
self.kl_num_eig = kl_num_eig
if len(temporal_bc_props) > 0:
if len(temporal_list_props) > 0:
self.logger.lraise(
"temporal_bc_props and temporal_list_props. "
+ "temporal_bc_props is deprecated and replaced by temporal_list_props"
)
self.logger.warn(
"temporal_bc_props is deprecated and replaced by temporal_list_props"
)
temporal_list_props = temporal_bc_props
if len(spatial_bc_props) > 0:
if len(spatial_list_props) > 0:
self.logger.lraise(
"spatial_bc_props and spatial_list_props. "
+ "spatial_bc_props is deprecated and replaced by spatial_list_props"
)
self.logger.warn(
"spatial_bc_props is deprecated and replaced by spatial_list_props"
)
spatial_list_props = spatial_bc_props
self.temporal_list_props = temporal_list_props
self.temporal_list_geostruct = temporal_list_geostruct
if self.temporal_list_geostruct is None:
v = pyemu.geostats.ExpVario(
contribution=1.0, a=180.0
) # 180 correlation length
self.temporal_list_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="temporal_list_geostruct"
)
self.spatial_list_props = spatial_list_props
self.spatial_list_geostruct = spatial_list_geostruct
if self.spatial_list_geostruct is None:
dist = 10 * float(
max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())
)
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.spatial_list_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="spatial_list_geostruct"
)
self.obssim_smp_pairs = obssim_smp_pairs
self.hds_kperk = hds_kperk
self.sfr_obs = sfr_obs
self.frun_pre_lines = []
self.frun_model_lines = []
self.frun_post_lines = []
self.tmp_files = []
self.extra_forward_imports = []
if tmp_files is not None:
if not isinstance(tmp_files, list):
tmp_files = [tmp_files]
self.tmp_files.extend(tmp_files)
if k_zone_dict is None:
self.k_zone_dict = {
k: self.m.bas6.ibound[k].array for k in np.arange(self.m.nlay)
}
else:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in k_zone_dict.values()]):
# loop over outer keys
for par_key in k_zone_dict.keys():
for k, arr in k_zone_dict[par_key].items():
if k not in np.arange(self.m.nlay):
self.logger.lraise(
"k_zone_dict for par {1}, layer index not in nlay:{0}".format(
k, par_key
)
)
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise(
"k_zone_dict arr for k {0} for par{2} has wrong shape:{1}".format(
k, arr.shape, par_key
)
)
else:
for k, arr in k_zone_dict.items():
if k not in np.arange(self.m.nlay):
self.logger.lraise(
"k_zone_dict layer index not in nlay:{0}".format(k)
)
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise(
"k_zone_dict arr for k {0} has wrong shape:{1}".format(
k, arr.shape
)
)
self.k_zone_dict = k_zone_dict
# add any extra commands to the forward run lines
for alist, ilist in zip(
[self.frun_pre_lines, self.frun_model_lines, self.frun_post_lines],
[extra_pre_cmds, extra_model_cmds, extra_post_cmds],
):
if ilist is None:
continue
if not isinstance(ilist, list):
ilist = [ilist]
for cmd in ilist:
self.logger.statement("forward_run line:{0}".format(cmd))
alist.append("pyemu.os_utils.run('{0}')\n".format(cmd))
# add the model call
if model_exe_name is None:
model_exe_name = self.m.exe_name
self.logger.warn(
"using flopy binary to execute the model:{0}".format(model)
)
if redirect_forward_output:
line = "pyemu.os_utils.run('{0} {1} 1>{1}.stdout 2>{1}.stderr')".format(
model_exe_name, self.m.namefile
)
else:
line = "pyemu.os_utils.run('{0} {1} ')".format(
model_exe_name, self.m.namefile
)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_model_lines.append(line)
self.tpl_files, self.in_files = [], []
self.ins_files, self.out_files = [], []
self._setup_mult_dirs()
self.mlt_files = []
self.org_files = []
self.m_files = []
self.mlt_counter = {}
self.par_dfs = {}
self.mlt_dfs = []
self._setup_list_pars()
self._setup_array_pars()
if not sfr_pars and temporal_sfr_pars:
self.logger.lraise("use of `temporal_sfr_pars` requires `sfr_pars`")
if sfr_pars:
if isinstance(sfr_pars, str):
sfr_pars = [sfr_pars]
if isinstance(sfr_pars, list):
self._setup_sfr_pars(sfr_pars, include_temporal_pars=temporal_sfr_pars)
else:
self._setup_sfr_pars(include_temporal_pars=temporal_sfr_pars)
if hfb_pars:
self._setup_hfb_pars()
self.mflist_waterbudget = mflist_waterbudget
self.mfhyd = mfhyd
self._setup_observations()
self.build_pst()
if build_prior:
self.parcov = self.build_prior()
else:
self.parcov = None
self.log("saving intermediate _setup_<> dfs into {0}".format(self.m.model_ws))
for tag, df in self.par_dfs.items():
df.to_csv(
os.path.join(
self.m.model_ws,
"_setup_par_{0}_{1}.csv".format(
tag.replace(" ", "_"), self.pst_name
),
)
)
for tag, df in self.obs_dfs.items():
df.to_csv(
os.path.join(
self.m.model_ws,
"_setup_obs_{0}_{1}.csv".format(
tag.replace(" ", "_"), self.pst_name
),
)
)
self.log("saving intermediate _setup_<> dfs into {0}".format(self.m.model_ws))
self.logger.statement("all done")
def _setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(
self.org_model_ws, "{0}.sfr.out".format(self.m.name)
)
if not os.path.exists(org_sfr_out_file):
self.logger.lraise(
"setup_sfr_obs() error: could not locate existing sfr out file: {0}".format(
org_sfr_out_file
)
)
new_sfr_out_file = os.path.join(
self.m.model_ws, os.path.split(org_sfr_out_file)[-1]
)
shutil.copy2(org_sfr_out_file, new_sfr_out_file)
seg_group_dict = None
if isinstance(self.sfr_obs, dict):
seg_group_dict = self.sfr_obs
df = pyemu.gw_utils.setup_sfr_obs(
new_sfr_out_file,
seg_group_dict=seg_group_dict,
model=self.m,
include_path=True,
)
if df is not None:
self.obs_dfs["sfr"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
def _setup_sfr_pars(self, par_cols=None, include_temporal_pars=None):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(
self.m, par_cols=par_cols, include_temporal_pars=include_temporal_pars
) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
self.par_dfs["sfr"] = | pd.concat(par_dfs["sfr"]) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 29 15:30:34 2017
@author: <NAME>
"""
import random
import numpy as np
# from scipy import interpolate
import pandas as pd
# Constants for Eq. 5, Temperature -200°C to 0°C.
FROZEN_CONST = [-5.6745359 * 10**3, 6.3925247, -9.6778430 * 10**-3,
6.2215701 * 10**-7, 2.0747825 * 10**-9,
-9.4840240 * 10**-13, 4.1635019]
# Constants for Eq. 6, Temperature 0°C to 200°C.
LIQUID_CONST = [-5.8002206 * 10**3, 1.3914993, -4.8640239 * 10**-2,
4.1764768 * 10**-5, -1.4452093 * 10**-8, 6.5459673]
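# For reference (my reading, not stated in this file): these look like the
# ASHRAE / Hyland-Wexler coefficients for saturation vapour pressure, where
# Eq. 5 (over ice) and Eq. 6 (over liquid water) take the form
#   ln(p_ws) = C1/T + C2 + C3*T + C4*T**2 + C5*T**3 + C6*T**4 + C7*ln(T)   (ice)
#   ln(p_ws) = C1/T + C2 + C3*T + C4*T**2 + C5*T**3 + C6*ln(T)             (liquid)
# with T in kelvin and p_ws in Pa. An illustrative helper (not part of the
# original module) would be:
#   def sat_press_liquid(t_k):
#       c = LIQUID_CONST
#       return np.exp(c[0]/t_k + c[1] + c[2]*t_k + c[3]*t_k**2
#                     + c[4]*t_k**3 + c[5]*np.log(t_k))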
def setseed(randseed):
'''Seed random number generators. Called as a function in main indra
script once and only once.'''
np.random.seed(randseed)
random.seed(randseed)
# ----------- END setseed function. -----------
def quantilecleaner(datain, xy_train, var, bounds=None):
'''Generic cleaner based on quantiles. Needs a time series / dataset
and cut-off quantiles. Also needs the name of the variable (var) in
the incoming dataframe. This function will censor the data outside
those quantiles and interpolate the missing values using linear
interpolation.'''
if bounds is None:
bounds = [0.01, 99.9]
dataout = pd.DataFrame(datain)
for this_month in range(1, 13):
idx_this_month_rec = xy_train.index.month == this_month
idx_this_month_syn = datain.index.month == this_month
rec_quantiles = np.percentile(
xy_train[var].iloc[idx_this_month_rec], bounds)
# import ipdb; ipdb.set_trace()
dataout = dataout.mask(
np.logical_and(
idx_this_month_syn,
np.squeeze(np.logical_or(dataout < rec_quantiles[0],
dataout > rec_quantiles[1]))),
other=np.NaN)
dataout = dataout.interpolate(
method='linear').fillna(method='bfill').fillna(method='ffill')
# Pass back values with only one dimension.
return np.squeeze(dataout.values)
# ----------- END quantilecleaner function. -----------
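# Minimal illustration of the censor-and-interpolate idea above, using
# synthetic data (the variable name "tdb" and the values are made up for the
# example, not taken from this module):
#
#   rng = pd.date_range("2017-01-01", periods=8760, freq="H")
#   xy_train = pd.DataFrame({"tdb": np.random.normal(10, 5, 8760)}, index=rng)
#   synthetic = pd.Series(np.random.normal(10, 8, 8760), index=rng)
#   cleaned = quantilecleaner(synthetic, xy_train, "tdb", bounds=[1, 99])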
def solarcleaner(datain, master):
'''Clamp non-positive solar values in the synthetic data to zero as a
proxy for sunrise, sunset, and twilight. The master series is accepted
so a future version can zero out hours with no recorded sunlight, but it
is not used in the current implementation.'''
# Negative values (usually at sunrise or sunset) and zeros are clamped to 0.
datain = datain.mask(datain <= 0, other=0)
return datain
# A potential improvement would be to calculate sunrise and sunset
# independently since that is an almost deterministic calculation.
# ----------- END solarcleaner function. -----------
def rhcleaner(rh):
'''Censor implausible RH values (here, above 99% or below 10%) and interpolate over the gaps.'''
rhout = pd.DataFrame(rh)
rhout = rhout.mask(rhout >= 99, other=np.NaN).mask(
rhout <= 10, other=np.NaN).mask(
np.isnan(rhout), other=np.NaN)
rhout = rhout.interpolate(method='linear')
rhout = rhout.fillna(method='bfill')
return np.squeeze(rhout.values)
# ----------- END rhcleaner function. -----------
def tdpcleaner(tdp, tdb):
if not isinstance(tdp, pd.DataFrame):
tdpout = | pd.DataFrame(tdp) | pandas.DataFrame |
""" You can download the Genre CSVs from the cs-chan/ArtGAN Github
repository. Assuming that you have already downloaded the entire ~28
GB dataset from the provided link, this script removes rows from the
Genre CSV if that image is not present on your local computer. """
from collections import namedtuple
import pathlib
import pandas as pd
# Read CSVs
ROOT = "wikiart/"
train = pd.read_csv(ROOT+"wikiart_csv/genre_train.csv", header=None)
valid = | pd.read_csv(ROOT+"wikiart_csv/genre_val.csv", header=None) | pandas.read_csv |
""" This file contains a class and methods for Non-REM EEG segments
Notes:
- Analysis should output # of NaNs in the data
TO DO:
- For self.detect_spindles(), move attributes into metadata['analysis_info'] dict
- Optimize self.create_spindfs() method
- Assign NREM attributes to slots on init
- Update docstrings
- !! recalculate ISI for 2-hr blocks
- Update export for spindle_psd_i
"""
import datetime
import glob
#import joblib
import json
import os
import numpy as np
import pandas as pd
import warnings
import xlsxwriter
from mne.time_frequency import psd_array_multitaper
from scipy.signal import butter, sosfiltfilt, sosfreqz
from scipy.optimize import OptimizeWarning, curve_fit
class NREM:
""" General class for nonREM EEG segments """
def __init__(self, fname=None, fpath=None, match=None, in_num=None, epoched=False, batch=False):
""" Initialize NREM object
Parameters
----------
fname: str
filename (if loading a single dataframe)
fpath: str
absolute path to file(s) directory
match: str
string to match within the filename of all files to load (Ex: '_s2_')
in_num: str
IN number, for batch loading
epoched: bool (default: False)
whether data has been epoched (if loading a single dataframe)
batch: bool (default: True)
whether to load all matching files from the fpath directory
"""
if batch:
self.load_batch(fpath, match, in_num)
else:
filepath = os.path.join(fpath, fname)
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
self.metadata = {'file_info':{'in_num': in_num, 'fname': fname, 'path': filepath,
'sleep_stage': slpstage,'cycle': cycle} }
if epoched is True:
self.metadata['file_info']['epoch'] = fname.split('_')[4]
self.load_segment()
def load_segment(self):
""" Load eeg segment and extract sampling frequency. """
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
self.data = data
diff = data.index.to_series().diff()[1:2]
s_freq = 1000000/diff[0].microseconds
self.metadata['file_info']['start_time'] = str(data.index[0])
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}
self.s_freq = s_freq
print('EEG successfully imported.')
def load_batch(self, fpath, match, in_num):
""" Load a batch of EEG segments & reset index from absolute to relative time
TO DO: Throw error if IN doesn't match any files in folder
"""
if in_num == None:
in_num = input('Please specify IN number: ')
if match == None:
match = input('Please specify filename string to match for batch loading (ex. \'_s2_\'): ')
# get a list of all matching files
glob_match = f'{fpath}/*{match}*'
files = glob.glob(glob_match)
# load & concatenate files into a single dataframe
data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()
# extract sampling frequency
s_freq = 1/(data.index[1] - data.index[0]).total_seconds()
# reset the index to continuous time
ind_freq = str(int(1/s_freq*1000000))+'us'
ind_start = '1900-01-01 00:00:00.000'
ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)
data.index = ind
# set metadata & attributes
self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,
'match_phrase': match},
'analysis_info':{'s_freq': s_freq} }
self.data = data
self.s_freq = s_freq
## Spindle Detection Methods ##
# make attributes
def spindle_attributes(self):
""" create attributes for spindle detection """
try:
self.channels
except AttributeError:
# create if doesn't exist
self.channels = [x[0] for x in self.data.columns]
dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute
[setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]
self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])
self.spindle_events = {}
self.spindle_rejects = {}
# step 1: make filter
def make_butter_sp(self, wn, order):
""" Make Butterworth bandpass filter [Parameters/Returns]"""
nyquist = self.s_freq/2
wn_arr=np.asarray(wn)
if np.any(wn_arr <=0) or np.any(wn_arr >=1):
wn_arr = wn_arr/nyquist # must remake filter for each pt bc of differences in s_freq
self.sp_sos = butter(order, wn_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {order}x{order} bandpass = {wn}")
# step 2: filter channels
def spfilt(self, i):
""" Apply Butterworth bandpass to signal by channel """
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(self.sp_sos, data_notnan.to_numpy(), axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add channel to main dataframe
self.spfiltEEG[i] = filt_chan
# steps 3-4: calculate RMS & smooth
def rms_smooth(self, i, sp_mw):
""" Calculate moving RMS (rectify) & smooth the EEG """
mw = int(sp_mw*self.s_freq) # convert moving window size from seconds to samples
# convolve for rolling RMS
datsq = np.power(self.spfiltEEG[i], 2)
window = np.ones(mw)/float(mw)
# convolution mode 'valid' will remove edge effects, but also introduce a time shift
# and downstream errors because it changes the length of the rms data
rms = np.sqrt(np.convolve(datsq, window, 'same'))
#spinfilt_RMS = pd.DataFrame(rms, index=self.data.index) --> add this back for > speed
self.spRMS[i] = rms # for > speed, don't store spinfilt_RMS[i] as an attribute
# smooth with moving average
rms_avg = self.spRMS[i].rolling(mw, center=True).mean()
self.spRMSmavg[i] = rms_avg
# step 5: set thresholds
def set_thres(self, i):
""" set spindle detection threshold levels, in terms of multiples of RMS SD """
mean_rms = float(np.mean(self.spRMSmavg[i]))
det_lo = float(mean_rms + self.metadata['spindle_analysis']['sp_loSD']*np.std(self.spRMSmavg[i]))
det_hi = float(mean_rms + self.metadata['spindle_analysis']['sp_hiSD']*np.std(self.spRMSmavg[i]))
self.spThresholds[i] = [mean_rms, det_lo, det_hi]
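# Worked example (numbers illustrative): if a channel's smoothed RMS has
# mean 2.0 uV and SD 0.5 uV, then with sp_loSD = 0 and sp_hiSD = 1.5 the
# detection thresholds are lo = 2.0 + 0*0.5 = 2.0 uV and
# hi = 2.0 + 1.5*0.5 = 2.75 uV.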
# step 6: detect spindles
def get_spindles(self, i, min_sep):
# vectorize data for detection looping
lo, hi = self.spThresholds[i]['Low Threshold'], self.spThresholds[i]['High Threshold']
mavg_varr, mavg_iarr = np.asarray(self.spRMSmavg[i]), np.asarray(self.spRMSmavg[i].index)
# initialize spindle event list & set pointer to 0
#self.spindle_events[i] = []
spindle_events = []
x=0
while x < len(self.data):
# if value crosses high threshold, start a fresh spindle
if mavg_varr[x] >= hi:
spindle = []
# count backwards to find previous low threshold crossing
for h in range(x, -1, -1):
# if a nan is encountered before the previous low crossing, break
if np.isnan(mavg_varr[h]):
break
elif mavg_varr[h] >= lo:
spindle.insert(0, mavg_iarr[h]) # add value to the beginning of the spindle
else:
break
# count forwards to find next low threshold crossing
for h in range(x+1, len(self.data), 1):
# if a nan is encountered before the next low crossing, break
if np.isnan(mavg_varr[h]):
break
# if above low threshold, add to current spindle
elif mavg_varr[h] >= lo and x < (len(self.data)-1):
spindle.append(mavg_iarr[h])
# if above low threshold and last value OR if nan, add to current spindle and add spindle to events list
elif (mavg_varr[h] >= lo and x == (len(self.data)-1)) or np.isnan(mavg_varr[h]): ## untested
spindle.append(mavg_iarr[h])
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
# otherwise finish spindle & add to spindle events list
elif mavg_varr[h] < lo:
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
break
# advance the pointer to the end of the spindle
x = h
# if value doesn't cross high threshold, advance
else:
x += 1
# combine spindles less than min_sep
spindle_events_msep = []
x = 0
while x < len(spindle_events)-1:
# if the following spindle is less than min_sep away
if (spindle_events[x+1][0] - spindle_events[x][-1])/np.timedelta64(1, 's') < min_sep:
# combine the two, append to list, and advance pointer by two
spindle_comb = spindle_events[x] + spindle_events[x+1]
spindle_events_msep.append(spindle_comb)
x += 2
else:
# otherwise, append spindle to list, advance pointer by 1
spindle_events_msep.append(spindle_events[x])
x += 1
self.spindle_events[i] = spindle_events_msep
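# Example of the min_sep merge above (illustrative): with min_sep = 0.2 s, a
# detection ending at t = 10.00 s and the next one starting at t = 10.15 s are
# 0.15 s apart, so the two are concatenated into a single spindle event; a gap
# of 0.25 s would leave them as separate events.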
# step 7: apply rejection criteria
def reject_spins(self, min_chans_r, min_chans_d, duration):
""" Reject spindles that occur over fewer than 3 channels. Apply duration thresholding to
spindles that occur over fewer than X channels.
[chans < min_chans_r = reject; min_chans_r < chans < min_chans_d = apply max/min duration threshold; X < chans = apply max duration threshold]
Parameters
----------
min_chans_r: int
minimum number of channels a spindle must occur across concurrently to bypass
automatic rejection
min_chans_d: int
minimum number of channels a spindle must occur across concurrently in order to
bypass the duration criterion. performs best at 1/4 of total chans
duration: list of float
duration range (seconds) for spindle thresholding
Returns
-------
modified self.spindle_events and self.spindle_rejects attributes
"""
# convert duration from seconds to samples
sduration = [x*self.s_freq for x in duration]
# make boolean mask for spindle presence
spin_bool = pd.DataFrame(index = self.data.index)
for chan in self.spindle_events:
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
spins_flat = [time for spindle in self.spindle_events[chan] for time in spindle]
spin_bool[chan] = np.isin(self.data.index.values, spins_flat)
spin_bool['chans_present'] = spin_bool.sum(axis=1)
# check individual spindles
for chan in self.spindle_events:
self.spindle_rejects[chan] = []
for spin in self.spindle_events[chan]:
# reject if present over less than min_chans_r channels
if not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_r):
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# Apply duration threshold if not present over more than minimum # of channels
elif not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_d):
# apply duration thresholding
if not sduration[0] <= len(spin) <= sduration[1]:
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# Apply max duration threshold to all spindles left (regardless of # of chans)
else:
if len(spin) > sduration[1]:
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# set multiIndex
def spMultiIndex(self):
""" combine dataframes into a multiIndex dataframe"""
# reset column levels
self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])
self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])
self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])
# list df vars for index specs
dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute
calcs = ['Filtered', 'RMS', 'RMSmavg']
lvl0 = np.repeat(self.channels, len(calcs))
lvl1 = calcs*len(self.channels)
# combine & custom sort
self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])
def detect_spindles(self, wn=[8, 16], order=4, sp_mw=0.2, loSD=0, hiSD=1.5, min_sep=0.2, duration=[0.5, 3.0], min_chans_r=3, min_chans_d=9):
""" Detect spindles by channel [Params/Returns]
Parameters
----------
min_sep: float (default: 0.2)
minimum separation (in seconds) for spindles to be considered distinct, otherwise combine
Returns
-------
"""
self.metadata['spindle_analysis'] = {'sp_filtwindow': wn, 'sp_filtorder_half': order,
'sp_RMSmw': sp_mw, 'sp_loSD': loSD, 'sp_hiSD': hiSD, 'min_sep': min_sep, 'sp_duration': duration,
'sp_minchans_toskipautoreject': min_chans_r, 'sp_minchans_toskipduration': min_chans_d}
#self.s_freq = self.metadata['analysis_info']['s_freq']
# set attributes
self.spindle_attributes()
# Make filter
self.make_butter_sp(wn, order)
print('Detecting spindles...')
# loop through channels (all channels for plotting ease)
for i in self.channels:
# if i not in ['EOG_L', 'EOG_R', 'EKG']:
#print(f'Detecting spindles on {i}...')
# Filter
self.spfilt(i)
# Calculate RMS & smooth
self.rms_smooth(i, sp_mw)
# Set detection thresholds
self.set_thres(i)
# Detect spindles
self.get_spindles(i, min_sep)
# Apply rejection criteria
print('Pruning spindle detections...')
self.reject_spins(min_chans_r, min_chans_d, duration)
print('Spindle detection complete.')
# combine dataframes
print('Combining dataframes...')
self.spMultiIndex()
print('done.\n')
def create_spindfs(self, zmethod, trough_dtype, buff, buffer_len):
""" Create individual dataframes for individual spindles +/- a timedelta buffer
** NOTE: buffer doesn't have the spso filter incorporated
Parameters
----------
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle dataframes with buffer
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
self.spindle_events: dict
dict of timestamps when spindles occur (created from self.detect_spindles())
self.data: pd.DataFrame
df containing raw EEG data
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
"""
## create dict of dataframes for spindle analysis
print('Creating individual spindle dataframes...')
self.metadata['spindle_analysis']['zmethod'] = zmethod
self.metadata['spindle_analysis']['trough_datatype'] = trough_dtype
spindles = {}
for chan in self.spindle_events.keys():
spindles[chan] = {}
for i, spin in enumerate(self.spindle_events[chan]):
# create individual df for each spindle
spin_data = self.data[chan]['Raw'].loc[self.spindle_events[chan][i]]
spfilt_data = self.spfiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# try:
# spsofilt_data = self.spsofiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# # skip spsofilt if not yet calculated (if SO detections haven't been performed)
# except AttributeError:
# pass
# set new index so that each spindle is centered around zero
if zmethod == 'middle':
# this method could use some work
half_length = len(spin)/2
t_id = np.linspace(-half_length, half_length, int(2*half_length//1))
# convert from samples to ms
id_ms = t_id * (1/self.metadata['analysis_info']['s_freq']*1000)
elif zmethod == 'trough' and trough_dtype == 'Raw':
id_ms = (spin_data.index - spin_data.idxmin()).total_seconds()*1000
elif zmethod == 'trough' and trough_dtype == 'spfilt':
id_ms = (spfilt_data.index - spfilt_data.idxmin()).total_seconds()*1000
# create new dataframe
spindles[chan][i] = pd.DataFrame(index=id_ms)
spindles[chan][i].index = [int(x) for x in spindles[chan][i].index]
spindles[chan][i].index.name='id_ms'
spindles[chan][i]['time'] = spin_data.index
spindles[chan][i]['Raw'] = spin_data.values
spindles[chan][i]['spfilt'] = spfilt_data.values
try:
spindles[chan][i]['spsofilt'] = spsofilt_data.values
# skip spsofilt if not yet calculated (if SO detections haven't been performed)
except NameError:
pass
self.spindles = spindles
print('Spindle dataframes created. Spindle data stored in obj.spindles.')
if buff:
# now make buffered dataframes
print(f'Creating spindle dataframes with {buffer_len}s buffer...')
spindles_wbuffer = {}
for chan in self.spindles.keys():
spindles_wbuffer[chan] = {}
for i in self.spindles[chan].keys():
# get +/- buffer length from zero-center of spindle
start = self.spindles[chan][i]['time'].loc[0] - pd.Timedelta(seconds=buffer_len)
end = self.spindles[chan][i]['time'].loc[0] + pd.Timedelta(seconds=buffer_len)
spin_buffer_data = self.data[chan]['Raw'].loc[start:end]
# assign the delta time index
id_ms = (spin_buffer_data.index - self.spindles[chan][i]['time'].loc[0]).total_seconds()*1000
# create new dataframe
spindles_wbuffer[chan][i] = pd.DataFrame(index=id_ms)
spindles_wbuffer[chan][i].index = [int(x) for x in spindles_wbuffer[chan][i].index]
spindles_wbuffer[chan][i].index.name='id_ms'
spindles_wbuffer[chan][i]['time'] = spin_buffer_data.index
spindles_wbuffer[chan][i]['Raw'] = spin_buffer_data.values
self.spindles_wbuffer = spindles_wbuffer
print('Spindle dataframes with buffer stored in obj.spindles_wbuffer.')
def calc_spindle_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
Returns
-------
self.spindle_means: nested dict
dictionary of raw and filtered spindle means by channel
format: {'Raw':{channel:pd.DataFrame}}, 'spfilt':{channel:pd.DataFrame}}
"""
print('Aligning spindles...')
# align spindles according to timedelta & combine into single dataframe
spindle_aggregates = {}
datatypes = ['Raw', 'spfilt']
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles[chan]:
spindle_aggregates[chan] = {}
for datatype in datatypes:
# set the base df
agg_df = pd.DataFrame(self.spindles[chan][0][datatype])
agg_df = agg_df.rename(columns={datatype:'spin_0'})
rsuffix = list(range(1, len(self.spindles[chan])))
# join on the index for each spindle
agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')
spindle_aggregates[chan][datatype] = agg_df
print('Calculating spindle statistics...')
# create a new multiindex dataframe for calculations
spindle_means = {}
calcs = ['count', 'mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
for datatype in datatypes:
spindle_means[datatype] = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_aggregates.keys():
spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)
spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)
spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)
spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)
self.spindle_aggregates = spindle_aggregates
self.spindle_means = spindle_means
print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statistics stored in obj.spindle_means dataframe.\n')
def calc_spindle_buffer_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
NOTE: This needs to be updated to include datatype parameter to stay aligned with calc_spin_means
Also fix the join command for speed (see above)
"""
print('Aligning spindles...')
# align spindles according to timedelta & combine into single dataframe
spindle_buffer_aggregates = {}
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles_wbuffer[chan]:
# set the base df
agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])
rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))
# join on the index for each spindle
for x in range(1, len(self.spindles_wbuffer[chan])):
mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])
spindle_buffer_aggregates[chan] = mean_df
print('Calculating statistics...')
# create a new multiindex dataframe for calculations
calcs = ['mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
spindle_buffer_means = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_buffer_aggregates.keys():
spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)
spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)
spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)
self.spindle_buffer_aggregates = spindle_buffer_aggregates
self.spindle_buffer_means = spindle_buffer_means
print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statistics stored in obj.spindle_buffer_means dataframe.')
def calc_spin_tstats(self, spin_range):
""" calculate time-domain spindle feature statistics
Parameters
----------
spin_range: list of int
spindle frequency range to be used for calculating center frequency
Returns
-------
self.spindle_tstats: pd.DataFrame
MultiIndex dataframe with calculated spindle time statistics
"""
print('Calculating spindle time-domain statistics...')
# create multi-index dataframe
# lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI', 'Power', 'Power']
# lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd', 'center_freq', 'total_pwr']
lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI']
lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd']
columns = pd.MultiIndex.from_arrays([lvl1, lvl2])
spindle_stats = pd.DataFrame(columns=columns)
#exclude non-EEG channels
exclude = ['EOG_L', 'EOG_R', 'EKG']
# fill dataframe
for chan in self.spindles:
if chan not in exclude:
# calculate spindle count
count = len(self.spindles[chan])
if count == 0:
spindle_stats.loc[chan] = [count, None, None, None, None, None, None, None, None, None]
else:
# calculate spindle duration
durations = np.array([(self.spindles[chan][spin].time.iloc[-1] - self.spindles[chan][spin].time.iloc[0]).total_seconds() for spin in self.spindles[chan]])
duration_mean = durations.mean()
duration_sd = durations.std()
# calculate amplitude
amplitudes_raw = np.concatenate([self.spindles[chan][x].Raw.values for x in self.spindles[chan]])
amp_rms_raw = np.sqrt(np.array([x**2 for x in amplitudes_raw]).mean())
amp_sd_raw = amplitudes_raw.std()
amplitudes_spfilt = np.concatenate([self.spindles[chan][x].spfilt.values for x in self.spindles[chan]])
amp_rms_spfilt = np.sqrt(np.array([x**2 for x in amplitudes_spfilt]).mean())
amp_sd_spfilt = amplitudes_spfilt.std()
# calculate density
density = count/((self.data.index[-1] - self.data.index[0]).total_seconds()/60)
# calculate inter-spindle-interval (ISI) --> NOT ACCURATE FOR 2HR BLOCKS
isi_arr = np.array([(self.spindles[chan][x+1].time.iloc[0] - self.spindles[chan][x].time.iloc[-1]).total_seconds() for x in self.spindles[chan] if x < len(self.spindles[chan])-1])
isi_mean = isi_arr.mean()
isi_sd = isi_arr.std()
# calculate center frequency & total spindle power
# spindle_power = self.spindle_psd_norm[chan]['normed_pwr'][(self.spindle_psd[chan].index >= spin_range[0]) & (self.spindle_psd[chan].index <= spin_range[1])]
# center_freq = spindle_power.idxmax()
# total_pwr = spindle_power.sum()
spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd]
# spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd, center_freq, total_pwr]
self.spindle_tstats = spindle_stats
print('Spindle time stats stored in obj.spindle_tstats.\n')
def calc_spindle_psd_concat(self, psd_bandwidth):
""" Calculate multitaper power spectrum of concated spindles for each channel
Params
------
bandwidth: float
frequency resolution in Hz
Returns
-------
self.spindle_psd: dict
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each channel
"""
print('Calculating power spectra (this may take a few minutes)...')
self.metadata['spindle_analysis']['psd_dtype'] = 'raw_concat'
self.metadata['spindle_analysis']['psd_method'] = 'multitaper'
self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth
sf = self.metadata['analysis_info']['s_freq']
spindle_psd = {}
spindle_multitaper_calcs = pd.DataFrame(index=['data_len', 'N', 'W', 'NW', 'K'])
for chan in self.spindles:
#print(f'Calculating spectra for {chan}...')
if len(self.spindles[chan]) > 0:
# concatenate spindles
spindles = [self.spindles[chan][x].Raw.values for x in self.spindles[chan]]
data = np.concatenate(spindles)
# record PS params [K = 2NW-1]
N = len(data)/sf
W = psd_bandwidth
K = int((2*N*W)-1)
spindle_multitaper_calcs[chan] = [len(data), N, W, N*W, K]
# calculate power spectrum
pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
spindle_psd[chan] = psd
self.spindle_multitaper_calcs = spindle_multitaper_calcs
self.spindle_psd_concat = spindle_psd
print('Done. Spectra stored in obj.spindle_psd_concat. Calculations stored in obj.spindle_multitaper_calcs.\n')
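# Worked example of the K = 2NW - 1 bookkeeping above (illustrative): 45 s of
# concatenated spindle data with psd_bandwidth W = 1.0 Hz gives N = 45,
# NW = 45 and K = int(2*45*1.0 - 1) = 89 DPSS tapers for the multitaper
# estimate.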
def calc_gottselig_norm(self, norm_range):
""" calculated normalized spindle power on EEG channels (from Gottselig et al., 2002). works with
calc_spindle_psd_concat.
TO DO: change p0 value if optimize warning
Parameters
----------
norm_range: list of tuple
frequency ranges for gottselig normalization
Returns
-------
self.spindle_psd_concat_norm: nested dict
format {chan: pd.Series(normalized power, index=frequency)}
"""
print('Calculating Gottselig normalization...')
def exponential_func(x, a, b, c):
return a*np.exp(-b*x)+c
self.metadata['spindle_analysis']['gottselig_range'] = norm_range
exclude = ['EOG_L', 'EOG_R', 'EKG']
spindle_psd_norm = {}
chans_norm_failed = []
for chan in self.spindle_psd_concat:
if chan not in exclude:
spindle_psd_norm[chan] = {}
# specify data to be fit (only data in norm range)
incl_freqs = np.logical_or(((self.spindle_psd_concat[chan].index >= norm_range[0][0]) & (self.spindle_psd_concat[chan].index <= norm_range[0][1])),
((self.spindle_psd_concat[chan].index >= norm_range[1][0]) & (self.spindle_psd_concat[chan].index <= norm_range[1][1])))
pwr_fit = self.spindle_psd_concat[chan][incl_freqs]
# set x and y values (convert y to dB)
x_pwr_fit = pwr_fit.index
y_pwr_fit = 10 * np.log10(pwr_fit.values)
# fit exponential -- try second fit line if first throws infinite covariance
with warnings.catch_warnings():
warnings.simplefilter("error", OptimizeWarning)
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 0, 1))
except (OptimizeWarning, RuntimeError):
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 1e-6, 1))
except (OptimizeWarning, RuntimeError):
popt = np.full(3, np.nan)
chans_norm_failed.append(chan)
print(f'scipy.optimize.curvefit encountered RuntimeError on channel {chan}. Normalization skipped for this channel.')
pass
xx = self.spindle_psd_concat[chan].index
yy = exponential_func(xx, *popt)
# subtract the fit line
psd_norm = pd.Series(10*np.log10(self.spindle_psd_concat[chan].values) - yy, index=self.spindle_psd_concat[chan].index)
# save the values
spindle_psd_norm[chan]['normed_pwr'] = psd_norm
spindle_psd_norm[chan]['values_to_fit'] = pd.Series(y_pwr_fit, index=x_pwr_fit)
spindle_psd_norm[chan]['exp_fit_line'] = pd.Series(yy, index=xx)
self.spindle_psd_concat_norm = spindle_psd_norm
self.metadata['spindle_analysis']['chans_concat_norm_failed'] = chans_norm_failed
print('Gottselig normalization data stored in obj.spindle_psd_concat_norm.\n')
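# Sketch of the normalization just performed (my summary, not an exact recipe
# from Gottselig et al., 2002): power in the two flanking frequency ranges is
# converted to dB, an exponential a*exp(-b*f) + c is fit to those values, and
# the fitted curve is subtracted from the full dB spectrum so spindle-band
# power is expressed relative to the broadband 1/f-like background, i.e.
#   background = exponential_func(freqs, *popt)
#   normed_pwr = 10*np.log10(raw_pwr) - background
# where raw_pwr stands in for the concatenated-spindle power spectrum.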
def calc_spindle_psd_i(self, psd_bandwidth, zpad=False, zpad_len=3):
""" Calculate multitaper power spectrum for individual spindles across all channels
Params
------
bandwidth: float
frequency resolution in Hz
zpad: bool (default: False)
whether to zeropad the data (for increased spectral resolution)
zpad_len: float
length to zero-pad the data to (in seconds)
Returns
-------
self.spindles_zpad: dict
zero-padded spindle values
self.spindle_psd_i: dict
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: dict of pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each spindle by channel
"""
print('Calculating power spectra (this may take a few minutes)...')
self.metadata['spindle_analysis']['psd_dtype'] = 'raw_individual'
self.metadata['spindle_analysis']['psd_method'] = 'multitaper'
self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth
self.metadata['spindle_analysis']['zeropad'] = zpad
self.metadata['spindle_analysis']['zeropad_len_sec'] = zpad_len
sf = self.metadata['analysis_info']['s_freq']
spindles_zpad = {}
spindle_psd = {}
spindle_multitaper_calcs = {}
for chan in self.spindles:
spindles_zpad[chan] = {}
spindle_psd[chan] = {}
# waveform resolution is dependent on length of signal, regardless of zero-padding
spindle_multitaper_calcs[chan] = pd.DataFrame(columns=['spin_samples', 'spin_seconds', 'zpad_samples', 'zpad_seconds', 'waveform_resolution_Hz',
'psd_resolution_Hz', 'N_taper_len', 'W_bandwidth', 'K_tapers'])
spindle_multitaper_calcs[chan].index.name = 'spindle_num'
if len(self.spindles[chan]) > 0:
for x in self.spindles[chan]:
# subtract mean to zero-center spindle for zero-padding
data = self.spindles[chan][x].Raw.values - np.mean(self.spindles[chan][x].Raw.values)
zpad_samples=0
zpad_seconds=0
tx=0
# option to zero-pad the spindle
if zpad:
total_len = zpad_len*sf
zpad_samples = total_len - len(data)
zpad_seconds = zpad_samples/sf
if zpad_samples > 0:
padding = np.repeat(0, zpad_samples)
data_pad = np.append(data, padding)
else:
spin_len = len(data)/sf
print(f'Spindle {chan}:{x} length {spin_len} seconds longer than pad length {zpad_len}')
data_pad = data
# or leave as-is
else:
data_pad = data
# record PS params [K = 2NW-1]
spin_samples = len(data)
spin_seconds = len(data)/sf
waveform_res = 1/spin_seconds
psd_res = 1/(len(data_pad)/sf)
N_taper_len = len(data_pad)/sf
W_bandwidth = psd_bandwidth
K_tapers = int((2*N_taper_len*W_bandwidth)-1)
spindle_multitaper_calcs[chan].loc[x] = [spin_samples, spin_seconds, zpad_samples, zpad_seconds, waveform_res, psd_res, N_taper_len, W_bandwidth, K_tapers]
# calculate power spectrum
try:
pwr, freqs = psd_array_multitaper(data_pad, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
except ValueError:
print(f'Specified bandwidth too small for data length. Skipping spindle {chan}:{x}.')
continue
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
spindle_psd[chan][x] = psd
spindles_zpad[chan][x] = data_pad
self.spindles_zpad = spindles_zpad
self.spindle_multitaper_calcs = spindle_multitaper_calcs
self.spindle_psd_i = spindle_psd
print('Done. \nSpectra stored in obj.spindle_psd_i. Calculations stored in obj.spindle_multitaper_calcs. Zero-padded spindle data in obj.spindles_zpad.\n')
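# Worked example of the resolution bookkeeping above (illustrative): a 1.0 s
# spindle sampled at 256 Hz has waveform resolution 1/1.0 = 1.0 Hz; zero-
# padding it to zpad_len = 3 s adds 768 - 256 = 512 samples and improves the
# PSD bin spacing to 1/3.0 ~= 0.33 Hz, while N_taper_len becomes 3 s and
# K = int(2*3*1.0 - 1) = 5 tapers for psd_bandwidth = 1.0 Hz.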
def analyze_spindles(self, zmethod='trough', trough_dtype='spfilt', buff=False, buffer_len=3, psd_type='i', psd_bandwidth=1.0,
zpad=True, zpad_len=3.0, norm_range=[(4,6), (18, 25)], spin_range=[9, 16]):
"""
Starting code for spindle statistics/visualizations
Parameters
----------
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle data dataframes with a delta time buffer around center of spindle
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
psd_type: str (default: 'i')
What data to use for psd calculations [Options: 'i' (individual spindles), 'concat' (spindles concatenated by channel)]
psd_bandwidth: float
frequency bandwidth for power spectra calculations (Hz)
zpad: bool (default: False)
whether to zeropad the spindle data (for increased spectral resolution)
zpad_len: float
length to zero-pad spindles to (in seconds)
norm_range: list of tuple
frequency ranges for gottselig normalization
spin_range: list of int
spindle frequency range to be used for calculating center frequency
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
self.spindle_psd_concat: dict
power spectra for concatenated spindles by channel (Only if psd_type == 'concat')
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_psd_concat_norm: nested dict (Only if psd_type == 'concat')
format {chan: pd.Series(normalized power, index=frequency)}
self.spindle_psd_i: nested dict
power spectra for individual spindles by channel (Only if psd_type == 'i')
format {channel: {spindle: pd.Series}} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each channel
self.spindle_tstats: pd.DataFrame
MultiIndex dataframe with calculated spindle time-domain statistics
"""
# create individual datframes for each spindle
self.create_spindfs(zmethod, trough_dtype, buff, buffer_len)
# calculate spindle & spindle buffer means
self.calc_spindle_means()
if buff:
self.calc_spindle_buffer_means()
# run time-domain spindle statistics by channel
self.calc_spin_tstats(spin_range)
# calculate power spectra
if psd_type == 'concat':
# calc psd on concated spindles
self.calc_spindle_psd_concat(psd_bandwidth)
# normalize power spectra for quantification
self.calc_gottselig_norm(norm_range)
elif psd_type == 'i':
# calc psd on individual spindles
self.calc_spindle_psd_i(psd_bandwidth, zpad, zpad_len)
def export_spindles(self, export_dir):
""" Export spindle analyses
NOTE: Update for spindle_psd_i
Parameters
----------
export_dir: str
Directory to save exported files
Returns
-------
export_dir/fname_metadata.txt: json dump file
export_dir/calcs/fname_multitaper_calcs.csv: csv file
export_dir/calcs/fname_spindle_psd.txt: json dump file
export_dir/calcs/fname_spindle_psd_norm.txt: json dump file
export_dir/fname_spindle_aggregates.csv: multi-tab excel file
export_dir/fname_spindle_means.csv: csv file
export_dir/fname_spindle_stats.csv: csv file
"""
print('Exporting spindle analyses..')
# make export directory if doesn't exit
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# make subdirectory for calculations
calc_dir = os.path.join(export_dir, 'calcs')
if not os.path.exists(calc_dir):
os.makedirs(calc_dir)
# set base for savename
fname = self.metadata['file_info']['fname'].split('.')[0]
# dump metadata
filename = f'{fname}_spindle_metadata.txt'
savename = os.path.join(export_dir, filename)
with open(savename, 'w') as f:
json.dump(self.metadata, f, indent=4)
# export multitaper calcs
filename = f'{fname}_spindle_mt_calcs.csv'
savename = os.path.join(calc_dir, filename)
self.spindle_multitaper_calcs.to_csv(savename)
# export psd (concat)
if self.metadata['spindle_analysis']['psd_dtype'] == 'raw_concat':
# convert series to dicts for json dump
psd_export = {}
for name, series in self.spindle_psd_concat.items():
psd_export[name] = series.to_dict()
filename = f'{fname}_spindle_psd_concat.txt'
savename = os.path.join(calc_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_export, f, indent=4)
# export psd norm
# convert series to dicts for json dump
psd_norm_export = {}
for chan in self.spindle_psd_concat_norm.keys():
psd_norm_export[chan]={}
for name, series in self.spindle_psd_concat_norm[chan].items():
psd_norm_export[chan][name] = series.to_dict()
filename = f'{fname}_spindle_psd_norm.txt'
savename = os.path.join(calc_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_norm_export, f, indent=4)
# export psd (individual)
### INDIVIDUAL EXPORT HERE
# export spindle aggregates
filename = f'{fname}_spindle_aggregates.xlsx'
savename = os.path.join(export_dir, filename)
writer = pd.ExcelWriter(savename, engine='xlsxwriter')
for chan in self.spindle_aggregates.keys():
for dtype in self.spindle_aggregates[chan].keys():
tab = '_'.join([chan, dtype])
self.spindle_aggregates[chan][dtype].to_excel(writer, sheet_name=tab)
writer.save()
# export spindle means
for dtype in self.spindle_means.keys():
filename = f'{fname}_spindle_means_{dtype}.csv'
savename = os.path.join(export_dir, filename)
self.spindle_means[dtype].to_csv(savename)
# export spindle stats
filename = f'{fname}_spindle_stats.csv'
savename = os.path.join(export_dir, filename)
self.spindle_tstats.to_csv(savename)
print(f'Export complete. Analyses stored in {export_dir}')
## Slow Oscillation Detection Methods ##
def so_attributes(self):
""" make attributes for slow oscillation detection """
try:
self.channels
except AttributeError:
# create if doesn't exist
self.channels = [x[0] for x in self.data.columns]
dfs = ['sofiltEEG', 'spsofiltEEG']
[setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]
self.so_events = {}
self.so_rejects = {}
def make_butter_so(self, wn, order):
""" Make Butterworth bandpass filter [Parameters/Returns]"""
nyquist = self.s_freq/2
wn_arr = np.asarray(wn)
if np.any(wn_arr <=0) or np.any(wn_arr >=1):
wn_arr = wn_arr/nyquist # must remake filter for each pt bc of differences in s_freq
self.so_sos = butter(order, wn_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {order}x{order}, bandpass = {wn}")
def make_butter_spso(self, spso_wn_pass, spso_wn_stop, spso_order):
""" Make Butterworth bandpass and bandstop filter [Parameters/Returns]"""
nyquist = self.s_freq/2
wn_pass_arr = np.asarray(spso_wn_pass)
wn_stop_arr = np.asarray(spso_wn_stop)
# must remake filter for each pt bc of differences in s_freq
if np.any(wn_pass_arr <=0) or np.any(wn_pass_arr >=1):
wn_pass_arr = wn_pass_arr/nyquist
if np.any(wn_stop_arr <=0) or np.any(wn_stop_arr >=1):
wn_stop_arr = wn_stop_arr/nyquist
self.spso_sos_bandstop = butter(spso_order, wn_stop_arr, btype='bandstop', output='sos')
self.spso_sos_bandpass = butter(spso_order, wn_pass_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {spso_order}x{spso_order} bandpass = {spso_wn_pass}")
print(f"Zero phase butterworth filter successfully created: order = {spso_order}x{spso_order} bandstop = {spso_wn_stop}")
def sofilt(self, i):
""" Apply Slow Oscillation Butterworth bandpass to signal by channel
Parameters
----------
i : str
channel to filter
Returns
-------
self.sofiltEEG: pandas.DataFrame
filtered EEG data
"""
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(self.so_sos, data_notnan.to_numpy(), axis=0)
data_notnan['SOFilt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['SOFilt']).sort_index()
# add channel to main dataframe
self.sofiltEEG[i] = filt_chan
def spsofilt(self, i):
""" Apply Butterworth bandpass-bandstop to signal by channel """
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
## bandpass
data_notnan_bandpassed = sosfiltfilt(self.spso_sos_bandpass, data_notnan.to_numpy(), axis=0)
## now bandstop
data_notnan_filt = sosfiltfilt(self.spso_sos_bandstop, data_notnan_bandpassed, axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add channel to main dataframe
self.spsofiltEEG[i] = filt_chan
def get_so(self, i, posx_thres, npeak_thres, negpos_thres):
""" Detect slow oscillations. Based on detection algorithm from Molle 2011
Parameters
----------
posx_thres: list of float (default: [0.9, 2])
threshold of consecutive positive-negative zero crossings in seconds
npeak_thres: int (default: -80)
negative peak threshold in microvolts
negpos_thres: int (default: 140)
minimum amplitude threshold for negative to positive peaks
"""
self.so_events[i] = {}
n = 0
# convert thresholds (time to timedelta & uv to mv)
posx_thres_td = [ | pd.Timedelta(s, 's') | pandas.Timedelta |
"""Tests for cv function."""
# --------------------------- Import libraries and functions --------------------------
import pandas as pd
import pytest
from numpy import inf
from pandas.testing import assert_frame_equal
from pycvcqv.cv import coefficient_of_variation
def test_cv_with_kwarg():
"""Tests cv function without correction with data kwarg."""
assert (
coefficient_of_variation(
data=pd.Series(
[
0.2,
0.5,
1.1,
1.4,
1.8,
2.3,
2.5,
2.7,
3.5,
4.4,
4.6,
5.4,
5.4,
5.7,
5.8,
5.9,
6.0,
6.6,
7.1,
7.9,
]
),
multiplier=100,
)
== pytest.approx(57.77, 0.001)
)
def test_cv_without_kwarg():
"""Tests cv function without correction without data kwarg."""
assert (
coefficient_of_variation(
[
0.2,
0.5,
1.1,
1.4,
1.8,
2.3,
2.5,
2.7,
3.5,
4.4,
4.6,
5.4,
5.4,
5.7,
5.8,
5.9,
6.0,
6.6,
7.1,
7.9,
],
multiplier=100,
)
== pytest.approx(57.77, 0.001)
)
def test_cv_corrected():
"""Tests cv function with correction."""
assert (
coefficient_of_variation(
data=pd.Series(
[
0.2,
0.5,
1.1,
1.4,
1.8,
2.3,
2.5,
2.7,
3.5,
4.4,
4.6,
5.4,
5.4,
5.7,
5.8,
5.9,
6.0,
6.6,
7.1,
7.9,
]
),
correction=True,
multiplier=100,
)
== pytest.approx(58.05, 0.001)
)
def test_cv_nonnumeric_type_data_with_kwarg():
"""Tests cv function with nonnumeric type of data with kwarg."""
with pytest.raises(TypeError) as execinfo:
coefficient_of_variation(
data=pd.Series(["0.2", "0.5", "1.1", "1.4", "1.8", "2.3", "2.5", " 2.7"]),
correction=True,
multiplier=100,
)
assert execinfo.value.args[0] == "The data is not numeric!"
def test_cv_nonnumeric_type_data_without_kwarg():
"""Tests cv function with nonnumeric type of data without kwarg."""
with pytest.raises(TypeError) as execinfo:
coefficient_of_variation(
["0.2", "0.5", "1.1", "1.4", "1.8", "2.3", "2.5", " 2.7"],
correction=True,
multiplier=100,
)
assert execinfo.value.args[0] == "The data is not numeric!"
def test_cv_dataframe_single_thread():
"""Tests cv function for dataframe when num_threads is default."""
data = pd.DataFrame(
{
"col-1": pd.Series([0.2, 0.5, 1.1, 1.4, 1.8, 2.3, 2.5, 2.7, 3.5]),
"col-2": pd.Series([5.4, 5.4, 5.7, 5.8, 5.9, 6.0, 6.6, 7.1, 7.9]),
}
)
result = coefficient_of_variation(data)
assert_frame_equal(
result,
pd.DataFrame(
{
"columns": pd.Series(["col-1", "col-2"]),
"cv": pd.Series([0.6076, 0.1359]),
}
),
)
def test_cv_dataframe_zerothread():
"""Tests cv function for dataframe when num_threads is zero."""
data = pd.DataFrame(
{
"col-1": pd.Series([0.2, 0.5, 1.1, 1.4, 1.8, 2.3, 2.5, 2.7, 3.5]),
"col-2": pd.Series([5.4, 5.4, 5.7, 5.8, 5.9, 6.0, 6.6, 7.1, 7.9]),
}
)
result = coefficient_of_variation(data=data, num_threads=0)
assert_frame_equal(
result,
pd.DataFrame(
{
"columns": pd.Series(["col-1", "col-2"]),
"cv": pd.Series([0.6076, 0.1359]),
}
),
)
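# Added illustrative test: it assumes ``num_threads`` also accepts arbitrary
# positive integers (only 0, -1 and the default are exercised above) and that
# the thread count does not change the result, so expected values mirror the
# single-threaded case.
def test_cv_dataframe_two_threads():
    """Tests cv function for dataframe when num_threads is an explicit positive int."""
    data = pd.DataFrame(
        {
            "col-1": pd.Series([0.2, 0.5, 1.1, 1.4, 1.8, 2.3, 2.5, 2.7, 3.5]),
            "col-2": pd.Series([5.4, 5.4, 5.7, 5.8, 5.9, 6.0, 6.6, 7.1, 7.9]),
        }
    )
    result = coefficient_of_variation(data=data, num_threads=2)
    assert_frame_equal(
        result,
        pd.DataFrame(
            {
                "columns": pd.Series(["col-1", "col-2"]),
                "cv": pd.Series([0.6076, 0.1359]),
            }
        ),
    )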
def test_cv_dataframe_multithread():
"""Tests cv function for dataframe when num_threads is multi."""
data = pd.DataFrame(
{
"col-1": pd.Series([0.2, 0.5, 1.1, 1.4, 1.8, 2.3, 2.5, 2.7, 3.5]),
"col-2": pd.Series([5.4, 5.4, 5.7, 5.8, 5.9, 6.0, 6.6, 7.1, 7.9]),
}
)
result = coefficient_of_variation(data=data, num_threads=-1)
assert_frame_equal(
result,
pd.DataFrame(
{
"columns": pd.Series(["col-1", "col-2"]),
"cv": | pd.Series([0.6076, 0.1359]) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
        assert_panel_equal(wp, wp2)
# -*- coding: utf-8 -*-
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
from pandas.util import testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum.")
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = '.'.join([name, 'html'])
filepath = datapath('io', 'formats', 'data', 'html', filename)
with open(filepath, encoding='utf-8') as f:
html = f.read()
return html.rstrip()
@pytest.fixture(params=['mixed', 'empty'])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == 'mixed':
df = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
df.loc[:20, 'A'] = np.nan
df.loc[:20, 'B'] = np.nan
return df
elif request.param == 'empty':
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize('col_space', [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize('df,expected', [
(DataFrame({'\u03c3': np.arange(10.)}), 'unicode_1'),
(DataFrame({'A': ['\u03c3']}), 'unicode_2')
])
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({'A': [6.0, 3.1, 2.2]})
result = df.to_html(decimal=',')
expected = expected_html(datapath, 'gh12031_expected_output')
assert result == expected
@pytest.mark.parametrize('kwargs,string,expected', [
(dict(), "<type 'str'>", 'escaped'),
(dict(escape=False), "<b>bold</b>", 'escape_disabled')
])
def test_to_html_escaped(kwargs, string, expected, datapath):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: string,
b: string},
'co>l2': {a: string,
b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('index_is_named', [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)
})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
if index_is_named:
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh8452_expected_output')
assert result == expected
@pytest.mark.parametrize('multi_sparse,expected', [
(False, 'multiindex_sparsify_false_multi_sparse_1'),
(False, 'multiindex_sparsify_false_multi_sparse_2'),
(True, 'multiindex_sparsify_1'),
(True, 'multiindex_sparsify_2')
])
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith('2'):
df.columns = index[::2]
with option_context('display.multi_sparse', multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('max_rows,expected', [
(60, 'gh14882_expected_output_1'),
# Test that ... appears in a middle level
(56, 'gh14882_expected_output_2')
])
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product([[100, 200, 300],
[10, 20, 30],
[1, 2, 3, 4, 5, 6, 7]],
names=['a', 'b', 'c'])
df = DataFrame({'n': range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('df,formatters,expected', [
(DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4)),
{'__index__': lambda x: 'abcd' [x]},
'index_formatter'),
(DataFrame(
{'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{'months': lambda x: x.strftime('%Y-%m')},
'datetime64_monthformatter'),
(DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')}),
{'hod': lambda x: x.strftime('%H:%M')},
'datetime64_hourformatter')
])
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame({
'clé1': ['a', 'a', 'b', 'b', 'a'],
'clé2': ['1er', '2ème', '1er', '2ème', '1er'],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=['clé1'], columns=['clé2'])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start='20010101', freq='D', periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, 'truncate')
assert result == expected
@pytest.mark.parametrize('sparsify,expected', [
(True, 'truncate_multi_index'),
(False, 'truncate_multi_index_sparse_off')
])
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('option,result,expected', [
(None, lambda df: df.to_html(), '1'),
(None, lambda df: df.to_html(border=0), '0'),
(0, lambda df: df.to_html(), '0'),
(0, lambda df: df._repr_html_(), '0'),
])
def test_to_html_border(option, result, expected):
df = DataFrame({'A': [1, 2]})
if option is None:
result = result(df)
else:
with option_context('display.html.border', option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
def test_display_option_warning():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.options.html.border
@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=['B', 'A'], col_space=17)
df.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
df.to_html(columns=['B', 'A'], float_format=str)
df.to_html(columns=['B', 'A'], col_space=12, float_format=str)
@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join('test.html')
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({'x': np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert '<strong' not in result
def test_to_html_columns_arg():
df = DataFrame(tm.getSeriesData())
result = df.to_html(columns=['A'])
assert '<th>B</th>' not in result
@pytest.mark.parametrize('columns,justify,expected', [
(MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))),
names=['CL0', 'CL1']),
'left',
'multiindex_1'),
(MultiIndex.from_tuples(
list(zip(range(4), np.mod(lrange(4), 2)))),
'right',
'multiindex_2')
])
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify=justify)
expected = expected_html(datapath, 'justify').format(justify=justify)
assert result == expected
@pytest.mark.parametrize("justify", ["super-right", "small-left",
"noinherit", "tiny", "pandas"])
def test_to_html_invalid_justify(justify):
# GH 17527
df = | DataFrame() | pandas.DataFrame |
"""A word-level Sequence to sequence model in Keras.
Adapted from:
- https://github.com/keras-team/keras/blob/master/examples/lstm_seq2seq.py
- https://wanasit.github.io/english-to-katakana-using-sequence-to-sequence-in-keras.html
- https://github.com/devm2024/nmt_keras/blob/master/base.ipynb
Summary
-------
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
      It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence word)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next word
- Sample the next word using these predictions (simply use argmax).
- Append the sampled word to the target sequence
- Repeat until we generate the end-of-sequence word.
References
----------
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
For more ideas on hyperparameter search:
- Massive Exploration of Neural Machine Translation Architectures, 2017
https://arxiv.org/pdf/1703.03906.pdf
"""
from __future__ import print_function
import os
import random
import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import Model
from keras.layers import Input, LSTM, Dense, Bidirectional, Concatenate, Embedding
from keras.layers import Activation, dot, concatenate, TimeDistributed
import pandas as pd
import argparse
import dataloader
# TODO Some questions:
# -- Does it make sense to apply dropout to both encoder and decoder?
def make_model(num_unique_input_chars,
num_unique_target_chars,
latent_dim,
dropout_encoder,
dropout_decoder,
rec_dropout,
has_attention=False,
src_embedding_matrix=None,
tgt_embedding_matrix=None,
trainable_src_emb=False,
trainable_tgt_emb=False):
"""Create the LSTM encoder-decoder model."""
if src_embedding_matrix is not None:
src_embedding_matrix = [src_embedding_matrix]
if tgt_embedding_matrix is not None:
tgt_embedding_matrix = [tgt_embedding_matrix]
# ENCODER ARCHITECTURE
######################
encoder_raw_inputs = Input(shape=(None,))
#encoder_inputs = Embedding(num_unique_input_chars, embedding_size)(encoder_raw_inputs)
denc = Embedding(num_unique_input_chars,
embedding_size,
name = 'encoder_embedding',
weights = src_embedding_matrix, #[embedding_matrix]
trainable = trainable_src_emb
)
encoder_inputs = denc(encoder_raw_inputs)
# encoder_inputs = Input(shape=(None, num_unique_input_chars),
# name='input_encoder')
lstm_layer = LSTM(latent_dim,
name='lstm_encoder',
dropout=dropout_encoder,
recurrent_dropout=rec_dropout,
return_state=True,
return_sequences=True)
enc_outs, state_h, state_c = lstm_layer(encoder_inputs)
encoder_states = [state_h, state_c]
# DECODER ARCHITECTURE -- use `encoder_states` as initial state.
######################
# decoder_inputs = Input(shape=(None, num_unique_target_chars),
# name='input_decoder')
decoder_raw_inputs = Input(shape=(None,), name='input_decoder')
dex = Embedding(num_unique_target_chars,
embedding_size,
name='decoder_embedding',
weights = tgt_embedding_matrix,
trainable = trainable_tgt_emb
)
decoder_inputs = dex(decoder_raw_inputs) #final_dex
# The decoder will return both full output sequences and internal states.
# Return states will be used in inference, not in training.
decoder_lstm = LSTM(latent_dim,
name='lstm_decoder',
dropout=dropout_decoder,
recurrent_dropout=rec_dropout,
return_sequences=True,
return_state=True)
decoder_lstm_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
if has_attention:
# The following equation numbers are from Luong et al., section 3.1.
score = dot([decoder_lstm_outputs, enc_outs], axes=[2, 2]) # Eq. (7)
# The output is a rank-3 tensor, where first dim= number of instances,
# The second dim is max_target_sentence_length, and the third dim is
# max_source_sentence_length. Entry i,j,k corresponds to instance i, time-step
# j of the decoder, timestep k of the encoder.
attention = Activation('softmax', name='attention')(score) # Eq. (7)
# Row i,j,: are the weights for the ith sample, for the jth timestep of
# the decoder. The softmax normalized them. There are
# max_source_sentence_length weights in the row.
context = dot([attention, enc_outs], axes=[2, 1])
# Row i,j,: is the context vector for instance i, decoder timestep j,
# ie. weighted average (using attention weights) of the encoder hidden
# states.
# Eq. (5):
decoder_combined = concatenate([context, decoder_lstm_outputs])
output = TimeDistributed(Dense(latent_dim,
activation="tanh"))(decoder_combined)
# Eq. (6): the conditional probabilities
decoder_outputs = TimeDistributed(Dense(num_unique_target_chars,
activation="softmax"))(output)
#model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model = Model([encoder_raw_inputs, decoder_raw_inputs], decoder_outputs)
else:
decoder_dense = Dense(num_unique_target_chars,
activation='softmax')
decoder_outputs = decoder_dense(decoder_lstm_outputs)
# model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model = Model([encoder_raw_inputs, decoder_raw_inputs], decoder_outputs)
return model
#NOTE: later remove the num_unique_input_chars ; it is not being used.
def make_encoder(model, num_unique_input_chars, has_attention=False):
"""."""
# encoder_inputs = model.input[0]
encoder_raw_inputs = model.input[0]
#encoder_inputs = Embedding(num_unique_input_chars, embedding_size)(encoder_raw_inputs)
denc = model.get_layer('encoder_embedding')
encoder_inputs = denc(encoder_raw_inputs)
outputs = model.get_layer(name='lstm_encoder').output
encoder_states = outputs[1:]
out = outputs[0]
if has_attention:
encoder_out = [out]+encoder_states
else:
encoder_out = encoder_states
# encoder_model = Model(encoder_inputs, encoder_out)
encoder_model = Model(encoder_raw_inputs, encoder_out)
return encoder_model
def make_decoder(model, has_attention=False):
"""."""
latent_dim = model.get_layer(name='lstm_encoder').output_shape[0][-1]
# num_unique_target_chars = model.get_layer(name='input_decoder').input_shape[2]
decoder_states_inputs = [Input(shape=(latent_dim,)),
Input(shape=(latent_dim,))]
# decoder_inputs2 = Input(shape=(None, num_unique_target_chars))
decoder_raw_inputs2 = Input(shape=(None,))
dex = model.get_layer('decoder_embedding')
decoder_inputs2 = dex(decoder_raw_inputs2) # final_dex2
decoder_lstm = model.get_layer('lstm_decoder')
decoder_lstm_outputs, state_h, state_c = decoder_lstm(
decoder_inputs2,
initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
if has_attention:
enc_outs = Input(shape=(None, latent_dim))
score = dot([decoder_lstm_outputs, enc_outs], axes=[2, 2])
attention = Activation('softmax', name='attention')(score)
context = dot([attention, enc_outs], axes=[2, 1])
decoder_combined = concatenate([context, decoder_lstm_outputs])
dense_0 = model.layers[-2]
dense_1 = model.layers[-1]
output = dense_0(decoder_combined)
decoder_outputs = dense_1(output)
decoder_model = Model([decoder_raw_inputs2] + decoder_states_inputs + [enc_outs],
[decoder_outputs] + decoder_states)
# decoder_model = Model([decoder_inputs2] + decoder_states_inputs + [enc_outs],
# [decoder_outputs] + decoder_states)
else:
decoder_dense = model.layers[-1]
decoder_outputs = decoder_dense(decoder_lstm_outputs)
# decoder_model = Model([decoder_inputs2] + decoder_states_inputs,
# [decoder_outputs] + decoder_states)
decoder_model = Model([decoder_raw_inputs2] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
return decoder_model
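# --- Hedged sketch: greedy inference loop (not in the original file) -----
# The module docstring walks through decoding an unknown input one word at
# a time, but no decode routine ships with this script. The function below
# is one possible implementation. The lookup tables `target_token_index`
# and `reverse_target_index`, and the '<s>' / '</s>' sentinel tokens, are
# assumptions -- they are not defined anywhere in this file.
def decode_sequence_sketch(input_seq, encoder_model, decoder_model,
                           target_token_index, reverse_target_index,
                           max_decoder_seq_length=50, has_attention=False):
    """Greedily decode one tokenized source sequence of shape (1, src_len)."""
    import numpy as np  # numpy is not imported at module level in this file
    # Encode the input sequence into state vectors; with attention the
    # encoder model also returns its full output sequence.
    enc_result = encoder_model.predict(input_seq)
    if has_attention:
        enc_outs, state_h, state_c = enc_result
    else:
        state_h, state_c = enc_result
    states = [state_h, state_c]
    # Start with a target sequence of size 1: the start-of-sequence token.
    target_seq = np.array([[target_token_index['<s>']]])
    decoded_words = []
    for _ in range(max_decoder_seq_length):
        if has_attention:
            output_tokens, h, c = decoder_model.predict(
                [target_seq] + states + [enc_outs])
        else:
            output_tokens, h, c = decoder_model.predict(
                [target_seq] + states)
        # Sample the next word with argmax over the target vocabulary.
        sampled_index = int(np.argmax(output_tokens[0, -1, :]))
        sampled_word = reverse_target_index[sampled_index]
        if sampled_word == '</s>':
            break
        decoded_words.append(sampled_word)
        # Append the sampled word and feed the updated states back in.
        target_seq = np.array([[sampled_index]])
        states = [h, c]
    return ' '.join(decoded_words)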
def fit_model(model,
clipnorm,
learning_rate,
optimizer_name,
encoder_input_train,
decoder_input_train,
decoder_target_train,
encoder_input_val,
decoder_input_val,
decoder_target_val,
save_filename,
save_checkpoint_epochs):
"""."""
if optimizer_name == 'adam':
epsilon = 1e-07 #8e-07 # The keras default value was 1e-07 (K.epsilon())
if clipnorm is not None:
opt = keras.optimizers.Adam(clipnorm=clipnorm,
lr=learning_rate,
epsilon=epsilon)
else:
opt = keras.optimizers.Adam(epsilon=epsilon,
lr=learning_rate)
else:
raise NotImplementedError("Use optimizer_name = 'adam' for now.")
model.compile(optimizer=opt,
metrics=['accuracy'],
loss='categorical_crossentropy')
#early_stopping = EarlyStopping(monitor='val_loss', patience=10)
checkpointer = ModelCheckpoint(filepath='word_models/weights.{epoch:02d}.h5',
verbose=0,
save_weights_only=False,
save_best_only=False,
period=save_checkpoint_epochs)
#NOTE: to feed it more data, think about replacing it with fit_generator
# see https://github.com/keras-team/keras/issues/2708
history = model.fit([encoder_input_train, decoder_input_train],
decoder_target_train,
batch_size=batch_size,
epochs=epochs,
#callbacks=[early_stopping, checkpointer],
callbacks = [checkpointer],
validation_data = ([encoder_input_val, decoder_input_val],
decoder_target_val)
)
##validation_split=0.2)
model.save('word_models/'+save_filename)
return history
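# Hedged sketch (not part of the original script): the NOTE in fit_model
# points at fit_generator for data that does not fit in memory. This is one
# possible batch generator over pre-vectorized arrays; the names and the
# shuffling strategy are assumptions, not the author's implementation.
def batch_generator_sketch(encoder_input, decoder_input, decoder_target,
                           batch_size):
    """Yield ([enc_batch, dec_batch], target_batch) tuples indefinitely."""
    import numpy as np  # numpy is not imported at module level in this file
    n = len(encoder_input)
    while True:
        order = np.random.permutation(n)
        for start in range(0, n, batch_size):
            idx = order[start:start + batch_size]
            yield ([encoder_input[idx], decoder_input[idx]],
                   decoder_target[idx])
# Possible use: model.fit_generator(batch_generator_sketch(enc, dec, tgt, 256),
#                                   steps_per_epoch=len(enc) // 256, epochs=epochs)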
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description='Train a word-level LSTM seq2seq model.')
parser = argparse.ArgumentParser(description='Train a word-level LSTM seq2seq model.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train_src', type=str, help='File with source data for training.')
parser.add_argument('--train_tgt', type=str, help='File with target data for training.')
parser.add_argument('--valid_src', type=str, help='File with source data for validation.')
parser.add_argument('--valid_tgt', type=str, help='File with target data for validation.')
parser.add_argument('--test_src', type=str, help='File with source data for testing.')
parser.add_argument('--test_tgt', type=str, help='File with target data for testing.')
parser.add_argument('--epochs', type=int, help='Number of epochs to train.')
parser.add_argument('--save_checkpoint_epochs', type=int, help='Save checkpoint every N epochs.', default=5)
parser.add_argument('--num_samples_train', type=int, help='Number of training samples. Use 0 to use all of them.', default=0)
parser.add_argument('--num_samples_val', type=int, help='Number of validation samples. Use 0 to use all of them.', default=0)
parser.add_argument('--emb_file_enc', type=str, help='File with word embeddings for encoder. Use None if you do not wish to use pretrained embeddings.', default="embs/ft-embs-all-lower.vec")
parser.add_argument('--emb_file_dec', type=str, help='File with word embeddings for decoder. Use None if you do not wish to use pretrained embeddings.', default="embs/ft-embs-all-lower.vec")
parser.add_argument('--word_vec_size', type=int, help='Word embedding dimension.', default=300)
parser.add_argument('--latent_dim', type=int, help='Number of hidden units for LSTM.', default=256)
parser.add_argument('--dropout_encoder', type=float, help='Fraction of units to dropout for encoder.', default=0.3)
parser.add_argument('--dropout_decoder', type=float, help='Fraction of units to dropout for decoder.', default=0.3)
parser.add_argument('--batch_size', type=int, help='Batch size.', default=256)
parser.add_argument('--learning_rate', type=float, help='Learning rate.', default=0.001)
parser.add_argument("--trainable_src_emb", help="Train source embedding. Will be frozen by default.",
action="store_true")
parser.add_argument("--trainable_tgt_emb", help="Train target embedding. Will be frozen by default.",
action="store_true")
parser.add_argument("--attention", help="Turn on Luong attention. Will be off by default.",
action="store_true")
args = parser.parse_args()
has_attention = args.attention
embedding_size = args.word_vec_size #300 #100 #300
latent_dim = args.latent_dim #256 # 256 #256 # Could try 512, but slower
dropout_encoder = args.dropout_encoder #0.3 # 0.3
dropout_decoder = args.dropout_decoder
batch_size = args.batch_size #256 #128 #256 # See Neishi et al, 'A Bag of Useful Tricks' (2017)
learning_rate = args.learning_rate #0.001 #0.0005 # 0.0001 # Keras default was 0.001
epochs = args.epochs #3#50 #25# 200 # 200 # 100 was too low
save_checkpoint_epochs = args.save_checkpoint_epochs
MODEL_NAME = 's2s.h5'
clipnorm = None
rec_dropout = 0.0
optimizer_name = 'adam'
training_data_path_src = args.train_src
training_data_path_tgt = args.train_tgt
validation_data_path_src = args.valid_src
validation_data_path_tgt = args.valid_tgt
test_data_path_src = args.test_src
test_data_path_tgt = args.test_tgt
trainable_src_emb = args.trainable_src_emb
trainable_tgt_emb = args.trainable_tgt_emb
src_emb_file = args.emb_file_enc
tgt_emb_file = args.emb_file_dec
if src_emb_file=='None':
src_emb_file = None
if tgt_emb_file=='None':
tgt_emb_file = None
num_samples_train = args.num_samples_train
num_samples_val = args.num_samples_val
if num_samples_train == 0:
num_samples_train = None
if num_samples_val == 0:
num_samples_val = None
#training_data_path_src = "hyp_data2/src-train.txt"
#training_data_path_tgt = "hyp_data2/tgt-train.txt"
#validation_data_path_src = "hyp_data2/src-val.txt"
#validation_data_path_tgt = "hyp_data2/tgt-val.txt"
#src_emb_file = "embs/ft-embs-all-lower.vec" # Use None to not use pretrained
#tgt_emb_file = "embs/ft-embs-all-lower.vec"
#trainable_src_emb=False
#trainable_tgt_emb=False
#num_samples_train = 15000 #0 #10 #3000 #20000 #35000 # 50000 # 250000 is too large; 50000 was OK.
#num_samples_val = None
train_data = dataloader.load_data((training_data_path_src, training_data_path_tgt), numlines=num_samples_train)
val_data = dataloader.load_data((validation_data_path_src, validation_data_path_tgt), numlines=num_samples_val)
test_data = dataloader.load_data((test_data_path_src, test_data_path_tgt), numlines=num_samples_val)
#train_and_val = pd.concat([train_data, val_data], ignore_index=True)
    train_and_val_and_test = pd.concat([train_data, val_data, test_data], ignore_index=True)
from abc import ABCMeta, abstractmethod
import numpy as np
import os
import pandas as pd
from covid_xprize.standard_predictor.xprize_predictor import XPrizePredictor
import time
SEED = 0
DEFAULT_TEST_COST = 'covid_xprize/validation/data/uniform_random_costs.csv'
TEST_CONFIGS = [
# ('Default', {'start_date': '2020-08-01', 'end_date': '2020-08-05', 'costs': DEFAULT_TEST_COST}),
# ('Jan_Mar_EC_fast', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'equal', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
# ('Jan_Mar_RC_fast', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
('EQUAL', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'equal'}),
('RANDOM1', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM2', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM3', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM4', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM5', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM6', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM7', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM8', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM9', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('RANDOM10', {'start_date': '2021-01-01', 'end_date': '2021-03-31', 'costs': 'random'}),
('Jan_RC_NoDec_fast', {'start_date': '2021-01-01', 'end_date': '2021-01-31', 'train_end_date': '2020-11-30', 'costs': 'random', 'selected_geos': ['Canada', 'United States', 'United States / Texas']}),
]
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(ROOT_DIR, os.pardir, 'data')
OXFORD_FILEPATH = os.path.join(DATA_DIR, 'OxCGRT_latest.csv')
OXFORD_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
ADDITIONAL_CONTEXT_FILE = os.path.join(DATA_DIR, "Additional_Context_Data_Global.csv")
ADDITIONAL_US_STATES_CONTEXT = os.path.join(DATA_DIR, "US_states_populations.csv")
ADDITIONAL_UK_CONTEXT = os.path.join(DATA_DIR, "uk_populations.csv")
US_PREFIX = "United States / "
COUNTRY_LIST = os.path.join(DATA_DIR, 'countries_regions.txt')
PREDICTOR_PATH = 'covid_xprize/standard_predictor/models/trained_model_weights.h5'
CONTEXT_COLUMNS = ['CountryName',
'RegionName',
'GeoID',
'Date',
'ConfirmedCases',
'ConfirmedDeaths',
'Population']
NPI_MAX_VALUES = {
'C1_School closing': 3,
'C2_Workplace closing': 3,
'C3_Cancel public events': 2,
'C4_Restrictions on gatherings': 4,
'C5_Close public transport': 2,
'C6_Stay at home requirements': 3,
'C7_Restrictions on internal movement': 2,
'C8_International travel controls': 4,
'H1_Public information campaigns': 2,
'H2_Testing policy': 3,
'H3_Contact tracing': 2,
'H6_Facial Coverings': 4
}
NPI_COLUMNS = list(NPI_MAX_VALUES.keys())
CASES_COL = ['NewCases']
PRED_CASES_COL = ['PredictedDailyNewCases']
def gen_test_config(start_date=None,
end_date=None,
train_start_date=None,
train_end_date=None,
costs='random',
selected_geos=COUNTRY_LIST,
predictor=None,
update_data=False):
"""
Loads the data and splits it into train and test sets
Args:
start_date: first date to prescribe for
end_date: last date to prescribe for
train_start_date: first date in the returned train_df
train_end_date: last date in the returned train_df
costs: 'random' / 'equal' / path to csv file with costs
selected_geos: geos to prescribe for (list / path to csv file)
predictor: the predictor model used by the prescriptor
update_data: boolean for whether to re-download the Oxford data
Returns: (train_df, test_df, cost_df)
"""
assert (start_date is not None) and (end_date is not None)
df = load_historical_data(update_data=update_data)
# Test dataframe consists of NPI values up to start_date-1
pd_start_date = pd.to_datetime(start_date)
test_df = df[df['Date'] < pd_start_date].copy()
test_columns = ['GeoID', 'CountryName', 'RegionName', 'Date'] + NPI_COLUMNS
test_df = test_df[test_columns]
if costs not in ['equal', 'random']:
cost_df = pd.read_csv(costs)
else:
cost_df = generate_costs(test_df, mode=costs)
cost_df = add_geo_id(cost_df)
# Discard countries that will not be evaluated
if isinstance(selected_geos, str): # selected_geos can be a path to a csv
country_df = pd.read_csv(selected_geos,
encoding="ISO-8859-1",
dtype={'RegionName': str},
error_bad_lines=False)
country_df['RegionName'] = country_df['RegionName'].replace('', np.nan)
country_df['GeoID'] = np.where(country_df['RegionName'].isnull(),
country_df['CountryName'],
country_df['CountryName'] + ' / ' + country_df['RegionName'])
else: # selected_geos can also be a list of GeoIDs
country_df = pd.DataFrame.from_dict({'GeoID': selected_geos})
test_df = test_df[test_df['GeoID'].isin(country_df['GeoID'].unique())]
cost_df = cost_df[cost_df['GeoID'].isin(country_df['GeoID'].unique())]
# forget all historical data starting from start_date
train_df = df[df['Date'] < pd_start_date]
if predictor is not None:
predictor.df = predictor.df[predictor.df['Date'] < pd_start_date]
if train_start_date is not None:
# forget all historical data before train_start_date
        pd_train_start_date = pd.to_datetime(train_start_date)
        train_df = train_df[train_df['Date'] >= pd_train_start_date]
    if train_end_date is not None:
        # forget all historical data after train_end_date
        pd_train_end_date = pd.to_datetime(train_end_date)
        train_df = train_df[train_df['Date'] <= pd_train_end_date]
    return train_df, test_df, cost_df
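# Hedged usage sketch (not in the original module): build one
# (train_df, test_df, cost_df) split per named scenario in TEST_CONFIGS.
# The helpers referenced above (load_historical_data, generate_costs,
# add_geo_id) are assumed to be defined elsewhere in the full module.
def build_all_test_configs():
    splits = {}
    for name, kwargs in TEST_CONFIGS:
        splits[name] = gen_test_config(**kwargs)
    return splits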
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = | pd.concat([s1, s2], axis=1) | pandas.concat |
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = list(range(70000))
minor_axis = list(range(10))
major_codes = np.arange(70000)
minor_codes = np.repeat(range(10), 7000)
    # the fact that it works means it's consistent
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
# inconsistent
major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert index.is_unique is False
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product(
[np.arange(1000), np.arange(1000)], names=["one", "two"]
)
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_below_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_below_1000000.loc[(3, 0), "dest"]
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_above_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_above_1000000.loc[(3, 0), "dest"]
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame(
{"a": r, "b": r}, index=pd.MultiIndex.from_tuples([(x, x) for x in r])
)
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
df["a"].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, codes = idx.levels, idx.codes
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile("does not support mutable operations")
with pytest.raises(TypeError, match=mutable_regex):
levels[0] = levels[0]
with pytest.raises(TypeError, match=mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with pytest.raises(TypeError, match=mutable_regex):
codes[0] = codes[0]
with pytest.raises(ValueError, match="assignment destination is read-only"):
codes[0][0] = codes[0][0]
# and for names
names = idx.names
with pytest.raises(TypeError, match=mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert ind.is_monotonic
ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1)
df.index.names = ["fizz", "buzz"]
str(df)
expected = pd.DataFrame(
{"bar": np.arange(100), "foo": np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)], names=["fizz", "buzz"]
),
)
| tm.assert_frame_equal(df, expected, check_like=True) | pandas.util.testing.assert_frame_equal |
import io
import pytest
import pandas
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from src.tables import Table
g_is_valid = None
g_expected = None
# Test_Dummy is used to allow for easy and precise tests of Table.
class Table_Dummy(Table):
def __init__(self, user=None, passwd=None, hostname=None, db_name=None, schema="hive", engine=None):
super().__init__(user, passwd, hostname, db_name, schema, engine)
self._table_name = "fake"
self._index_col = "fake_key"
self._expected_cols = [
"this",
"is",
"a",
"fake",
"table"
]
self._creation_sql = "".join(["""
CREATE TABLE IF NOT EXISTS """, self._schema, ".", self._table_name, """
(
fake_key BIGSERIAL PRIMARY KEY,
this SMALLINT,
is SMALLINT,
a SMALLINT,
fake SMALLINT,
table SMALLINT
);"""])
@pytest.fixture
def instance_fixture():
return Table_Dummy("sw23", "invalid", "localhost", "aperture")
@pytest.fixture
def dummy_engine():
user = "sw23"
passwd = "<PASSWORD>"
hostname = "localhost"
db_name = "idk_something"
engine_info = "".join(["postgresql://", user, ":", passwd, "@", hostname, "/", db_name])
return create_engine(engine_info), user, passwd, hostname, db_name
@pytest.fixture
def sample_df(instance_fixture):
test_list = [["a", "b", "c", "d", "e"], ["AA", "BB", "CC", "DD", "EE"]]
sample_df = pandas.DataFrame(test_list, columns=list(instance_fixture._expected_cols))
return sample_df
@pytest.fixture
def custom_read_sql(sample_df, instance_fixture):
def read_sql(sql, engine, index_col):
expected_sql = "".join(["SELECT * FROM ", instance_fixture._schema, ".", instance_fixture._table_name, ";"])
if sql != expected_sql:
return | pandas.DataFrame() | pandas.DataFrame |
import pandas as pd
from utilities.temporal_utils import get_gtfs_dates_by_type
from utilities.validators import validate_gtfs_representation
from utilities.constants import (
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
SATURDAY,
SUNDAY,
DATE,
SERVICE_ID,
EXCEPTION_TYPE,
)
PD_DATE_FORMAT = "%Y%m%d"
SERVICE_DATE_FORMAT = "%Y-%m-%d"
DATE_KEY = "date"
DATASET_DATE_TYPE = "dataset_date_type"
FEED_DATE_KEY = "feed_date_type"
MIN_MAX_ATTR = "min_max_attr"
SERVICE_DATE_ATTR = "service_date_attr"
CALENDAR_DATE_KEY = "calendar_date_key"
START_DATE_MAP = {
DATASET_DATE_TYPE: "start_date",
FEED_DATE_KEY: "feed_start_date",
MIN_MAX_ATTR: "min",
SERVICE_DATE_ATTR: "start_service_date",
CALENDAR_DATE_KEY: "start_date",
}
END_DATE_MAP = {
DATASET_DATE_TYPE: "end_date",
FEED_DATE_KEY: "feed_end_date",
MIN_MAX_ATTR: "max",
SERVICE_DATE_ATTR: "end_service_date",
CALENDAR_DATE_KEY: "end_date",
}
CALENDAR_DATES_REQUIRED_COLUMNS = {
DATE,
SERVICE_ID,
EXCEPTION_TYPE,
}
CALENDAR_REQUIRED_COLUMNS = {
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
SATURDAY,
SUNDAY,
SERVICE_ID,
}
def process_start_service_date_for_gtfs_metadata(gtfs_representation):
return process_service_date_for_gtfs_metadata(gtfs_representation, START_DATE_MAP)
def process_end_service_date_for_gtfs_metadata(gtfs_representation):
return process_service_date_for_gtfs_metadata(gtfs_representation, END_DATE_MAP)
def process_service_date_for_gtfs_metadata(gtfs_representation, service_date_map):
"""Execute the ``ProcessStartServiceDateForGtfsMetadata`` or ``ProcessEndServiceDateForGtfsMetadata`` use case
depending on which service_date it receives.
Process the start service date using the `feed_info`, `calendar` and `calendar_dates` files
from the GTFS dataset of the representation.
Add the start service date to the representation metadata once processed.
:param gtfs_representation: The representation of the GTFS dataset to process.
:param service_date_map: Either START_DATE_MAP or END_DATE_MAP.
:return: The representation of the GTFS dataset post-execution.
"""
validate_gtfs_representation(gtfs_representation)
dataset = gtfs_representation.dataset
metadata = gtfs_representation.metadata
calendar_required_columns = CALENDAR_REQUIRED_COLUMNS.union(
{service_date_map[CALENDAR_DATE_KEY]}
)
feed_info_is_present = (
dataset.feed_info is not None
and service_date_map[FEED_DATE_KEY] in dataset.feed_info.columns
and not dataset.feed_info[service_date_map[FEED_DATE_KEY]].isnull().values.all()
)
calendar_is_present = (
dataset.calendar is not None
and calendar_required_columns.issubset(dataset.calendar.columns)
)
calendar_dates_are_present = (
dataset.calendar_dates is not None
and CALENDAR_DATES_REQUIRED_COLUMNS.issubset(dataset.calendar_dates.columns)
)
if feed_info_is_present or calendar_is_present or calendar_dates_are_present:
if feed_info_is_present:
# Extract start service date from feed info if the file is provided
# or
# Extract end service date from feed info if the file is provided
feed_dates = dataset.feed_info[service_date_map[FEED_DATE_KEY]]
filtered_feed_info = dataset.feed_info.loc[feed_dates.notnull()]
dates = pd.to_datetime(
filtered_feed_info[service_date_map[FEED_DATE_KEY]],
format=PD_DATE_FORMAT,
)
else:
# Extract the start dates in the dataset representation
# or
# Extract the end dates in the dataset representation
dataset_dates = get_gtfs_dates_by_type(
dataset, date_type=service_date_map[DATASET_DATE_TYPE]
)
dates = pd.to_datetime(dataset_dates[DATE_KEY], format=PD_DATE_FORMAT)
# Get first start service date with min() and converting the date into a ISO 8601 string
# or
# Get last end service date with max() and converting the date into a ISO 8601 string
service_date = getattr(dates, service_date_map[MIN_MAX_ATTR])()
if | pd.notna(service_date) | pandas.notna |
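# Hedged sketch (added for illustration; not part of the original module): the
# core of the start/end service date logic above is "parse yyyymmdd strings,
# then take min() for the start date or max() for the end date, and render the
# result as ISO 8601". The literals below mirror PD_DATE_FORMAT and
# SERVICE_DATE_FORMAT defined earlier; the toy dates are invented.
def _sketch_min_max_service_date():
    import pandas as pd
    toy_dates = pd.Series(["20210615", "20210101", "20211231"])
    parsed = pd.to_datetime(toy_dates, format="%Y%m%d")
    start_service_date = parsed.min().strftime("%Y-%m-%d")  # '2021-01-01'
    end_service_date = parsed.max().strftime("%Y-%m-%d")    # '2021-12-31'
    return start_service_date, end_service_date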
import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
pyreadstat = pytest.importorskip("pyreadstat")
def test_spss_labelled_num(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-num.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"VAR00002": 1.0}, index=[0])
tm.assert_frame_equal(df, expected)
def test_spss_labelled_num_na(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-num-na.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": ["This is one", None]})
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = | pd.DataFrame({"VAR00002": [1.0, np.nan]}) | pandas.DataFrame |
#!/usr/bin/env python3
import sys
import struct
import pandas as pd
import matplotlib
# Must be before importing matplotlib.pyplot or pylab!
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
###############################################
dsize = 16
###############################################
def getFrame(data, iter = None):
if iter is None:
return data
else:
return data[data.iter==iter]
def dirtyCls(data, iter = None):
df = getFrame(data, iter)
return sum(df.bits.apply(lambda x: sum(x)))
def dirtyPages(data, iter = None):
df = getFrame(data, iter)
return len(df.page)
def dirtyClsB(data, iter = None):
return dirtyCls(data, iter) * 64
def dirtyPagesB(data, iter = None):
return dirtyPages(data, iter) * 4096
def avgDirtyCls(data):
numIter = len(data.iter.unique())
return dirtyCls(data) / float(numIter)
def avgDirtyPages(data):
numIter = len(data.iter.unique())
return dirtyPages(data) / float(numIter)
def avgDirtyClsPerPage(data, iter = None):
df = getFrame(data, iter)
numPages = dirtyPages(df)
return dirtyCls(df) / float(numPages)
def getDirtyCLsPerPage(fileContent, meta, iterFirst = None, iterLast = None):
if iterFirst is None:
### skip iteration 0 because we set all cache lines to dirty in that iteration
iterFirst = meta.iter.iloc[1]
if iterLast is None:
iterLast = len(meta.iter)
dfF = pd.DataFrame({'cnt':[0]*64}, index=range(1,65))
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
        df = pd.DataFrame({'cnt': [sum(data.bits.apply(lambda x: sum(x)) == XX) for XX in range(1, 65)]}, index=range(1, 65))
dfF = dfF+df
return dfF
def getDiffPagesClsB(fileContent, meta, iterFirst = None, iterLast = None):
if iterFirst is None:
iterFirst = meta.iter.iloc[0]
if iterLast is None:
iterLast = len(meta.iter)
df = pd.DataFrame()
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
dcl = dirtyClsB(data)
dp = dirtyPagesB(data)
df1 = pd.DataFrame({'iter':[i], 'dirtyCl':[dcl], 'dirtyP':[dp], 'amplif':[dp*1.0/dcl], 'pcnt':[dcl*100.0/dp]})
df = df.append(df1)
return df
def readBinFile(filename):
with open(filename, mode='rb') as file:
fileContent = file.read()
return fileContent
def getMetadata(fileContent):
first = 0
totalSize = len(fileContent)
meta=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
df1 = pd.DataFrame({'iter':[iter], 'count':[count], 'pos':[first]})
meta = meta.append(df1)
first = count * dsize + (first + dsize)
return meta
def getDataframeWBitlist(fileContent):
first = 0
totalSize = len(fileContent)
data=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
output = struct.unpack(count*'QQ', fileContent[(first+dsize):count*dsize+(first+dsize)])
dfbits = | pd.DataFrame({'bits':output[1::2]}) | pandas.DataFrame |
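# Hedged sketch (added for illustration; not part of the original script): the
# binary layout assumed above is a stream of records, each starting with a
# 16-byte header of two uint64 values (iter, count) followed by `count` pairs
# of uint64 (page, bits). The synthetic round trip below packs one record and
# unpacks it the same way getMetadata/getDataframeWBitlist do.
def _sketch_record_roundtrip():
    import struct
    dsize = 16  # mirrors the module-level constant above
    iter_no, pages = 3, [(4096, 0b1011), (8192, 0b0001)]
    buf = struct.pack("QQ", iter_no, len(pages))
    for page, bits in pages:
        buf += struct.pack("QQ", page, bits)
    hdr_iter, hdr_count = struct.unpack("QQ", buf[:dsize])
    body = struct.unpack(hdr_count * 'QQ', buf[dsize:dsize + hdr_count * dsize])
    return hdr_iter, hdr_count, body  # (3, 2, (4096, 11, 8192, 1))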
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons Public API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import datetime
import json
from itertools import product
from . import _auth
import pandas as pd
_PLACES = ('City', 'County', 'State', 'Country', 'Continent')
_CLIENT_ID = ('66054275879-a0nalqfe2p9shlv4jpra5jekfkfnr8ug.apps.googleusercontent.com')
_CLIENT_SECRET = '<KEY>'
_API_ROOT = 'https://datcom-api.appspot.com'
_MICRO_SECONDS = 1000000
_EPOCH_START = datetime.datetime(year=1970, month=1, day=1)
def _year_epoch_micros(year):
"""Get the timestamp of the start of a year in micro seconds.
Args:
year: An integer number of the year.
Returns:
Timestamp of the start of a year in micro seconds.
"""
now = datetime.datetime(year=year, month=1, day=1)
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
def _date_epoch_micros(date_string):
"""Get the timestamp of the date string in micro seconds.
Args:
date_string: An string of date
Returns:
Timestamp of the start of a year in micro seconds.
"""
now = datetime.datetime.strptime(date_string, '%Y-%m-%d')
return int((now - _EPOCH_START).total_seconds()) * _MICRO_SECONDS
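# Hedged consistency check (added for illustration): both helpers above count
# whole seconds since the Unix epoch and scale to microseconds, so they agree
# on January 1st of a given year, and consecutive days differ by exactly
# 86400 * _MICRO_SECONDS.
def _sketch_epoch_micros_consistency():
    assert _year_epoch_micros(2017) == _date_epoch_micros('2017-01-01')
    one_day = _date_epoch_micros('2017-01-02') - _date_epoch_micros('2017-01-01')
    assert one_day == 86400 * _MICRO_SECONDS
    return one_day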
class Client(object):
"""Provides Data Commons API."""
def __init__(self,
client_id=_CLIENT_ID,
client_secret=_CLIENT_SECRET,
api_root=_API_ROOT):
self._service = _auth.do_auth(client_id, client_secret, api_root)
response = self._service.get_prop_type(body={}).execute()
self._prop_type = defaultdict(dict)
self._inv_prop_type = defaultdict(dict)
for t in response.get('type_info', []):
self._prop_type[t['node_type']][t['prop_name']] = t['prop_type']
if t['prop_type'] != 'Text':
self._inv_prop_type[t['prop_type']][t['prop_name']] = t['node_type']
self._inited = True
def query(self, datalog_query, max_rows=100):
"""Performs a query returns results as a table.
Args:
datalog_query: string representing datalog query in [TODO(shanth): link]
max_rows: max number of returned rows.
Returns:
A pandas.DataFrame with the selected variables in the query as the
      column names. If the query returns multiple values for a property then
the result is flattened into multiple rows.
Raises:
RuntimeError: some problem with executing query (hint in the string)
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
try:
response = self._service.query(body={
'query': datalog_query,
'options': {
'row_count_limit': max_rows
}
}).execute()
except Exception as e: # pylint: disable=broad-except
raise RuntimeError('Failed to execute query: %s' % e)
header = response.get('header', [])
rows = response.get('rows', [])
result_dict = {header: [] for header in header}
for row in rows:
cells = row.get('cells', [])
if len(cells) != len(header):
raise RuntimeError(
'Response #cells mismatches #header: {}'.format(response))
cell_values = []
for key, cell in zip(header, cells):
if not cell:
cell_values.append([''])
else:
try:
cell_values.append(cell['value'])
except KeyError:
raise RuntimeError('No value in cell: {}'.format(row))
# Iterate through the cartesian product to flatten the query results.
for values in product(*cell_values):
for idx, key in enumerate(header):
result_dict[key].append(values[idx])
return pd.DataFrame(result_dict)[header]
def expand(self,
pd_table,
arc_name,
seed_col_name,
new_col_name,
outgoing=True,
max_rows=100):
"""Create a new column with values for the given property.
The existing pandas dataframe should include a column containing entity IDs
for a certain schema.org type. This function populates a new column with
property values for the entities and adds additional rows if a property has
repeated values.
Args:
pd_table: Pandas dataframe that contains entity information.
arc_name: The property to add to the table.
seed_col_name: The column name that contains entity (ids) that the added
properties belong to.
new_col_name: New column name.
outgoing: Set this flag if the property points away from the entities
denoted by the seed column.
max_rows: The maximum number of rows returned by the query results.
Returns:
A pandas.DataFrame with the additional column and rows added.
Raises:
ValueError: when input argument is not valid.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute query'
if seed_col_name not in pd_table:
raise ValueError('%s is not a valid seed column name' % seed_col_name)
if new_col_name in pd_table:
raise ValueError(
'%s is already a column name in the data frame' % new_col_name)
seed_col = pd_table[seed_col_name]
seed_col_type = seed_col[0]
assert seed_col_type != 'Text', 'Parent entity should not be Text'
# Determine the new column type
if outgoing:
if arc_name not in self._prop_type[seed_col_type]:
raise ValueError(
'%s does not have outgoing property %s' % (seed_col_type, arc_name))
new_col_type = self._prop_type[seed_col_type][arc_name]
else:
if arc_name not in self._inv_prop_type[seed_col_type]:
raise ValueError(
'%s does not have incoming property %s' % (seed_col_type, arc_name))
new_col_type = self._inv_prop_type[seed_col_type][arc_name]
dcids = ' '.join(seed_col[1:]).strip()
if not dcids:
# All entries in the seed column are empty strings. The new column should
# contain no entries.
pd_table[new_col_name] = ""
pd_table[new_col_name][0] = new_col_type
return pd_table
seed_col_var = seed_col_name.replace(' ', '_')
new_col_var = new_col_name.replace(' ', '_')
if outgoing:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?node ?{new_col_var}').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
else:
query = ('SELECT ?{seed_col_var} ?{new_col_var},'
'typeOf ?node {seed_col_type},'
'dcid ?node {dcids},'
'dcid ?node ?{seed_col_var},'
'{arc_name} ?{new_col_var} ?node').format(
arc_name=arc_name,
seed_col_var=seed_col_var,
seed_col_type=seed_col_type,
new_col_var=new_col_var,
dcids=dcids)
# Run the query and merge the results.
return self._query_and_merge(
pd_table,
query,
seed_col_name,
new_col_name,
seed_col_var,
new_col_var,
new_col_type,
max_rows=max_rows)
# ----------------------- OBSERVATION QUERY FUNCTIONS -----------------------
def get_instances(self, col_name, instance_type, max_rows=100):
"""Get a list of instance dcids for a given type.
Args:
col_name: Column name for the returned column.
instance_type: String of the instance type.
      max_rows: Max number of returned rows.
Returns:
A pandas.DataFrame with instance dcids.
"""
assert self._inited, 'Initialization was unsuccessful, cannot execute Query'
query = ('SELECT ?{col_name},'
'typeOf ?node {instance_type},'
'dcid ?node ?{col_name}').format(
col_name=col_name, instance_type=instance_type)
type_row = pd.DataFrame(data=[{col_name: instance_type}])
try:
dcid_column = self.query(query, max_rows)
except RuntimeError as e:
raise RuntimeError('Execute query\n%s\ngot an error:\n%s' % (query, e))
return | pd.concat([type_row, dcid_column], ignore_index=True) | pandas.concat |
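# Hedged sketch (added for illustration; not part of the API): how the
# cartesian-product flattening inside Client.query behaves. When one cell
# carries two values for a property, the row expands into two output rows,
# one per combination. The toy header/cells below are invented.
def _sketch_query_flattening():
    from itertools import product
    import pandas as pd
    header = ['name', 'containedIn']
    cell_values = [['San Jose'], ['Santa Clara County', 'California']]
    result_dict = {h: [] for h in header}
    for values in product(*cell_values):
        for idx, key in enumerate(header):
            result_dict[key].append(values[idx])
    return pd.DataFrame(result_dict)[header]  # two rows for the single entity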
'''
Display the importance scores of the weights
'''
import os
import matplotlib.pyplot as plt
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Timeline, Bar, HeatMap, Line, Page
from pyecharts.faker import Faker
from pyecharts.globals import ThemeType
import numpy as np
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.join(PROJECT_PATH, '../data/fishBehavior')
plt.rc('font', family='Times New Roman')
fontsize = 12.5
ANGLE_NAME = ['Angle_0.0', 'Angle_20.0', 'Angle_40.0', 'Angle_60.0', 'Angle_80.0', 'Angle_100.0', 'Angle_120.0',
'Angle_140.0', 'Angle_160.0']
ACC_NAME = ['AccSpeed_0.0','AccSpeed_2.0','AccSpeed_4.0','AccSpeed_6.0','AccSpeed_8.0']
def format_data(data: pd.DataFrame, time_list: list, name_list: list) -> dict:
data = data.T.to_dict()
fdata = {}
for t_id, vdata in data.items():
fdata[time_list[t_id]] = [v for region, v in vdata.items()]
for min_t in time_list:
temp = fdata[min_t]
for i in range(len(temp)):
fdata[min_t][i] = {"name": name_list[i], "value": temp[i]}
return fdata
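# Hedged example (added for illustration; not called anywhere): format_data
# turns a one-column-per-region frame into {time: [{"name": region, "value": v},
# ...]}, which is the shape the pyecharts timeline bars expect. The two-row
# frame below is a toy input.
def _sketch_format_data():
    toy = pd.DataFrame({'1_1': [0.1, 0.2], '2_CK': [0.3, 0.4]})
    return format_data(toy, time_list=[0, 1], name_list=['1_1', '2_CK'])
    # -> {0: [{'name': '1_1', 'value': 0.1}, {'name': '2_CK', 'value': 0.3}],
    #     1: [{'name': '1_1', 'value': 0.2}, {'name': '2_CK', 'value': 0.4}]}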
#####################################################################################
# 2002 - 2011 年的数据
def get_year_overlap_chart(total_data, time_mim: int) -> Bar:
bar = (
Bar()
.add_xaxis(xaxis_data=name_list)
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack1'
)
bar.add_yaxis(
series_name="velocity",
y_axis=total_data["velocity"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
bar.add_yaxis(
series_name="distance",
y_axis=total_data["distance"][time_mim],
is_selected=True,
label_opts=opts.LabelOpts(is_show=False),
stack=f'stack2'
)
# print(total_data["bottom_time"][time_mim])
# print(Faker.values())
# exit(33)
# bar.add_yaxis("moving time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
# bar.add_yaxis("static time", [31, 58, 80, 26], stack="stack1", category_gap="50%")
bar.set_global_opts(
title_opts=opts.TitleOpts(
            title="Zebrafish locomotion indicators after {} minutes".format(time_mim)
),
datazoom_opts=opts.DataZoomOpts(),
tooltip_opts=opts.TooltipOpts(
is_show=True, trigger="axis", axis_pointer_type="shadow"
),
)
return bar
def getLine(v_data, name):
l = (
Line()
.add_xaxis(xaxis_data=[str(_) for _ in time_list])
.add_yaxis(
series_name="1_1",
y_axis=v_data['1_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="2_CK",
y_axis=v_data['2_CK'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="3_1",
y_axis=v_data['3_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="4_1",
y_axis=v_data['4_1'],
label_opts=opts.LabelOpts(is_show=False),
)
.set_series_opts(
areastyle_opts=opts.AreaStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(
title_opts=opts.TitleOpts(title=name),
tooltip_opts=opts.TooltipOpts(trigger="axis"),
datazoom_opts=opts.DataZoomOpts(),
yaxis_opts=opts.AxisOpts(
type_="value",
axistick_opts=opts.AxisTickOpts(is_show=True),
splitline_opts=opts.SplitLineOpts(is_show=True),
),
xaxis_opts=opts.AxisOpts(type_="category", boundary_gap=False),
)
)
return l
def getStackBar(top_data, bottom_time, name1, name2, name):
def format(t):
region = {}
for i in name_list:
td = t[i].values
list1 = []
for v in td:
list1.append({
"value": v,
"percent": v,
})
region[i] = list1
return region
td = format(top_data)
bd = format(bottom_time)
c = (
Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
.add_xaxis(["Time " + str(_) + ":" + "/".join(name_list) for _ in time_list])
)
for idx, i in enumerate(name_list):
c.add_yaxis(name1, td[i], stack=f'stack{idx}')
c.add_yaxis(name2, bd[i], stack=f'stack{idx}')
c.set_series_opts(
label_opts=opts.LabelOpts(is_show=False)
)
c.set_global_opts(
xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15)),
datazoom_opts=opts.DataZoomOpts(),
title_opts=opts.TitleOpts(title=name)
)
return c
def getHeatMap(data, time_list, name):
def formatHeatmapData(rdata):
heat_data = []
rdata = np.around(rdata, decimals=3)
for t in range(rdata.shape[0]):
for a in range(rdata.shape[1]):
heat_data.append([t, a, rdata[t][a]])
return heat_data
c = (
HeatMap()
)
c.add_xaxis(time_list)
for region_name, v in data.items():
heat_data = formatHeatmapData(data[region_name].values)
if 'Acceleration' in name:
c.add_yaxis(
region_name,
ACC_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
elif 'Angle' in name:
c.add_yaxis(
region_name,
ANGLE_NAME,
heat_data,
label_opts=opts.LabelOpts(is_show=True, position="inside"),
)
c.set_global_opts(
title_opts=opts.TitleOpts(title=name),
datazoom_opts=opts.DataZoomOpts(),
visualmap_opts=opts.VisualMapOpts(min_=0, max_=1),
)
return c
if __name__ == '__main__':
import argparse
import pandas as pd
ap = argparse.ArgumentParser()
ap.add_argument("-tid", "--t_ID", default="D01")
ap.add_argument("-lid", "--l_ID", default="D02")
ap.add_argument("-rid", "--r_ID", default="D04")
ap.add_argument("-iP", "--indicatorPath", default="E:\\data\\3D_pre/exp_pre/indicators/")
ap.add_argument("-o", "--outputPath", default="E:\\data\\3D_pre/exp_pre/results/")
args = vars(ap.parse_args())
outputPath = args["outputPath"]
if not os.path.exists(outputPath):
os.mkdir(outputPath)
files = os.listdir(args["indicatorPath"])
all_nos = []
for ifile in files:
no = ifile.split("_")[0]
start_no, end_no = no.split("-")
str_start_no = start_no.zfill(4)
str_end_no = end_no.zfill(4)
if (str_start_no, str_end_no) in all_nos:
continue
else:
all_nos.append((str_start_no, str_end_no))
all_nos.sort()
time_list = [_ for _ in range(0, int(all_nos[-1][1]))]
total_data = {}
name_list = [
"1_1",
"2_CK",
"3_1",
"4_1"
]
v_data = pd.DataFrame()
d_data = pd.DataFrame()
top_data = pd.DataFrame()
bottom_time = pd.DataFrame()
stop_time = pd.DataFrame()
moving_time = pd.DataFrame()
angle_data = {region_name: None for region_name in name_list}
acc_data = {region_name: None for region_name in name_list}
for ino in all_nos:
no_v_data = pd.DataFrame()
no_d_data = pd.DataFrame()
no_top_data = pd.DataFrame()
no_bottom_time = pd.DataFrame()
no_stop_time = pd.DataFrame()
no_moving_time = | pd.DataFrame() | pandas.DataFrame |
"""
Really, mostly data getters.
get_toi1937_lightcurve
get_groundphot
get_autorotation_dataframe
get_gaia_basedata
_get_nbhd_dataframes
_get_fullfaint_dataframes
_get_fullfaint_edr3_dataframes
_get_denis_fullfaint_edr3_dataframes
_get_extinction_dataframes
_get_median_ngc2516_core_params
get_denis_xmatch
append_phot_binary_column
PleaidesQuadProtModel
"""
import os, collections, pickle
import numpy as np, pandas as pd
from glob import glob
from copy import deepcopy
from numpy import array as nparr
from astropy.io import fits
from astropy import units as u
from astropy.table import Table
from astroquery.vizier import Vizier
from astroquery.xmatch import XMatch
import cdips.utils.lcutils as lcu
import cdips.lcproc.detrend as dtr
import cdips.lcproc.mask_orbit_edges as moe
from cdips.utils.catalogs import (
get_cdips_catalog, get_tic_star_information
)
from cdips.utils.gaiaqueries import (
query_neighborhood, given_source_ids_get_gaia_data,
given_dr2_sourceids_get_edr3_xmatch
)
from earhart.paths import PHOTDIR, RESULTSDIR, DATADIR
def get_toi1937_lightcurve():
"""
Create the stitched CDIPS FFI light curve for TOI 1937. (Starting from the
raw light curves, and the PCA eigenvectors previously made for this
sector). Note: the main execution of this PCA detrending happens on
phtess2.
A few notes:
* 3 eigenvectors were used, plus the background light BGV timeseries.
* a +/-12 hour orbit edge mask was used (to avoid what looked like
scattered light)
* the output can be checked at
/results/quicklook_lcs/5489726768531119616_allvar_report.pdf
"""
picklepath = os.path.join(
PHOTDIR, 'toi1937_merged_stitched_s7s9_lc_20201130.pkl'
)
if not os.path.exists(picklepath):
# Use the CDIPS IRM2 light curves as starting base.
# 5489726768531119616_s09_llc.fits
lcpaths = glob(os.path.join(PHOTDIR, '*_s??_llc.fits'))
assert len(lcpaths) == 2
infodicts = [
{'SECTOR': 7, 'CAMERA': 3, 'CCD': 4, 'PROJID': 1527},
{'SECTOR': 9, 'CAMERA': 3, 'CCD': 3, 'PROJID': 1558},
]
##########################################
# next ~45 lines pinched from cdips.drivers.do_allvariable_report_making
##########################################
#
# detrend systematics. each light curve yields tuples of:
# primaryhdr, data, ap, dtrvecs, eigenvecs, smooth_eigenvecs
#
dtr_infos = []
for lcpath, infodict in zip(lcpaths, infodicts):
dtr_info = dtr.detrend_systematics(
lcpath, infodict=infodict, max_n_comp=3
)
dtr_infos.append(dtr_info)
#
# stitch all available light curves
#
ap = dtr_infos[0][2]
timelist = [d[1]['TMID_BJD'] for d in dtr_infos]
maglist = [d[1][f'PCA{ap}'] for d in dtr_infos]
magerrlist = [d[1][f'IRE{ap}'] for d in dtr_infos]
extravecdict = {}
extravecdict[f'IRM{ap}'] = [d[1][f'IRM{ap}'] for d in dtr_infos]
for i in range(0,7):
extravecdict[f'CBV{i}'] = [d[3][i, :] for d in dtr_infos]
time, flux, fluxerr, vec_dict = lcu.stitch_light_curves(
timelist, maglist, magerrlist, extravecdict
)
#
# mask orbit edges
#
s_time, s_flux, inds = moe.mask_orbit_start_and_end(
time, flux, raise_expectation_error=False, orbitgap=0.7,
orbitpadding=12/24,
return_inds=True
)
s_fluxerr = fluxerr[inds]
#
# save output
#
ap = dtr_infos[0][2]
lcdict = {
'source_id': np.int64(5489726768531119616),
'E_BpmRp': 0.1343,
'ap': ap,
'TMID_BJD': time,
f'IRM{ap}': vec_dict[f'IRM{ap}'],
f'PCA{ap}': flux,
f'IRE{ap}': fluxerr,
'STIME': s_time.astype(np.float64),
f'SPCA{ap}': s_flux.astype(np.float64),
f'SPCAE{ap}': s_fluxerr.astype(np.float64),
'dtr_infos': dtr_infos,
'vec_dict': vec_dict,
'tess_texp': np.nanmedian(np.diff(s_time))
}
with open(picklepath , 'wb') as f:
pickle.dump(lcdict, f)
#
# verify output
#
from cdips.plotting.allvar_report import make_allvar_report
plotdir = os.path.join(RESULTSDIR, 'quicklook_lcs')
outd = make_allvar_report(lcdict, plotdir)
with open(picklepath, 'rb') as f:
print(f'Found {picklepath}: loading it!')
lcdict = pickle.load(f)
return (
lcdict['STIME'].astype(np.float64) - 2457000,
lcdict['SPCA2'].astype(np.float64),
lcdict['SPCAE2'].astype(np.float64),
lcdict['tess_texp']
)
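# Hedged usage sketch (added for illustration; never called, and it assumes the
# stitched pickle already exists under PHOTDIR): the getter above returns BTJD
# times, the orbit-edge-masked PCA flux, its uncertainty, and the median
# exposure time, so a quick-look plot is just a scatter of the first two.
def _sketch_plot_toi1937_lightcurve(outpath='toi1937_quicklook.png'):
    import matplotlib.pyplot as plt
    time, flux, fluxerr, texp = get_toi1937_lightcurve()
    fig, ax = plt.subplots(figsize=(8, 3))
    ax.scatter(time, flux, c='k', s=1)
    ax.set_xlabel('BTJD')
    ax.set_ylabel('PCA flux')
    fig.savefig(outpath, dpi=200, bbox_inches='tight')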
def get_groundphot(datestr=None):
lcglob = os.path.join(PHOTDIR, 'collected',
f'*{datestr}*.txt')
lcpath = glob(lcglob)
assert len(lcpath) == 1
lcpath = lcpath[0]
if 'epdlc' in lcpath:
# LCOGT reduced by Joel Hartman format.
colnames = [
"frameid", "time_bjd_UTC_minus_2400000", "raw_mag_ap1",
"raw_mag_err_ap1", "quality_ap1", "raw_mag_ap2", "raw_mag_err_ap2",
"quality_ap2", "raw_mag_ap3", "raw_mag_err_ap3", "quality_ap3",
"fit_mag_ap1", "fit_mag_ap2", "fit_mag_ap3", "epd_mag_ap1",
"epd_mag_ap2", "epd_mag_ap3", "x_px", "y_px", "bkgd",
"bkgd_deviation", "S", "D", "K", "hour_angle", "zenith_distance",
"time_JD_UTC"
]
df = pd.read_csv(lcpath, delim_whitespace=True, names=colnames,
comment='#')
# TT = TAI + 32.184 = UTC + (number of leap seconds) + 32.184
# TDB ~= TT
# for these data the leap second list indicates 37 is the correct
# number: https://www.ietf.org/timezones/data/leap-seconds.list
t_offset = (37 + 32.184)*u.second
x_obs_bjd_utc = np.array(df["time_bjd_UTC_minus_2400000"]) + 2400000
# return times in BJD_TDB
x_obs = x_obs_bjd_utc + float(t_offset.to(u.day).value)
y_obs, y_err = (
lcu._given_mag_get_flux(df['fit_mag_ap1'], df["raw_mag_err_ap1"])
)
t_exp = np.nanmedian(np.diff(x_obs))
elif 'El_Sauce' in lcpath:
# <NAME>'s El Sauce reduction format.
raise NotImplementedError
return x_obs, y_obs, y_err, t_exp
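# Illustrative helper (not part of the original pipeline): the BJD_UTC ->
# BJD_TDB shift applied in get_groundphot, isolated for clarity. It relies on
# the module's astropy.units import (as `u`); the 37 leap seconds and the
# 32.184 s TT-TAI offset are the assumptions documented in the comments above,
# giving an offset of roughly 8.0e-4 days.
def _example_utc_to_tdb_offset_days(n_leap_seconds=37):
    """Return the (TDB - UTC) offset in days used by get_groundphot."""
    t_offset = (n_leap_seconds + 32.184)*u.second
    return float(t_offset.to(u.day).value)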
def get_gaia_basedata(basedata):
if basedata == 'extinctioncorrected':
raise NotImplementedError('need to implement extinction')
nbhd_df, core_df, halo_df, full_df, target_df = _get_extinction_dataframes()
elif basedata == 'fullfaint':
nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_dataframes()
elif basedata == 'fullfaint_edr3':
nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_edr3_dataframes()
elif basedata == 'bright':
nbhd_df, core_df, halo_df, full_df, target_df = _get_nbhd_dataframes()
else:
raise NotImplementedError
full_df = append_phot_binary_column(full_df)
return nbhd_df, core_df, halo_df, full_df, target_df
def _get_nbhd_dataframes():
"""
WARNING!: this "bright" subset is a crossmatch between the full NGC 2516
target list (CG18+KC19+M21), and the CDIPS target catalog (G_Rp<16; v0.4).
However, since the CDIPS targets didn't incorporate M21, it's not as direct
of a match as desired. This is fine for understanding the auto-detection of
rotation periods. But for overall cluster rotation period completeness,
it's not.
The "neighborhood" was selected via
bounds = { 'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45 }
nbhd_df = query_neighborhood(bounds, groupname, n_max=6000,
overwrite=False, manual_gmag_limit=17)
This procedure yields:
Got 7052 neighbors with Rp<16
Got 893 in core from CDIPS target catalog
Got 1345 in corona from CDIPS target catalog
"""
df = get_cdips_catalog(ver=0.4)
nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_dataframes()
#
# do the "bright" selection by a crossmatch between the full target list
# and the CDIPS catalog. so implicitly, it's a CDIPS target star catalog
    # match. this misses some Meingast stars, because they were not in the
    # CDIPS v0.4 target list. as noted in the docstring above, that is
    # acceptable for the rotation-period auto-detection analysis.
#
cdips_df = df['source_id']
mdf = full_df.merge(cdips_df, on='source_id', how='inner')
nbhd_df = nbhd_df[nbhd_df.phot_rp_mean_mag < 16]
core_df = mdf[mdf.subcluster == 'core']
halo_df = mdf[mdf.subcluster == 'halo']
print(42*'.')
print('"Bright" sample:')
print(f'...Got {len(nbhd_df)} neighbors with Rp<16')
print(f'...Got {len(core_df)} in core from CDIPS target catalog')
print(f'...Got {len(halo_df)} in corona from CDIPS target catalog')
print(42*'.')
return nbhd_df, core_df, halo_df, full_df, target_df
def _get_fullfaint_dataframes():
"""
Return: nbhd_df, core_df, halo_df, full_df, target_df
(for NGC 2516, "full faint" sample -- i.e., as faint as possible.)
The "core" is all available Cantat-Gaudin 2018 members, with no magnitude
cutoff.
The "halo" is the full Kounkel & Covey 2019 + Meingast 2021 member set,
provided that the source is not in the core. (i.e., KC19 and M21 get no
points for getting the "core" targets correct).
The "neighborhood" was selected via
bounds = { 'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45 }
nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
overwrite=False, manual_gmag_limit=19)
This procedure yields:
Got 1106 in fullfaint CG18
Got 3003 in fullfaint KC19
Got 1860 in fullfaint M21
Got 1912 in fullfaint KC19 after removing core matches
Got 1096 in fullfaint M21 after removing core matches
Got 280 in fullfaint M21 after removing KC19 matches
Got 13834 neighbors
Got 1106 in core
Got 2192 in corona
Got 1091 KC19 / CG18 overlaps
Got 764 M21 / CG18 overlaps
Got 3298 unique sources in the cluster.
"""
# get the full CG18 NGC 2516 memberships, downloaded from Vizier
cg18path = os.path.join(DATADIR, 'gaia',
'CantatGaudin2018_vizier_only_NGC2516.fits')
hdul = fits.open(cg18path)
cg18_tab = Table(hdul[1].data)
cg18_df = cg18_tab.to_pandas()
cg18_df['source_id'] = cg18_df['Source']
# get the full KC19 NGC 2516 memberships, from Marina's file
# NGC 2516 == "Theia 613" in Kounkel's approach.
kc19path = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
kc19_df = pd.read_csv(kc19path)
kc19_df = kc19_df[kc19_df.group_id == 613]
# get the full M21 NGC 2516 memberships
m21path = os.path.join(DATADIR, 'gaia', 'Meingast_2021_NGC2516_all1860members.fits')
m21_df = Table(fits.open(m21path)[1].data).to_pandas()
m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
print(f'Got {len(cg18_df)} in fullfaint CG18')
print(f'Got {len(kc19_df)} in fullfaint KC19')
print(f'Got {len(m21_df)} in fullfaint M21')
kc19_cg18_overlap_df = kc19_df[(kc19_df.source_id.isin(cg18_df.source_id))]
kc19_df = kc19_df[~(kc19_df.source_id.isin(cg18_df.source_id))]
print(f'Got {len(kc19_df)} in fullfaint KC19 after removing core matches')
m21_cg18_overlap_df = m21_df[(m21_df.source_id.isin(cg18_df.source_id))]
m21_df = m21_df[~(m21_df.source_id.isin(cg18_df.source_id))]
print(f'Got {len(m21_df)} in fullfaint M21 after removing core matches')
m21_df = m21_df[~(m21_df.source_id.isin(kc19_df.source_id))]
print(f'Got {len(m21_df)} in fullfaint M21 after removing KC19 matches')
##########
# NGC 2516 rough
bounds = {
'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45
}
groupname = 'customngc2516_fullfaint'
nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
overwrite=False, manual_gmag_limit=19)
# query gaia DR2 to get the fullfaint photometry
kc19_df_0 = given_source_ids_get_gaia_data(
np.array(kc19_df.source_id),
'ngc2516_kc19_earhart_fullfaint', n_max=10000, overwrite=False,
enforce_all_sourceids_viable=True
)
cg18_df_0 = given_source_ids_get_gaia_data(
np.array(cg18_df.Source),
'ngc2516_cg18_earhart_fullfaint', n_max=10000, overwrite=False,
enforce_all_sourceids_viable=True
)
m21_df_0 = given_source_ids_get_gaia_data(
np.array(m21_df.source_id),
'ngc2516_m21_earhart_fullfaint', n_max=10000, overwrite=False,
enforce_all_sourceids_viable=True
)
assert len(cg18_df) == len(cg18_df_0)
assert len(kc19_df) == len(kc19_df_0)
assert len(m21_df) == len(m21_df_0)
target_df = kc19_df_0[kc19_df_0.source_id == 5489726768531119616] # TIC 2683...
sel_nbhd = (
(~nbhd_df.source_id.isin(kc19_df.source_id))
&
(~nbhd_df.source_id.isin(cg18_df.source_id))
&
(~nbhd_df.source_id.isin(m21_df.source_id))
)
orig_nbhd_df = deepcopy(nbhd_df)
nbhd_df = nbhd_df[sel_nbhd]
print(f'Got {len(nbhd_df)} neighbors')
print(f'Got {len(cg18_df)} in core')
print(f'Got {len(kc19_df)+len(m21_df)} in corona')
print(f'Got {len(kc19_cg18_overlap_df)} KC19 / CG18 overlaps')
print(f'Got {len(m21_cg18_overlap_df)} M21 / CG18 overlaps')
#
# wrap up into the full source list
#
cg18_df_0['subcluster'] = 'core'
kc19_df_0['subcluster'] = 'halo'
m21_df_0['subcluster'] = 'halo'
core_df = cg18_df_0
halo_df = pd.concat((kc19_df_0, m21_df_0)).reset_index()
full_df = pd.concat((core_df, halo_df)).reset_index()
assert len(np.unique(full_df.source_id)) == len(full_df)
print(f'Got {len(full_df)} unique sources in the cluster.')
full_df['in_CG18'] = full_df.source_id.isin(cg18_df.source_id)
kc19_df = pd.read_csv(kc19path)
kc19_df = kc19_df[kc19_df.group_id == 613]
full_df['in_KC19'] = full_df.source_id.isin(kc19_df.source_id)
m21_df = Table(fits.open(m21path)[1].data).to_pandas()
m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
full_df['in_M21'] = full_df.source_id.isin(m21_df.source_id)
return nbhd_df, core_df, halo_df, full_df, target_df
def _get_fullfaint_edr3_dataframes():
"""
Return: nbhd_df, core_df, halo_df, full_df, target_df
(for NGC 2516, "full faint" sample -- i.e., as faint as possible, but
***after crossmatching the GAIA DR2 targets with GAIA EDR3***. This
crossmatch is run using the dr2_neighbourhood table from the Gaia archive,
and then taking the closest angular separation match for cases with
multiple matches.)
Further notes are in "_get_fullfaint_dataframes" docstring.
This procedure yields:
FOR DR2:
Got 1106 in fullfaint CG18
Got 3003 in fullfaint KC19
Got 1860 in fullfaint M21
Got 1912 in fullfaint KC19 after removing core matches
Got 1096 in fullfaint M21 after removing core matches
Got 280 in fullfaint M21 after removing KC19 matches
Got 13834 neighbors
Got 1106 in core
Got 2192 in corona
Got 1091 KC19 / CG18 overlaps
Got 764 M21 / CG18 overlaps
FOR EDR3:
Got 1106 EDR3 matches in core.
99th pct [arcsec] 1577.8 -> 0.3
Got 1912 EDR3 matches in KC19.
99th pct [arcsec] 1702.8 -> 0.5
Got 280 EDR3 matches in M21.
99th pct [arcsec] 1426.6 -> 0.3
Got 13843 EDR3 matches in nbhd.
99th pct [arcsec] 1833.9 -> 3.7
(((
CG18/core: got 1143 matches vs 1106 source id queries.
KC19/halo: got 2005 matches vs 1912 source id queries
Nbhd: got 15123 matches vs 13843 source id queries.
)))
"""
# get the full CG18 NGC 2516 memberships, downloaded from Vizier
cg18path = os.path.join(DATADIR, 'gaia',
'CantatGaudin2018_vizier_only_NGC2516.fits')
hdul = fits.open(cg18path)
cg18_tab = Table(hdul[1].data)
cg18_df = cg18_tab.to_pandas()
cg18_df['source_id'] = cg18_df['Source']
# get the full KC19 NGC 2516 memberships, from Marina's file
# NGC 2516 == "Theia 613" in Kounkel's approach.
kc19path = os.path.join(DATADIR, 'gaia', 'string_table1.csv')
kc19_df = pd.read_csv(kc19path)
kc19_df = kc19_df[kc19_df.group_id == 613]
# get the full M21 NGC 2516 memberships
m21path = os.path.join(DATADIR, 'gaia', 'Meingast_2021_NGC2516_all1860members.fits')
m21_df = Table(fits.open(m21path)[1].data).to_pandas()
m21_df = m21_df.rename(mapper={'GaiaDR2': 'source_id'}, axis=1)
print(42*'='+'\nFOR DR2:')
print(f'Got {len(cg18_df)} in fullfaint CG18')
print(f'Got {len(kc19_df)} in fullfaint KC19')
print(f'Got {len(m21_df)} in fullfaint M21')
kc19_cg18_overlap_df = kc19_df[(kc19_df.source_id.isin(cg18_df.source_id))]
kc19_df = kc19_df[~(kc19_df.source_id.isin(cg18_df.source_id))]
print(f'Got {len(kc19_df)} in fullfaint KC19 after removing core matches')
m21_cg18_overlap_df = m21_df[(m21_df.source_id.isin(cg18_df.source_id))]
m21_df = m21_df[~(m21_df.source_id.isin(cg18_df.source_id))]
print(f'Got {len(m21_df)} in fullfaint M21 after removing core matches')
m21_df = m21_df[~(m21_df.source_id.isin(kc19_df.source_id))]
print(f'Got {len(m21_df)} in fullfaint M21 after removing KC19 matches')
##########
# NGC 2516 rough
bounds = {
'parallax_lower': 1.5, 'parallax_upper': 4.0, 'ra_lower': 108,
'ra_upper': 132, 'dec_lower': -76, 'dec_upper': -45
}
groupname = 'customngc2516_fullfaint'
nbhd_df = query_neighborhood(bounds, groupname, n_max=14000,
overwrite=False, manual_gmag_limit=19)
sel_nbhd = (
(~nbhd_df.source_id.isin(kc19_df.source_id))
&
(~nbhd_df.source_id.isin(cg18_df.source_id))
&
(~nbhd_df.source_id.isin(m21_df.source_id))
)
orig_nbhd_df = deepcopy(nbhd_df)
nbhd_df = nbhd_df[sel_nbhd]
print(f'Got {len(nbhd_df)} neighbors')
print(f'Got {len(cg18_df)} in core')
print(f'Got {len(kc19_df)+len(m21_df)} in corona')
print(f'Got {len(kc19_cg18_overlap_df)} KC19 / CG18 overlaps')
print(f'Got {len(m21_cg18_overlap_df)} M21 / CG18 overlaps')
assert (
len(cg18_df)+len(kc19_df)+len(m21_df) ==
len(np.unique(np.array( | pd.concat((cg18_df, kc19_df, m21_df)) | pandas.concat |
import numpy as np
import os
import pandas as pd
import shutil
import sys
import tempfile
import time
from contextlib import contextmanager
from fastparquet import write, ParquetFile
from fastparquet.util import join_path
@contextmanager
def measure(name, result):
t0 = time.time()
yield
t1 = time.time()
result[name] = round((t1 - t0) * 1000, 3)
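# `tmpdir` is used by the benchmarks below but is not defined in this excerpt.
# A minimal sketch consistent with the tempfile/shutil imports above; the
# original helper may differ.
@contextmanager
def tmpdir():
    d = tempfile.mkdtemp()
    try:
        yield d
    finally:
        shutil.rmtree(d, ignore_errors=True)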
def time_column():
with tmpdir() as tempdir:
result = {}
fn = join_path(tempdir, 'temp.parq')
n = 10000000
r = np.random.randint(-1e10, 1e10, n, dtype='int64')
d = pd.DataFrame({'w': pd.Categorical(np.random.choice(
['hi', 'you', 'people'], size=n)),
'x': r.view('timedelta64[ns]'),
'y': r / np.random.randint(1, 1000, size=n),
'z': np.random.randint(0, 127, size=n,
dtype=np.uint8)})
d['b'] = r > 0
for col in d.columns:
df = d[[col]]
write(fn, df)
with measure('%s: write, no nulls' % d.dtypes[col], result):
write(fn, df, has_nulls=False)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, no nulls' % d.dtypes[col], result):
pf.to_pandas()
with measure('%s: write, no nulls, has_null=True' % d.dtypes[col], result):
write(fn, df, has_nulls=True)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, no nulls, has_null=True' % d.dtypes[col], result):
pf.to_pandas()
if d.dtypes[col].kind == 'm':
d.loc[n//2, col] = pd.to_datetime('NaT')
elif d.dtypes[col].kind == 'f':
d.loc[n//2, col] = np.nan
elif d.dtypes[col].kind in ['i', 'u']:
continue
            else:
                d.loc[n//2, col] = None
            # re-slice the column so the injected null is present in the
            # frame written below (d[[col]] above returned a copy)
            df = d[[col]]
with measure('%s: write, with null, has_null=True' % d.dtypes[col], result):
write(fn, df, has_nulls=True)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, with null, has_null=True' % d.dtypes[col], result):
pf.to_pandas()
with measure('%s: write, with null, has_null=False' % d.dtypes[col], result):
write(fn, df, has_nulls=False)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, with null, has_null=False' % d.dtypes[col], result):
pf.to_pandas()
return result
def time_text():
with tmpdir() as tempdir:
result = {}
fn = join_path(tempdir, 'temp.parq')
n = 1000000
d = pd.DataFrame({
'a': np.random.choice(['hi', 'you', 'people'], size=n),
'b': np.random.choice([b'hi', b'you', b'people'], size=n)})
for col in d.columns:
for fixed in [None, 6]:
df = d[[col]]
if isinstance(df.iloc[0, 0], bytes):
t = "bytes"
else:
t = 'utf8'
write(fn, df)
with measure('%s: write, fixed: %s' % (t, fixed), result):
write(fn, df, has_nulls=False, write_index=False,
fixed_text={col: fixed}, object_encoding=t)
pf = ParquetFile(fn)
pf.to_pandas() # warm-up
with measure('%s: read, fixed: %s' % (t, fixed), result):
pf.to_pandas()
return result
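# `run_find_nulls` is called by time_find_nulls below but is not defined in
# this excerpt. A plausible sketch, consistent with the measure() helper above;
# the key layout and the timed operation are assumptions, not the original.
def run_find_nulls(df, result):
    for col in df.columns:
        n_null = int(df[col].isnull().sum())
        key = '%s: find nulls (%d null)' % (df.dtypes[col], n_null)
        with measure(key, result):
            df[col].isnull().values.any()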
def time_find_nulls(N=10000000):
x = np.random.random(N)
df = pd.DataFrame({'x': x})
result = {}
run_find_nulls(df, result)
df.loc[N//2, 'x'] = np.nan
run_find_nulls(df, result)
df.loc[:, 'x'] = np.nan
df.loc[N//2, 'x'] = np.random.random()
run_find_nulls(df, result)
df.loc[N//2, 'x'] = np.nan
run_find_nulls(df, result)
x = np.random.randint(0, 2**30, N)
df = pd.DataFrame({'x': x})
run_find_nulls(df, result)
df = pd.DataFrame({'x': x.view('datetime64[s]')})
run_find_nulls(df, result)
v = df.loc[N//2, 'x']
df.loc[N//2, 'x'] = pd.to_datetime('NaT')
run_find_nulls(df, result)
df.loc[:, 'x'] = | pd.to_datetime('NaT') | pandas.to_datetime |
import sqlite3
import time
import pandas as pd
def _format_uri(uri):
if uri.namespace:
return uri.namespace+"#"+uri.value
else:
return uri.value.strip('"')
def _make_table(_conn, tablename, varnames):
c = _conn.cursor()
colnames = []
for varname in varnames:
varname = varname.lstrip('?')
colnames.append( "{0} text".format(varname) )
c.execute("CREATE TABLE {0} ({1}, site text)".format(tablename, ", ".join(colnames)))
return _conn
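# Illustrative sketch (not part of the original module): for varnames such as
# ['?room', '?sensor'], _make_table issues SQL of the form
#   CREATE TABLE <tablename> (room text, sensor text, site text)
# The helper below reproduces that statement without touching a database.
def _example_make_table_sql(tablename, varnames):
    colnames = ["{0} text".format(v.lstrip('?')) for v in varnames]
    return "CREATE TABLE {0} ({1}, site text)".format(tablename, ", ".join(colnames))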
"""
"""
class Result:
def __init__(self):
"""
The result object helps pymortar build from streaming responses to a query,
and provides an interface to look at both metadata and timeseries data that
is the output of a call to Fetch(...)
"""
# result object has its own sqlite3 in-memory database
self.conn = sqlite3.connect(':memory:')
self._series = {}
self._dataframes = {}
self._df = None
self._dfs = {}
self._tables = {}
def __repr__(self):
numtables = len(self._tables) if self._tables else "n/a"
dataframes = self._dataframes.values()
numcols = sum(map(lambda x: len(x.columns), self._dfs.values()))
numvals = sum(map(lambda x: x.size, self._dfs.values()))
values = [
"views:{0}".format(numtables),
"dataframes:{0}".format(len(dataframes)),
"timeseries:{0}".format(numcols),
"vals:{0}".format(numvals)
]
return "<pymortar.result.Result: {0}>".format(" ".join(values))
def describe_table(self, viewname):
"""
Prints out a description of the table with the provided name
Args:
viewname (str): The name of the view you want to see a description of. A list of views
can be retrieved using Result.views
"""
s = "Columns: {0}".format(' '.join(self._tables.get(viewname, [])))
s += "\nCount: {0}".format(self.query("SELECT COUNT(*) FROM {0}".format(viewname))[0][0])
print(s)
def view_columns(self, viewname):
"""
Returns a Python list of strings corresponding to the column names of the given View
Args:
viewname (str): View name. This will be from the pymortar.View object 'name' field.
List can be retrieved using the Result.views property
Returns:
columns (list of str): the column names for the indicated view
"""
return self._tables.get(viewname, [])
def view(self, viewname, fulluri=False):
"""
Returns a pandas.DataFrame representation of the indicated view. This is presented
as an alternative to writing SQL queries (Result.query)
Args:
viewname (str): View name. This will be from the pymortar.View object 'name' field.
List can be retrieved using the Result.views property
Keyword Args:
fulluri (bool): (default: False) if True, returns the full URI of the Brick value.
This can be cumbersome, so the default is to elide these prefixes
Returns:
df (pandas.DataFrame): a DataFrame containing the results of the View
"""
cols = self.view_columns(viewname)
col_str = ", ".join(cols)
df = pd.DataFrame(self.query("select {0} from {1}".format(col_str, viewname)))
df.columns = cols
if not fulluri:
for col in cols:
df.loc[:, col] = df[col].str.split('#').apply(lambda x: x[-1])
return df
def _add(self, resp):
"""
Adds the next FetchResponse object from the streaming call into
the current Result object
Parameters
----------
resp: FetchResponse
This parameter is a FetchResponse object obtained from
calling the Mortar Fetch() call.
"""
if resp.error != "":
raise Exception(resp.error)
if resp.view not in self._tables and len(resp.variables) > 0:
_make_table(self.conn, resp.view, resp.variables)
self._tables[resp.view] = list(map(lambda x: x.lstrip("?"), resp.variables))
self._tables[resp.view].append("site")
if resp.view in self._tables:
c = self.conn.cursor()
for row in resp.rows:
values = ['"{0}"'.format(_format_uri(u)) for u in row.values]
values.append('"{0}"'.format(resp.site))
c.execute("INSERT INTO {0} values ({1})".format(resp.view, ", ".join(values)))
if resp.identifier and resp.dataFrame:
if resp.dataFrame not in self._dataframes:
self._dataframes[resp.dataFrame] = {}
if resp.identifier not in self._dataframes[resp.dataFrame]:
self._dataframes[resp.dataFrame][resp.identifier] = []
self._dataframes[resp.dataFrame][resp.identifier].append(
pd.Series(resp.values, index=pd.to_datetime(resp.times), name=resp.identifier)
)
def _build(self):
if len(self._dataframes) == 0:
self._df = pd.DataFrame()
return
t = time.time()
for dataframe, timeseries in self._dataframes.items():
timeseries = self._dataframes[dataframe]
for uuidname, contents in timeseries.items():
ser = | pd.concat(contents) | pandas.concat |
from scipy.spatial.distance import cosine
from itertools import islice
import numpy as np
from sklearn import linear_model
from .DatabaseUtils import (Database)
from .Decision import (DecisionTree)
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn import metrics
CRITERION_FIELDS = ['degrees', 'skills', 'schools', 'majors', 'occupations']
def addElementsToList(arr, ls):
for d in arr:
if d not in ls:
ls.append(d)
def ranking(criterions, resumes):
rating = ratePotentialRate()
ranked = resumes
allCriterions = []
# add all criterions to allCriterions
t_criterions = []
for field in CRITERION_FIELDS:
if field in criterions.keys():
addElementsToList(criterions[field], allCriterions)
addElementsToList(allCriterions, t_criterions)
# add criterion from resumes
for rm in resumes:
ls = []
for field in CRITERION_FIELDS:
if field in rm.keys():
addElementsToList(rm[field], ls)
rm['criterions'] = ls
addElementsToList(ls, allCriterions)
# sort criterion list
allCriterions.sort()
p_criterion = [1]*len(allCriterions)
for rm in resumes:
isAnyMatch = False
m_criterion = []
for c in allCriterions:
if c in t_criterions and c in rm['criterions']:
m_criterion.append(1)
isAnyMatch = True
else:
m_criterion.append(0)
if not isAnyMatch:
rm['point'] = 0.0
else:
rm['point'] = round((1 - cosine(m_criterion, p_criterion))*10, 5)
po_criterion = []
for c in rating[2]:
if c in rm['criterions']:
po_criterion.append(1)
else:
po_criterion.append(0)
f = pd.DataFrame([po_criterion])
gr = rating[0].predict(f)
print(rating[1][int(gr[0])].predict(f))
rm['potential'] = round(rating[1][int(gr[0])].predict(f)[0], 5)
rm['selected'] = False
rm['total'] = round(rm['potential'] + rm['point'], 5)
def rk(v):
return v['total']
resumes.sort(reverse=True, key=rk)
return resumes
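# Illustrative sketch (not part of the original module): the matching score in
# ranking() is (1 - cosine_distance) * 10 between a 0/1 vector of matched
# criterions and an all-ones target vector. For example, matching 2 of 4
# criterions gives (1 - (1 - 2/(sqrt(2)*2))) * 10 ~= 7.07. Assumes at least one
# match (the all-zero case is handled separately in ranking()).
def _example_match_point(n_matched, n_total):
    m_criterion = [1]*n_matched + [0]*(n_total - n_matched)
    p_criterion = [1]*n_total
    return round((1 - cosine(m_criterion, p_criterion))*10, 5)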
def ratePotentialRate():
db = Database()
rates = db.getRegularRate().json()
allCriterions = []
for r in rates:
addElementsToList(r['criterions'], allCriterions)
allCriterions.append('point')
rows = []
#create row in matrix
for r in rates:
ro = []
for c in allCriterions:
if c in r['criterions']:
ro.append(1)
else:
ro.append(0)
ro.append(r['point'])
rows.append(ro)
#sort top
def rk(v):
return v[-1]
rows.sort(reverse=True, key=rk)
#split to 3 class
classes = np.array_split(rows, 3)
    index = 0
# add label class to row
n_classes = []
for c in classes:
for r in c:
n_r = r.tolist()
n_r.append(index)
n_classes.append(n_r)
index += 1
df = pd.DataFrame(n_classes)
# # #create tree from 3 class
tree = DecisionTree(max_depth = 30, min_samples_split = 3)
X = df.iloc[:, :-2]
y = df.iloc[:, -1]
tree.fit(X, y)
print(accuracy_score(y, tree.predict(X)))
# #create linear regression for each class
reprList = []
for c in classes:
dff = | pd.DataFrame(c) | pandas.DataFrame |
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
@pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="X")
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
with pytest.raises(ValueError):
df.boxplot(return_type="NOTATYPE")
result = df.boxplot()
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="dict")
self._check_box_return_type(result, "dict")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="axes")
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df["age"] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=100),
"b": np.random.randn(100),
"c": np.random.randn(100) + 2,
"d": date_range("2012-01-01", periods=100).astype(str),
"e": date_range("2012-01-01", periods=100, tz="UTC"),
"f": timedelta_range("1 days", periods=100),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
dict(boxes="r", whiskers="b", medians="g", caps="c"),
dict(boxes="r", whiskers="b", medians="g", caps="c"),
),
(dict(boxes="r"), dict(boxes="r")),
("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
kwd = {props: dict(color="C1")}
result = df.boxplot(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning):
axes = | _check_plot_works(grouped.boxplot, return_type="axes") | pandas.tests.plotting.common._check_plot_works |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from hash import *
class simulation:
def __init__(self, length=12096, mu=0, sigma=0.001117728,
b_target=10, block_reward=12.5, hash_ubd=55,
hash_slope=3, hash_center=1.5, prev_data=pd.DataFrame(),
T_BCH=144, T_BTC=2016, init_price=5400,
init_winning_rate=0.00003):
'''
Parameters
----------
length: time length of simulation
length = the number of blocks generated in one simulation.
A new block is generated in 10 minutes in expection;
12096 blocks are generated in three months in expectation.
mu: average of the brownian motion
sigma: standard deviation of the brownian motion
b_target: target block time (min) (default: 10 min)
\bar{B}
block_reward:
the amount of cryptocurrency the miner receives when he
adds a block. (default: 12.5)
hash_ubd: the upper bound of global hash rate.
hash_slope, hash_center:
            the parameters that affect the shape of the hash supply function
prev_data:
a pandas dataframe containing (i) prices, (ii) winning rates,
(iii) hash rates, and (iv) block times.
The number of rows should coincides with T_BCH.
T_BCH: the length of the time window used for DAA of BCH.
T_BTC: the length of the time window used for DAA of BTC.
init_price: the initial price.
        init_winning_rate: the initial winning rate.
Attributes
----------
block_times
prices
winning_rates
hash_rates
optimal_winning_rates
expected_returns
Notes
-----
* As for BTC and BCH, b_target is set to be 10 minutes.
'''
# params
self.mu = mu
self.sigma = sigma
self.b_target = b_target
self.length = length
self.block_reward = block_reward
self.hash_ubd = hash_ubd
self.hash_slope = hash_slope
self.hash_center = hash_center
self.T_BCH = T_BCH
self.T_BTC = T_BTC
if prev_data.empty == True:
self.prev_prices = np.ones(T_BCH) * init_price
self.prev_block_times = np.ones(T_BCH) * b_target
self.prev_winning_rates = np.ones(T_BCH) * init_winning_rate
else:
self.prev_prices = prev_data['prices']
self.prev_block_times = prev_data['block_times']
self.prev_winning_rates = prev_data['winning_rates']
def sim_DAA_1(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-1 as its DAA.
DAA-1 is based on the DAA used by BTC.
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
        df_opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determine the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted
every self.T_BTC periods. In reality, BTC lets T_BTC = 2016.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
if (init_height + t)%self.T_BTC == 0:
self.diff_adjust_BTC(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_2(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3):
'''
Conduct a simulation using DAA-2 as its DAA.
DAA-2 is based on the DAA used by BCH.
Parameters
----------
        prices: see sim_DAA_1.
        exprvs: see sim_DAA_1.
        presim_length: see sim_DAA_1.
        ubd_param: see sim_DAA_1.
Returns
-------
None
Notes
-----
Difficulty, or winning_rate W(t), is adjusted every period.
At each adjustment, the last T_BCH blocks are taken into account.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_BCH(current_period=t)
else:
break
self._postprocessing(period)
return None
def sim_DAA_asert(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
presim_length=2016, ubd_param=3, half_life=2880):
'''
        Conduct a simulation using an ASERT-style DAA.
        The difficulty is updated every period by an exponential rule with the
        given half_life (see diff_adjust_asert).
        Parameters
        ----------
        prices: see sim_DAA_1.
        exprvs: see sim_DAA_1.
        presim_length: see sim_DAA_1.
        ubd_param: see sim_DAA_1.
        half_life: the half life (in minutes) of the exponential difficulty
            adjustment (default 2880 = two days).
        Returns
        -------
        None
        Notes
        -----
        Difficulty, or winning_rate W(t), is adjusted every period.
        Each adjustment uses only the most recent block time, weighted
        exponentially with the given half_life.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
# W(t+1)
## different from that of BTC in that
## difficulty adjustment is conducted every period.
self.diff_adjust_asert(current_period=t, half_life=half_life)
else:
break
self._postprocessing(period)
return None
def sim_DAA_0(self, prices=pd.DataFrame(), exprvs=pd.DataFrame(),
df_opt_w=pd.DataFrame(),
init_height=551443, presim_length=2016, ubd_param=3):
'''
Conduct a simulation where the difficulty is always adjusted
to the optimal level. (imaginary DAA)
Parameters
----------
prices : exogenously given. price[t] is the price at time 10*t
exprvs : exogenously given; used for computing block times.
        df_opt_w : the optimal winning rates, computed in advance.
init_height :
the height of the block that is created first
in the simulation. (default: 551443)
presim_length :
the length of periods contained in prev_data.
(Real data used for the pre-simulation period.)
See also __init__.
ubd_param :
determine the maximum number of iterations
See also _initialization.
Returns
-------
None
Notes
-----
        The difficulty, or winning_rate W(t), is set to its optimal level
        W^*(t) in every period, so no real-world DAA is simulated here.
'''
if prices.empty == True:
prices = self.generate_prices()
if exprvs.empty == True:
exprvs = self.generate_exprvs()
# initialization
## period 0 to period (presim_length - 1): pre-simulation period
self._initialization(ubd_param)
# main loop
## See what happens within self.length*self.b_target minutes
## default: 12096*10 min = 12 weeks = 3 month
time_ubd = self.length * self.b_target
time = 0
period = presim_length-1
for t in range(presim_length-1, self.length*ubd_param+presim_length-1):
# S(t), W(t) is given
# W^*(t)
## W(t) = W^*(t)
price_truncated = self.prices[t]
price_truncated = (price_truncated//50)*50 # grid size = 50
price_truncated = int(np.max([np.min([price_truncated, 11000]), 100])) # max 11000
self.optimal_winning_rates[t] =\
df_opt_w.loc[price_truncated, 'opt_w']
self.winning_rates[t] = self.optimal_winning_rates[t]
# R(t) = S(t) * M * W(t)
self.expected_rewards[t] =\
self.winning_rates[t] * self.block_reward * self.prices[t]
# hash rate H(t) <- W(t), S(t)
self.hash_rates[t] = self.hash_supply(t)
# block time B(t) <- H(t), W(t)
# multiply 60 to rescale time unit from second to minute
self.block_times[t] = \
exprvs[t]/ \
(self.hash_rates[t] * self.winning_rates[t] * 60)
time += self.block_times[t]
period += 1
if time < time_ubd:
# S(t+1)
self.compute_price(current_period=t, current_time=time,
prices=prices)
else:
break
self._postprocessing(period)
return None
def compute_price(self, current_period, current_time, prices):
'''
Compute the price at the time when the (t+1)-th block is created:
compute S(t+1) using price data via linear interpolation.
prices contains the price date recorded every 10 minutes.
'''
time_left = int(current_time//self.b_target)
time_right = time_left + 1
self.prices[current_period+1] = \
prices[time_left] + (prices[time_right] - prices[time_left]) * \
((current_time - time_left*self.b_target)/self.b_target)
return None
def diff_adjust_BTC(self, current_period):
'''
Used by sim_DAA-1.
Modify self.winning_rates in place.
'''
multiplier = \
(self.block_times[current_period-self.T_BTC+1:\
current_period+1].sum() / (self.T_BTC * self.b_target))
self.winning_rates[current_period+1:current_period+self.T_BTC+1] = \
self.winning_rates[current_period] * multiplier
return None
def diff_adjust_BCH(self, current_period):
'''
Used by sim_DAA_2.
Modify self.winning_rates in place.
'''
# the term related to B(t)
block_term = \
(self.block_times[current_period-self.T_BCH+1: \
current_period+1].sum() / self.b_target)
# the term related to W(t)
temp = np.ones(self.T_BCH)
w_inverses = temp / (self.winning_rates[current_period-self.T_BCH+1: \
current_period+1])
winning_prob_term = 1 / w_inverses.sum()
# update W(t)
self.winning_rates[current_period+1] = \
block_term * winning_prob_term
return None
def diff_adjust_asert(self, current_period, half_life=2880):
'''
Used by sim_DAA_asert.
Modify self.winning_rates in place.
'''
temp = (self.block_times[current_period] - self.b_target)/half_life
# update W(t)
self.winning_rates[current_period+1] = \
self.winning_rates[current_period] * np.exp(temp)
return None
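    # Note: summary of the three adjustment rules implemented above
    # (W = winning rate, B = block time):
    #   DAA-1 (BTC-like): W(t+1) = W(t) * sum(B over last T_BTC blocks) / (T_BTC * b_target)
    #   DAA-2 (BCH-like): W(t+1) = [sum(B over last T_BCH blocks)/b_target] * [1 / sum(1/W over last T_BCH blocks)]
    #   ASERT:            W(t+1) = W(t) * exp((B(t) - b_target) / half_life)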
def hash_supply(self, current_period):
'''
Compute hash supply in current period (EH)
'''
current_exp_reward = \
(self.prices[current_period] * self.winning_rates[current_period]
* self.block_reward)
return self.hash_ubd * \
self._sigmoid(self.hash_slope *
(current_exp_reward - self.hash_center))
def _sigmoid(self, x):
sigmoid_range = 34.538776394910684
if x <= -sigmoid_range:
return 1e-15
if x >= sigmoid_range:
return 1.0 - 1e-15
return 1.0 / (1.0 + np.exp(-x))
def _initialization(self, ubd_param, presim_length=2016):
# the number of iteration cannot exceeds self.length * self.ubd_param
sim_length_ubd = self.length * ubd_param
self.prices = np.zeros((sim_length_ubd,)) # S(t)
self.winning_rates = np.zeros((sim_length_ubd,)) # W(t)
self.block_times = np.zeros((sim_length_ubd,)) # B(t)
self.hash_rates = np.zeros((sim_length_ubd,)) #H(t)
self.optimal_winning_rates = np.zeros((sim_length_ubd,)) #W^*(t)
self.expected_rewards = np.zeros((sim_length_ubd,)) #R(t)
# add pre-simulation periods
self.prices = np.hstack([self.prev_prices, self.prices])
self.block_times = \
np.hstack([self.prev_block_times, self.block_times])
self.winning_rates = \
np.hstack([self.prev_winning_rates, self.winning_rates])
## for BTC, set the winning rates
self.winning_rates[presim_length:presim_length+self.T_BTC] = \
self.winning_rates[presim_length-1]
## hash rates in pre-simulation periods will not be used
## The same is true of opt_win_rate and exp_returns
_ = np.zeros(presim_length) + self.hash_supply(presim_length-1) # may be redundant
self.hash_rates = np.hstack([_, self.hash_rates])
_ = np.zeros(presim_length)
self.optimal_winning_rates = np.hstack([_, self.optimal_winning_rates])
self.expected_rewards = np.hstack([_, self.expected_rewards])
return None
def _postprocessing(self, period, presim_length=2016):
self.block_times = self.block_times[presim_length:period]
self.prices = self.prices[presim_length:period]
self.winning_rates = self.winning_rates[presim_length:period]
self.hash_rates = self.hash_rates[presim_length:period]
self.optimal_winning_rates =\
self.optimal_winning_rates[presim_length:period]
self.expected_rewards = self.expected_rewards[presim_length:period]
return None
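# Illustrative helper (not part of the original module): standalone evaluation
# of the hash supply curve used by simulation.hash_supply, i.e.
#   H = hash_ubd * sigmoid(hash_slope * (S * W * M - hash_center)).
# The default argument values mirror the simulation defaults; the specific
# numbers are for illustration only.
def _example_hash_supply(price=5400.0, winning_rate=0.00003, block_reward=12.5,
                         hash_ubd=55, hash_slope=3, hash_center=1.5):
    expected_reward = price * winning_rate * block_reward
    return hash_ubd / (1.0 + np.exp(-hash_slope * (expected_reward - hash_center)))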
# Functions
def generate_simulation_data(num_iter=3, price_shock=0, T=None,
opt_w=pd.DataFrame(), prev_data=pd.DataFrame(),
dir_sim='/Volumes/Data/research/BDA/simulation/'):
'''
Notes
-----
    num_iter is the number of observations (simulation runs).
The price data 'sim_prices_ps={}_5000obs.csv'.format(price_shock) should
be created in advance.
If T is specified, T_BTC <- T and T_BCH <- T.
'''
df_exprvs = pd.read_csv(dir_sim+'sim_exprvs_5000obs.csv')
df_price = pd.read_csv(dir_sim+'sim_prices_ps={}_5000obs.csv'\
.format(price_shock))
df_opt_w = pd.read_csv(dir_sim + 'opt_w.csv', index_col=0)
path = '../data/BTCdata_presim.csv'
prev_data = pd.read_csv(path)
prev_data['time'] = pd.to_datetime(prev_data['time'])
prev_data = prev_data.rename(columns={'blocktime': 'block_times', 'price': 'prices', 'probability of success /Eh': 'winning_rates'})
df_DAA_1_blocktime = pd.DataFrame()
df_DAA_1_hashrate = pd.DataFrame()
df_DAA_1_winrate = pd.DataFrame()
df_DAA_1_optwinrate = pd.DataFrame()
df_DAA_1_expreward = pd.DataFrame()
df_DAA_2_blocktime = pd.DataFrame()
df_DAA_2_hashrate = pd.DataFrame()
df_DAA_2_winrate = pd.DataFrame()
df_DAA_2_optwinrate = pd.DataFrame()
df_DAA_2_expreward = pd.DataFrame()
df_DAA_0_blocktime = pd.DataFrame()
df_DAA_0_hashrate = pd.DataFrame()
df_DAA_0_winrate = pd.DataFrame()
df_DAA_0_optwinrate = pd.DataFrame()
df_DAA_0_expreward = pd.DataFrame()
if T:
T_BTC = T
T_BCH = T
else:
T_BTC = 2016
T_BCH = 144
sim = simulation(prev_data=prev_data, T_BTC=T_BTC, T_BCH=T_BCH)
for iter in range(num_iter):
prices = df_price.loc[:, 'iter_{}'.format(iter)]
exprvs = df_exprvs.loc[:, 'iter_{}'.format(iter)]
# DAA-1
_blocktime = pd.DataFrame()
_hashrate = pd.DataFrame()
_winrate = pd.DataFrame()
_optwinrate = pd.DataFrame()
_expreward = pd.DataFrame()
sim.sim_DAA_1(prices=prices, exprvs=exprvs, df_opt_w=df_opt_w)
_blocktime['iter_{}'.format(iter)] = sim.block_times
_hashrate['iter_{}'.format(iter)] = sim.hash_rates
_winrate['iter_{}'.format(iter)] = sim.winning_rates
_optwinrate['iter_{}'.format(iter)] = sim.optimal_winning_rates
_expreward['iter_{}'.format(iter)] = sim.expected_rewards
df_DAA_1_blocktime = pd.concat([df_DAA_1_blocktime, _blocktime], axis=1)
df_DAA_1_hashrate = pd.concat([df_DAA_1_hashrate, _hashrate], axis=1)
df_DAA_1_winrate = pd.concat([df_DAA_1_winrate, _winrate], axis=1)
df_DAA_1_optwinrate = pd.concat([df_DAA_1_optwinrate, _optwinrate], axis=1)
df_DAA_1_expreward = pd.concat([df_DAA_1_expreward, _expreward], axis=1)
# DAA-2
_blocktime = pd.DataFrame()
_hashrate = pd.DataFrame()
_winrate = pd.DataFrame()
_optwinrate = pd.DataFrame()
_expreward = pd.DataFrame()
sim.sim_DAA_2(prices=prices, exprvs=exprvs, df_opt_w=df_opt_w)
_blocktime['iter_{}'.format(iter)] = sim.block_times
_hashrate['iter_{}'.format(iter)] = sim.hash_rates
_winrate['iter_{}'.format(iter)] = sim.winning_rates
_optwinrate['iter_{}'.format(iter)] = sim.optimal_winning_rates
_expreward['iter_{}'.format(iter)] = sim.expected_rewards
df_DAA_2_blocktime = pd.concat([df_DAA_2_blocktime, _blocktime], axis=1)
df_DAA_2_hashrate = pd.concat([df_DAA_2_hashrate, _hashrate], axis=1)
df_DAA_2_winrate = pd.concat([df_DAA_2_winrate, _winrate], axis=1)
df_DAA_2_optwinrate = pd.concat([df_DAA_2_optwinrate, _optwinrate], axis=1)
df_DAA_2_expreward = pd.concat([df_DAA_2_expreward, _expreward], axis=1)
df_DAA_1_blocktime.to_csv(dir_sim+'DAA-1_blocktime_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_hashrate.to_csv(dir_sim+'DAA-1_hashrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_winrate.to_csv(dir_sim+'DAA-1_winrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_optwinrate.to_csv(dir_sim+'DAA-1_optwinrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_1_expreward.to_csv(dir_sim+'DAA-1_expreward_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_blocktime.to_csv(dir_sim+'DAA-2_blocktime_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_hashrate.to_csv(dir_sim+'DAA-2_hashrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_winrate.to_csv(dir_sim+'DAA-2_winrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_optwinrate.to_csv(dir_sim+'DAA-2_optwinrate_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
df_DAA_2_expreward.to_csv(dir_sim+'DAA-2_expreward_ps{}_{}obs_T={}'\
.format(price_shock, num_iter, T)+'.csv')
return None
def generate_simulation_data_DAA0(num_iter=3, price_shock=0,
opt_w=pd.DataFrame(), prev_data=pd.DataFrame(),
dir_sim='/Volumes/Data/research/BDA/simulation/'):
'''
Notes
-----
    num_iter is the number of observations (simulation runs).
The price data 'sim_prices_ps={}_5000obs.csv'.format(price_shock) should
be created in advance.
'''
df_exprvs = pd.read_csv(dir_sim+'sim_exprvs_5000obs.csv')
df_price = pd.read_csv(dir_sim+'sim_prices_ps={}_5000obs.csv'\
.format(price_shock))
df_opt_w = pd.read_csv(dir_sim + 'opt_w.csv', index_col=0)
path = '../data/BTCdata_presim.csv'
prev_data = pd.read_csv(path)
prev_data['time'] = pd.to_datetime(prev_data['time'])
prev_data = prev_data.rename(columns={'blocktime': 'block_times', 'price': 'prices', 'probability of success /Eh': 'winning_rates'})
df_DAA_0_blocktime = pd.DataFrame()
df_DAA_0_hashrate = pd.DataFrame()
df_DAA_0_winrate = pd.DataFrame()
df_DAA_0_optwinrate = pd.DataFrame()
df_DAA_0_expreward = pd.DataFrame()
sim = simulation(prev_data=prev_data)
for iter in range(num_iter):
prices = df_price.loc[:, 'iter_{}'.format(iter)]
exprvs = df_exprvs.loc[:, 'iter_{}'.format(iter)]
# DAA-0
_blocktime = pd.DataFrame()
_hashrate = pd.DataFrame()
_winrate = pd.DataFrame()
_optwinrate = pd.DataFrame()
_expreward = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
import json
import pdb
import csv
from collections import OrderedDict
import os.path
from pathlib import Path
from datetime import datetime
import re
import pandas as pd
import sys
import settings
import scrape_schedule
def CalcPercent(total, skip, correct):
try:
return round(correct / (total - skip) * 100., 2)
except ZeroDivisionError:
return None
def GetPercent(item):
newstr = item.replace("%", "")
newstr = newstr.replace("?", "")
if (newstr.strip()==""):
return -1
return float(newstr)
def GetIndex(item):
filename = os.path.basename(str(item))
idx = re.findall(r'\d+', str(filename))
if (len(idx) == 0):
idx.append("-1")
return int(idx[0])
def GetFiles(path, templatename):
A = []
files = Path(path).glob(templatename)
for p in files:
A.append(p)
file_list = []
for item in range(0, 19):
file_list.append("?")
for item in A:
idx = GetIndex(item)
if (len(file_list) > idx):
file_list[idx] = item
file_list = [x for x in file_list if x != "?"]
return file_list
def CurrentScheduleFiles(filename):
stat = os.path.getmtime(filename)
stat_date = datetime.fromtimestamp(stat)
if stat_date.date() < datetime.now().date():
return False
return True
def RefreshScheduleFiles():
now = datetime.now()
year = int(now.year)
scrape_schedule.year = year
scrape_schedule.main(sys.argv[1:])
def GetActualScores(abbra, teama, abbrb, teamb, scores):
items = re.split(r'(,|\s)\s*', str(scores).lower())
if (not items):
return -1, -1
if (items[0].strip() == "canceled"):
return -3, -3
if (items[0].strip() == "postponed"):
return -2, -2
if (items[0].strip() == "?"): # not yet Played Game
return -1, -1
ot = -1
if (len(items) == 9 and "ot)" in items[8]):
# overtime case
ot += 1
elif (len(items) != 7):
return -1, -1
if (abbra.lower().strip() not in items and abbrb.lower().strip() not in items):
return -1, -1
if (abbra.lower().strip() == items[0].lower().strip()):
scorea = int(items[2])
scoreb = int(items[6])
else:
scorea = int(items[6])
scoreb = int(items[2])
return scorea, scoreb
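# Illustration of the score-string format GetActualScores appears to expect
# (example values, not real data): "uga 24, aub 10" -> (24, 10), with an
# optional trailing "(OT)" token for overtime games; "canceled" -> (-3, -3),
# "postponed" -> (-2, -2), and "?" (not yet played) -> (-1, -1).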
now = datetime.now()
saved_path = "{0}{1}/{2}".format(settings.predict_root, int(now.year), settings.predict_saved)
sched_path = "{0}{1}/{2}".format(settings.predict_root, int(now.year), settings.predict_sched)
verbose = False
if (len(sys.argv)==2):
verbose = True
print ("Measure Actual Results Tool")
print ("**************************")
Path(sched_path).mkdir(parents=True, exist_ok=True)
RefreshScheduleFiles()
file = '{0}sched1.json'.format(sched_path)
if (not os.path.exists(file)):
if (verbose):
print ("schedule files are missing, run the scrape_schedule tool to create")
exit()
Path(saved_path).mkdir(parents=True, exist_ok=True)
file = '{0}week1.csv'.format(saved_path)
if (not os.path.exists(file)):
if (verbose):
print ("Weekly files are missing, run the score_week tool to create")
exit()
sched_files = GetFiles(sched_path, "sched*.json")
list_sched = []
for file in sched_files:
with open(file) as sched_file:
item = json.load(sched_file, object_pairs_hook=OrderedDict)
item['Week'] = GetIndex(file)
list_sched.append(item)
week_files = GetFiles(saved_path, "week*.csv")
list_week = []
for file in week_files:
with open(file) as week_file:
reader = csv.DictReader(week_file)
for row in reader:
row['Week'] = GetIndex(file)
list_week.append(row)
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
index = 0
alltotal = 0
allskip = 0
allcorrect = 0
count = 0
for idx in range(len(list_sched)):
total = 0
skip = 0
correct = 0
week = list_sched[idx]["Week"]
for item in list_sched[idx].values():
if (item == week):
break
total += 1
chancea = -1
abbra = ""
abbrb = ""
teama = ""
teamb = ""
if (index < len(list_week) and list_week[index]["Week"] == week):
chancea = GetPercent(list_week[index]["ChanceA"])
chanceb = GetPercent(list_week[index]["ChanceB"])
abbra = list_week[index]["AbbrA"]
abbrb = list_week[index]["AbbrB"]
teama = list_week[index]["TeamA"]
teamb = list_week[index]["TeamB"]
index += 1
scorea, scoreb = GetActualScores(abbra, teama, abbrb, teamb, item["Score"])
if ((int(chancea) == 0 and int(chanceb) == 0) or scorea < 0 or scoreb < 0):
if (teama != "" and teamb != "" and "tickets" not in item["Score"]):
if (item["Score"].lower() == "canceled"):
print ("***\nGame skipped\n\n\t[{0} vs {1}] \n\tabbreviation(s) [{2}] [{3}] Score {4}\n\tcanceled\n***\n"
.format(teama, teamb, abbra, abbrb, item["Score"]))
elif (item["Score"].lower() == "postponed"):
print ("***\nGame skipped\n\n\t[{0} vs {1}] \n\tabbreviation(s) [{2}] [{3}] Score {4}\n\tpostponed\n***\n"
.format(teama, teamb, abbra, abbrb, item["Score"]))
else:
if (item["Score"] != "?"):
print ("***\nGame skipped\n\n\t[{0} vs {1}] \n\tabbreviation(s) [{2}] [{3}] Score {4}\n\treview your merge files\n***\n".format(teama, teamb, abbra, abbrb, item["Score"]))
skip += 1
else:
if (chancea >= 50 and (scorea >= scoreb)):
correct += 1
if (chancea < 50 and (scorea < scoreb)):
correct += 1
count += 1
IDX.append(count)
A.append(week)
B.append(total)
C.append(skip)
D.append(correct)
E.append(CalcPercent(total, skip, correct))
print ("week{0} total={1}, skip={2}, correct={3} Percent={4}%".format(week, total, skip, correct, CalcPercent(total, skip, correct)))
alltotal = alltotal + total
allskip = allskip + skip
allcorrect = allcorrect + correct
count += 1
IDX.append(count)
A.append(99)
B.append(alltotal)
C.append(allskip)
D.append(allcorrect)
E.append(CalcPercent(alltotal, allskip, allcorrect))
print ("====================================================================")
print ("Totals total={0}, skip={1}, correct={2} Percent={3}%".format(alltotal, allskip, allcorrect, CalcPercent(alltotal, allskip, allcorrect)))
print ("====================================================================")
df= | pd.DataFrame(IDX,columns=['Index']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 18:48:42 2017
@author: chrzq
"""
import gdax
import pandas as pd
class GdaxAccount(object):
def __init__(self, key, pp, sk):
"""
"""
self.key = key
self.pp = pp
self.sk = sk
self.client = None
def connect_account(self):
"""
"""
self.client = gdax.AuthenticatedClient(self.key, self.sk, self.pp)
def get_holdings(self):
"""
"""
if not(self.client):
self.connect_account()
print("I connected")
lAccts = self.client.get_accounts()
dfAcct = pd.DataFrame(lAccts)
for c in ['balance', 'available', 'hold']:
dfAcct[c] = pd.to_numeric(dfAcct[c])
self.holdings = dfAcct
self.acctIdMap = self.holdings.set_index('currency')['id'].to_dict()
def _accthist_to_df(self, lRet):
lMaster = []
for lSubRet in lRet:
for dEntry in lSubRet:
dPEntry = {}
dDetails = dEntry.pop('details')
dPEntry = {**dEntry, **dDetails}
lMaster.append(dPEntry)
return pd.DataFrame(lMaster)
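    # Illustration (assumed entry shape): an account-history entry such as
    #   {'amount': '1.0', 'type': 'match', 'details': {'order_id': 'abc'}}
    # is flattened by _accthist_to_df into a single row
    #   {'amount': '1.0', 'type': 'match', 'order_id': 'abc'}
    # before all rows are collected into one DataFrame.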
def generate_master(self):
"""
"""
        if getattr(self, 'acctIdMap', None) is None:
self.get_holdings()
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.core.broadcaster import Broadcaster
foo_bar_series = pd.Series({'foo': 1.0, 'bar': 2.0})
foo_bar_series_twice_in_frame = pd.DataFrame([foo_bar_series, foo_bar_series])
series_named_index = foo_bar_series.copy()
series_named_index.index.name = 'idx1'
foo_bar_frame = pd.DataFrame({'foo': [1.0, 1.5], 'bar': [2.0, 1.5]})
def test_broadcast_series_to_array():
param, obj = Broadcaster(foo_bar_series).broadcast([1.0, 2.0])
pd.testing.assert_series_equal(param, pd.Series([1.0, 2.0]))
pd.testing.assert_frame_equal(foo_bar_series_twice_in_frame, obj)
def test_broadcast_frame_to_array_match():
param, obj = Broadcaster(foo_bar_frame).broadcast([1.0, 2.0])
np.testing.assert_array_equal(param, [1.0, 2.0])
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_frame_to_array_mismatch():
with pytest.raises(ValueError, match=r"Dimension mismatch. "
"Cannot map 3 value array-like to a 2 element DataFrame signal."):
Broadcaster(foo_bar_frame).broadcast([1.0, 2.0, 3.0])
def test_broadcast_series_to_scalar():
param, obj = Broadcaster(foo_bar_series).broadcast(1.0)
assert param == 1.0
pd.testing.assert_series_equal(foo_bar_series, obj)
def test_broadcast_frame_to_scalar():
param, obj = Broadcaster(foo_bar_frame).broadcast(1.0)
expected_param = pd.Series([1.0, 1.0], index=foo_bar_frame.index)
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_series_index_named_to_series_index_named():
series = pd.Series([5.0, 6.0], index=pd.Index(['x', 'y'], name='idx2'))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 'x'): 5.0,
('foo', 'y'): 6.0,
('bar', 'x'): 5.0,
('bar', 'y'): 6.0
})
expected_obj = pd.Series({
('foo', 'x'): 1.0,
('foo', 'y'): 1.0,
('bar', 'x'): 2.0,
('bar', 'y'): 2.0
})
expected_obj.index.names = ['idx1', 'idx2']
expected_param.index.names = ['idx1', 'idx2']
| pd.testing.assert_series_equal(expected_param, param) | pandas.testing.assert_series_equal |
################################################################################
#
# Delaunay density diagnostic for MSD and grad-MSD rates
# as described in the paper
# Data-driven geometric scale detection via Delaunay interpolation
# by <NAME> and <NAME>
# Version 1.0, March 2022
#
# For usage information, run:
# python delaunay_density_diagnostic.py --help
#
################################################################################
#==================================================================================================#
# Load packages. Set random state and validation split.
#==================================================================================================#
# from matplotlib.pyplot import legend
# import torch
import pandas as pd
# from torch.autograd import Variable
# import torch.nn.functional as F
# import torch.utils.data as Data
# from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from numpy.random import rand, default_rng
from numpy import arccos, array, degrees, absolute
from numpy.linalg import norm
from optparse import OptionParser
# import numpy.ma as ma
# import xarray as xr
from sys import exit
import os.path
import copy
import delsparse
from delsparse import delaunaysparsep as dsp
#==================================================================================================#
# Define the test function (hard coded here as the Griewank function)
#==================================================================================================#
def tf(X): # Griewank function, arbitrary dimension input
X = X.T
term_1 = (1. / 4000.) * sum(X ** 2)
term_2 = 1.0
for i, x in enumerate(X):
term_2 *= np.cos(x) / np.sqrt(i + 1)
return 1. + term_1 - term_2
# use a paraboloid instead:
# return (7/20_000) * ( X[0]**2 + 0.5*(X[1]**2) )
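# Illustrative usage sketch (not part of the original diagnostic): tf expects
# points as rows, i.e. an array of shape (n_points, dim), and returns one value
# per point. The helper below only demonstrates that calling convention; the
# dimensions chosen are arbitrary.
def _tf_call_convention_check(n_points=3, dim=2):
    pts = np.zeros((n_points, dim))
    return np.shape(tf(pts)) == (n_points,)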
#==================================================================================================#
# Make query point lattice in R^dim
#==================================================================================================#
def make_test_data_grid(rng, static_data=False):
num_samples_per_dim = options.numtestperdim
x = np.linspace(options.queryleftbound, options.queryrightbound, num_samples_per_dim)
print("===> Test coordinates for each dimension = ", x)
mg_in = []
for i in range(options.dim):
mg_in.append(x)
grid_pts = np.array(np.meshgrid(*mg_in))
grid_pts = grid_pts.reshape(options.dim, num_samples_per_dim ** options.dim)
grid_pts = grid_pts.T
outputs_on_grid = tf(grid_pts)
data_test_inputs = pd.DataFrame(grid_pts)
data_test_outputs = pd.DataFrame(outputs_on_grid)
return data_test_inputs, data_test_outputs
#==================================================================================================#
# Collect random sample from bounding box
#==================================================================================================#
def make_random_training_in_box(rng):
train_set_size = options.numtrainpts
# print("==> Generating ", train_set_size, " random points.")
rand_pts_n = rng.random((train_set_size, options.dim))
train_box_scale_vector = np.full(options.dim, (options.bboxrightbound - options.bboxleftbound) )
train_box_shift_vector = np.full(options.dim, options.bboxleftbound )
# do scaling in each dim first
for i in range(options.dim):
rand_pts_n[:,i] *= train_box_scale_vector[i]
# then do shifts
for i in range(options.dim):
rand_pts_n[:,i] += train_box_shift_vector[i]
outputs_on_rand_n = tf(rand_pts_n)
data_train_inputs = | pd.DataFrame(rand_pts_n) | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import time
import random
random.seed(67)
import numpy as np
np.random.seed(67)
import pandas as pd
import tensorflow as tf
tf.set_random_seed(67)
from sklearn.utils import shuffle
from sklearn.metrics import log_loss, roc_auc_score
from tqdm import tqdm, trange
from model import Model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_epochs', 30, "")
tf.app.flags.DEFINE_integer('batch_size', 128, "")
def main(_):
df_train = pd.read_csv('data/train_data.csv')
df_valid = pd.read_csv('data/valid_data.csv')
df_test = | pd.read_csv('data/test_data.csv') | pandas.read_csv |
import pandas as pd
import os
from collections import namedtuple
from strategy.strategy import Exposures, Portfolio
from strategy.rebalance import get_relative_to_expiry_instrument_weights, \
get_relative_to_expiry_rebalance_dates, get_fixed_frequency_rebalance_dates
from strategy.calendar import get_mtm_dates
def make_container(holdings, trades, pnl):
container = namedtuple("sim_result", ["holdings", "trades", "pnl"])
return container(holdings, trades, pnl)
def make_exposures(root_generics, meta_fp, market_fp):
return Exposures.from_folder(meta_fp, market_fp, root_generics)
def make_portfolio(exposures, sd, ed, capital, offset, all_monthly=False,
holidays=None):
rebal_dts = get_relative_to_expiry_rebalance_dates(
sd, ed, exposures.expiries, offset, all_monthly=all_monthly
)
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
root_generics = exposures.future_root_and_generics
wts = get_relative_to_expiry_instrument_weights(
mtm_dates, root_generics, exposures.expiries, offset,
all_monthly=all_monthly
)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_frequency_portfolio(frequency, offset, exposures, sd, ed, capital,
holidays=None):
rebal_dts = get_fixed_frequency_rebalance_dates(
sd, ed, frequency, offset
)
wts = {}
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_signal(portfolio):
asts = portfolio.future_generics + portfolio.equities
dates = portfolio.rebalance_dates
signal = pd.DataFrame(1, index=dates, columns=asts)
return signal
def get_notionals(risk_target, capital, signals, prices, multipliers,
discrete):
if discrete:
def calc(sig, price, mult):
return round(sig * risk_target * capital / (price * mult)) * price * mult # NOQA
else:
def calc(sig, price, mult):
return sig * risk_target * capital * price * mult
notionals = []
for s_i, p_i, m_i in zip(signals, prices, multipliers):
notionals.append(calc(s_i, p_i, m_i))
return notionals
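# Worked example with hypothetical numbers: for sig=1, risk_target=0.1,
# capital=1_000_000, price=2000 and multiplier=50, the discrete branch gives
#   round(0.1 * 1_000_000 / (2000 * 50)) * 2000 * 50 = 1 * 100_000 = 100_000,
# i.e. the target notional is snapped to a whole number of contracts, each worth
# price * multiplier in notional terms.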
def read_futures_instr(data_path, instr):
fn = os.path.join(data_path, instr[:2], instr + ".csv")
data = pd.read_csv(fn, parse_dates=True, index_col=0)
data = data.Settle
data.sort_index(inplace=True)
return data
def splice_futures_and_pnl(data_path, instr_sd_ed):
# instr_sd_ed is a list of tuples,
# e.g. [("ESH2015", sd, ed1), ("ESM2015", ed2)], only sd is given for
# first contract, assummed consecutive afterwards
MULTS = {"ES": 50, "TY": 1000}
prices = []
pnls = []
instr, sd, ed = instr_sd_ed[0]
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
price = read_futures_instr(data_path, instr)
price = price.loc[sd:ed]
# drop NaN at start
pnls.append(price.diff().iloc[1:])
# since holdings on rebalance day are post rebalance holdings
prices.append(price.iloc[:-1])
sd = ed
for i, instr_ed in enumerate(instr_sd_ed[1:]):
instr, ed = instr_ed
ed = pd.Timestamp(ed)
price = read_futures_instr(data_path, instr)
price = price.loc[sd:ed]
# drop NaN at start
pnls.append(price.diff().iloc[1:])
# check for last element
if i < (len(instr_sd_ed[1:]) - 1):
prices.append(price.iloc[:-1])
else:
prices.append(price)
sd = ed
prices = | pd.concat(prices, axis=0) | pandas.concat |
import streamlit as st
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
import plotly.express as px
from sklearn.metrics import mean_squared_error, r2_score
#Título
st.title("Alugueis de apartamentos - Natal/RN")
st.write("A base de dados foi coletada a partir do site do Viva Real")
# Função para ler o arquivo contendo o dataset
def load_data():
return | pd.read_csv("Webapp/data_features.csv") | pandas.read_csv |
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import pdb
import numpy as np
# This needs to be refactored into an actual class.
def clean_axis(ax):
"""Remove ticks, tick labels, and frame from axis"""
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for sp in ax.spines.values():
sp.set_visible(False)
def plot_lines(series_list, regulator_labels, target_labels, window_size, suffix=""):
figure2 = plt.figure()
lineplot = figure2.add_subplot(1,1,1)
lineplot.set_xlabel('start day')
lineplot.set_ylabel('Beta')
lines = []
time = [x for x in range(0,22-window_size)]
label_list = []
for counter,series in enumerate(series_list):
my_label = str(regulator_labels[counter]+" -> "+target_labels[counter])
label_list.append(my_label)
line, = lineplot.plot(time, series, label = my_label)
lines.append(line)
figure2.legend(lines,label_list)
figure2.savefig('line_figure'+str(window_size)+suffix+'.png')
#generating a heatmap figure
def generate_heatmap_from_df(raw_data):
""" returns heatmap figure and axes """
time_vector = raw_data['Time'].unique()
nrow,ncol=raw_data.shape
    nrepeats = nrow // len(time_vector)  # integer division so range() below gets an int
#create group assignment
groups = [x for x in range(0,nrepeats) for i in range(0,len(time_vector))]
raw_data['Group'] = groups
    sorted = raw_data.sort_values(['Time', 'Group'])  # DataFrame.sort was removed in newer pandas
heatmap_values = | pd.DataFrame() | pandas.DataFrame |
import statistics
import time
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import decomposition
from sklearn import preprocessing
from sklearn import cluster
from sklearn import linear_model
from sklearn import ensemble
from sklearn import cross_validation
from sklearn.metrics import mean_absolute_error
def get_actual_y(data):
return data.groupby('Id').mean()[['Expected']]
def simplest_predictions(train, test):
# Build simplest model for reference
median_predictions = get_actual_y(test)
median_predictions['Expected'] = train['Expected'].median()
return median_predictions
# Kaggle example
def marshall_palmer(ref, minutes_past):
#print "Estimating rainfall from {0} observations".format(len(minutes_past))
# how long is each observation valid?
valid_time = np.zeros_like(minutes_past)
valid_time[0] = minutes_past.iloc[0]
for n in range(1, len(minutes_past)):
valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
# sum up rainrate * validtime
sum = 0
for dbz, hours in zip(ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
return sum
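# Worked example: a single 30 dBZ observation covering the whole hour yields
# pow(pow(10, 30 / 10) / 200, 0.625) = (1000 / 200) ** 0.625 = 5 ** 0.625
# ~= 2.73 mm/hr, so the returned hourly accumulation is ~2.73 mm.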
# Kaggle example
# each unique Id is an hour of data at some gauge
def myfunc(hour):
#rowid = hour['Id'].iloc[0]
# sort hour by minutes_past
    hour = hour.sort_values('minutes_past', ascending=True)
est = marshall_palmer(hour['Ref'], hour['minutes_past'])
return est
def cluster_data(train_raw, test_raw):
# Normalize before building PCA components
cluster_size = 7
train = train_raw.fillna(-1)
test = test_raw.fillna(-1)
train_norm = preprocessing.scale(train.loc[:,['Ref','RefComposite','RhoHV','Zdr','Kdp']])
pca = decomposition.PCA(n_components=5).fit(train_norm)
train_pca = pca.transform(train_norm)
# Cluster measurements based on PCA components
clusterer = cluster.KMeans(n_clusters=cluster_size, n_init=15, max_iter=300, init='k-means++').fit(train_pca)
train_categories = clusterer.predict(train_pca)
train_dummies = pd.get_dummies(train_categories)
col_names = []
for i in range(0,cluster_size):
col_names.append('cat' + str(i))
train_dummies.columns = col_names
train_dummies.set_index(train.index, inplace=True)
train_dummies['Id'] = train_raw['Id']
train_raw = pd.concat([train_raw, train_dummies.drop('Id', axis=1)], axis=1)
test_norm = preprocessing.scale(test.loc[:,['Ref','RefComposite','RhoHV','Zdr','Kdp']])
test_pca = pca.transform(test_norm)
test_dummies = pd.get_dummies(clusterer.predict(test_pca))
test_dummies.columns = col_names
test_dummies.set_index(test.index, inplace=True)
test_dummies['Id'] = test_raw['Id']
test_raw = pd.concat([test_raw, test_dummies.drop('Id', axis=1)], axis=1)
return [train_raw, test_raw]
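# After cluster_data both frames carry seven extra one-hot columns
# (cat0 ... cat6), one per K-means cluster fitted on the five PCA components
# of the scaled radar features.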
def predict(train, test):
predictions = get_actual_y(test)
predictions['Expected'] = train['Expected'].median()
# train, test = cluster_data(train, test)
# Group data by id
train = prep_and_filter_data(train)
test = prep_and_filter_data(test)
# Random Forest using all data
full_tree_train_data = train.dropna()
full_tree_test_data = test.dropna()
model = ensemble.RandomForestRegressor(n_estimators=N_EST, max_depth=MAX_D, n_jobs=-1, min_samples_split=MIN_LEAF, max_features=MAX_FEATURES, criterion="mae")
full_tree_test_data['predictions'] = model.fit(X=full_tree_train_data[full_tree_train_data.columns.difference(['Id','Expected'])], y=full_tree_train_data['Expected']).predict(X=full_tree_test_data[full_tree_test_data.columns.difference(['Id','Expected'])])
# Random Forest using only means
partial_tree_train_data = train[train.count(1) < 45][train['Ref_mean'].notnull()][train['RhoHV_mean'].notnull()][train['Zdr_mean'].notnull()][train['Kdp_mean'].notnull()]
partial_tree_train_data = partial_tree_train_data.loc[:,['Ref_mean','RhoHV_mean','Zdr_mean','Kdp_mean','Expected']].copy()
partial_tree_test_data = test[test.count(1) < 45][test['Ref_mean'].notnull()][test['RhoHV_mean'].notnull()][test['Zdr_mean'].notnull()][test['Kdp_mean'].notnull()]
partial_tree_test_data = partial_tree_test_data.loc[:,['Ref_mean','RhoHV_mean','Zdr_mean','Kdp_mean','Expected']].copy()
partial_model = ensemble.RandomForestRegressor(n_estimators=N_EST, max_depth=MAX_D, n_jobs=-1, min_samples_split=MIN_LEAF, max_features='auto', criterion="mae")
partial_tree_test_data['predictions'] = partial_model.fit(X=partial_tree_train_data[partial_tree_train_data.columns.difference(['Id','Expected'])], y=partial_tree_train_data['Expected']).predict(X=partial_tree_test_data[partial_tree_test_data.columns.difference(['Id','Expected'])])
for i in partial_tree_test_data.index:
predictions.loc[i,'Expected'] = partial_tree_test_data.loc[i,'predictions']
predictions.loc[full_tree_test_data.index,'Expected'] = full_tree_test_data.loc[:,'predictions']
return predictions
def run(data):
data = data.sample(1000000)
errors = list()
med_errors = list()
for t1, t2 in cross_validation.KFold(data.shape[0], n_folds=10, shuffle=True):
# Prep data - still raw
train = data.iloc[t1]
test = data.iloc[t2]
y = get_actual_y(test)
e = error_rate(y['Expected'], predict(train, test)['Expected'])
med_e = error_rate(y['Expected'], simplest_predictions(train, test)['Expected'])
errors.append(e)
med_errors.append(med_e)
print("Median error rate: {} --- Error rate: {}".format(med_e, e))
print("Difference: {}".format(med_e - e))
print("Avg median error: {} ({})".format(statistics.mean(med_errors), statistics.stdev(med_errors)))
print("Avg error: {} ({})".format(statistics.mean(errors), statistics.stdev(errors)))
print("Difference in errors: {}".format(statistics.mean(med_errors) - statistics.mean(errors)))
def error_rate(expected, predicted):
# MAE
return (expected - predicted).abs().mean()
def prep_and_filter_data(data):
means = data.groupby('Id').mean()
means.columns += '_mean'
medians = data.groupby('Id').median()
medians.columns += '_median'
comb = pd.concat([means, medians], axis=1)
#comb.drop('Expected_std', axis=1, inplace=True)
comb = comb[comb['Ref_mean'] > 0]
comb = comb[comb['Expected_mean'] < 70]
comb['Expected'] = comb['Expected_mean']
comb.drop('Expected_mean', inplace=True, axis=1)
return comb
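# After prep_and_filter_data each gauge Id is a single row holding the per-Id
# means and medians of the raw columns (suffixed _mean / _median), restricted
# to rows with a positive Ref_mean and an hourly total below 70; 'Expected' is
# taken from the mean gauge reading.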
# Data + features
data_raw = pd.read_csv('input/train_clean.csv', usecols=[0,3,11,15,19,23])  # run(data_raw) below needs this frame
MAX_FEATURES='auto'; N_EST=30; MAX_D=None; MIN_LEAF=1000;
run(data_raw)
train_raw = | pd.read_csv('input/train_clean.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from io import BytesIO
from typing import Dict, List
from urllib.request import urlopen
from zipfile import ZipFile
import geopandas as gpd
import pandas as pd
import pandera as pa
import requests
ch_essentials_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int),
"NAME": pa.Column(pd.StringDtype()),
"POI_LATITUDE": pa.Column(
pa.Float64,
nullable=True,
),
"POI_LONGITUDE": pa.Column(
pa.Float64,
nullable=True,
),
},
index=pa.Index(pa.Int),
)
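# Usage sketch (illustrative): a frame is checked with
# ch_essentials_schema.validate(df) or, as done further below, via the
# @pa.check_output decorator; pandera raises a SchemaError if a column is
# missing, has the wrong dtype, or violates a nullable constraint.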
poi_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int, unique=True),
"ADDRESS_INFO": pa.Column(pd.StringDtype()),
"NAME": pa.Column(pd.StringDtype(), unique=True),
"CATEGORY": pa.Column(pd.StringDtype()),
"PHONE": pa.Column(pd.StringDtype()),
"EMAIL": pa.Column(pd.StringDtype()),
"WEBSITE": pa.Column(pd.StringDtype()),
"GEOID": pa.Column(pa.Float, nullable=True),
"RECEIVED_DATE": pa.Column(pd.StringDtype()),
"ADDRESS_POINT_ID": pa.Column(pa.Float, nullable=True),
"LINEAR_NAME_FULL": pa.Column(pd.StringDtype()),
"ADDRESS_FULL": pa.Column(pd.StringDtype()),
"POSTAL_CODE": pa.Column(pd.StringDtype()),
"MUNICIPALITY": pa.Column(pd.StringDtype()),
"CITY": pa.Column(pd.StringDtype()),
"PLACE_NAME": pa.Column(pd.StringDtype()),
"GENERAL_USE_CODE": pa.Column(pa.Float, nullable=True),
"CENTRELINE": pa.Column(pa.Float, nullable=True),
"LO_NUM": pa.Column(pa.Float, nullable=True),
"LO_NUM_SUF": pa.Column(pd.StringDtype()),
"HI_NUM": pa.Column(pd.StringDtype()),
"HI_NUM_SUF": pa.Column(pd.StringDtype()),
"LINEAR_NAME_ID": pa.Column(pa.Float, nullable=True),
"WARD": pa.Column(pd.StringDtype()),
"WARD_2003": pa.Column(pa.Float, nullable=True),
"WARD_2018": pa.Column(pa.Float, nullable=True),
"MI_PRINX": pa.Column(pa.Float, nullable=True),
"ATTRACTION": pa.Column(pd.StringDtype(), unique=True),
"MAP_ACCESS": pa.Column(pd.StringDtype()),
"POI_LONGITUDE": pa.Column(pa.Float, unique=False),
"POI_LATITUDE": pa.Column(pa.Float, unique=False),
},
index=pa.Index(pa.Int),
)
gdf_schema = pa.DataFrameSchema(
columns={
"AREA_ID": pa.Column(pa.Int),
"AREA_SHORT_CODE": pa.Column(pd.StringDtype()),
"AREA_LONG_CODE": pa.Column(pd.StringDtype()),
"AREA_NAME": pa.Column(pd.StringDtype()),
"Shape__Area": pa.Column(pa.Float64),
# "Shape__Length": pa.Column(pa.Float64),
# "LATITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LATITUDE": pa.Column(pa.Float64),
# "LONGITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LONGITUDE": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
pub_trans_locations_schema = pa.DataFrameSchema(
columns={
"stop_id": pa.Column(pa.Int),
"stop_code": pa.Column(pa.Int),
"stop_name": pa.Column(pd.StringDtype()),
"stop_desc": pa.Column(pd.StringDtype(), nullable=True),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
"zone_id": pa.Column(pa.Float64, nullable=True),
"stop_url": pa.Column(pd.StringDtype(), nullable=True),
"location_type": pa.Column(pa.Float64, nullable=True),
"parent_station": pa.Column(pa.Float64, nullable=True),
"stop_timezone": pa.Column(pa.Float64, nullable=True),
"wheelchair_boarding": pa.Column(pa.Int),
},
index=pa.Index(pa.Int),
)
coll_univ_schema = pa.DataFrameSchema(
columns={
"institution_id": pa.Column(pa.Int),
"institution_name": pa.Column(pd.StringDtype()),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
def get_lat_long(row):
"""Get latitude and longitude."""
return row["coordinates"]
@pa.check_output(poi_schema)
def get_poi_data(url: str, poi_params: Dict) -> pd.DataFrame:
"""Get points of interest within city boundaries."""
poi_dtypes_dict = dict(
ADDRESS_INFO=pd.StringDtype(),
NAME=pd.StringDtype(),
CATEGORY=pd.StringDtype(),
PHONE=pd.StringDtype(),
EMAIL=pd.StringDtype(),
WEBSITE=pd.StringDtype(),
RECEIVED_DATE=pd.StringDtype(),
LINEAR_NAME_FULL=pd.StringDtype(),
ADDRESS_FULL= | pd.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python3
"""
@author: zhaoz
"""
import pandas as pd
import numpy as np
| pd.set_option('max_row', None) | pandas.set_option |
import json
import pathlib
import re
import pandas
import requests
from bs4 import BeautifulSoup
BASE_URL = "https://campusboard.hs-kl.de/portalapps/sv/ModulAnsicht.do"
studiengaenge_ids = {
"Bachelor Angewandte Informatik": 213,
"Master Informatik": 482,
"Bachelor Elektrotechnik": 312,
}
schwerpunkte_ids = {
"Nachrichtentechnik": 324,
"Software Entwicklung": 308,
}
def construct_data_source_url(studiengang, schwerpunkt=None):
url = f"{BASE_URL}?stgid={studiengaenge_ids.get(studiengang)}"
if schwerpunkt:
url = f"{url}&cspkt_id={schwerpunkte_ids.get(schwerpunkt)}"
return url
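# Usage sketch, derived from the ID tables above:
#   construct_data_source_url("Bachelor Angewandte Informatik")
#   -> "https://campusboard.hs-kl.de/portalapps/sv/ModulAnsicht.do?stgid=213"
#   construct_data_source_url("Master Informatik", "Software Entwicklung")
#   -> "...ModulAnsicht.do?stgid=482&cspkt_id=308"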
def get_courses(studiengang, schwerpunkt=None):
if studiengang not in studiengaenge_ids:
raise ValueError(f"Error: Unknown Studiengang {studiengang}")
if schwerpunkt and schwerpunkt not in schwerpunkte_ids:
raise ValueError(f"Error: Unknown Schwerpunkt {schwerpunkt}")
data_source_url = construct_data_source_url(studiengang, schwerpunkt)
response = requests.get(data_source_url)
html = response.text
soup = BeautifulSoup(html, "html.parser")
table = soup.find("table", {"class": "border_collapse"})
rows = table.findAll("tr")
courses = []
for row in rows:
columns = row.findAll("td")
if len(columns) == 6 or len(columns) == 7:
course = [column.text.strip() for column in columns]
courses.append(course)
if len(courses[0]) == 6:
headers = ["semester", "modulnr", "modul", "sws", "cp", "pdf"]
else:
headers = ["semester", "modulnr", "sp", "modul", "sws", "cp", "pdf"]
courses_dict_list = [dict(zip(headers, course)) for course in courses]
df = as_sorted_dataframe(courses_dict_list)
courses_list = as_list(df)
return courses_list
def pretty_print_coures(courses):
print(json.dumps(courses, indent=2))
def as_sorted_dataframe(courses):
return | pandas.DataFrame(courses) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pickle as pkl
import proj_utils as pu
from os.path import isdir, join
from os import mkdir
from copy import deepcopy
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn import ensemble, feature_selection, model_selection, preprocessing, svm, metrics, neighbors
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
seed = 13
def calc_scores(y_test, predicted):
balanced = metrics.balanced_accuracy_score(y_test, predicted)
chance = metrics.balanced_accuracy_score(y_test, predicted, adjusted=True)
f1 = metrics.f1_score(y_test, predicted, average=None)
return balanced, chance, f1
def save_scores(f1_scores, balanced_scores, chance_scores, class_labels):
# Calculate average performance and tack it onto the end of the score list, save to nice df
n_folds = len(balanced_scores)
f1_array = np.asarray(f1_scores)
if n_folds != f1_array.shape[0]:
raise ValueError("Number of folds does not match")
rownames = ['Fold %02d' % (n+1) for n in range(n_folds)]
rownames.append('Average')
f1_class_averages = np.mean(f1_array, axis=0)
f1_data = np.vstack((f1_array, f1_class_averages))
f1_df = | pd.DataFrame(f1_data, index=rownames, columns=class_labels) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
import re
from os import PathLike
from pathlib import Path
from scipy.ndimage import maximum_filter
from typing import (
Generator,
List,
Optional,
Sequence,
Tuple,
Union,
)
from steinbock import io
try:
from readimc import MCDFile, TXTFile
from readimc.data import Acquisition, AcquisitionBase
imc_available = True
except:
imc_available = False
_logger = logging.getLogger(__name__)
def list_mcd_files(mcd_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(mcd_dir).rglob("*.mcd"))
def list_txt_files(txt_dir: Union[str, PathLike]) -> List[Path]:
return sorted(Path(txt_dir).rglob("*.txt"))
def create_panel_from_imc_panel(
imc_panel_file: Union[str, PathLike],
imc_panel_channel_col: str = "Metal Tag",
imc_panel_name_col: str = "Target",
imc_panel_keep_col: str = "full",
imc_panel_ilastik_col: str = "ilastik",
) -> pd.DataFrame:
imc_panel = pd.read_csv(
imc_panel_file,
sep=",|;",
dtype={
imc_panel_channel_col: pd.StringDtype(),
imc_panel_name_col: pd.StringDtype(),
imc_panel_keep_col: pd.BooleanDtype(),
imc_panel_ilastik_col: pd.BooleanDtype(),
},
engine="python",
true_values=["1"],
false_values=["0"],
)
for required_col in (imc_panel_channel_col, imc_panel_name_col):
if required_col not in imc_panel:
raise ValueError(f"Missing '{required_col}' column in IMC panel")
for notnan_col in (
imc_panel_channel_col,
imc_panel_keep_col,
imc_panel_ilastik_col,
):
if notnan_col in imc_panel and imc_panel[notnan_col].isna().any():
raise ValueError(f"Missing values for '{notnan_col}' in IMC panel")
rename_columns = {
imc_panel_channel_col: "channel",
imc_panel_name_col: "name",
imc_panel_keep_col: "keep",
imc_panel_ilastik_col: "ilastik",
}
drop_columns = [
panel_col
for imc_panel_col, panel_col in rename_columns.items()
if panel_col in imc_panel.columns and panel_col != imc_panel_col
]
panel = imc_panel.drop(columns=drop_columns).rename(columns=rename_columns)
for _, g in panel.groupby("channel"):
panel.loc[g.index, "name"] = " / ".join(g["name"].dropna().unique())
if "keep" in panel:
panel.loc[g.index, "keep"] = g["keep"].any()
if "ilastik" in panel:
panel.loc[g.index, "ilastik"] = g["ilastik"].any()
panel = panel.groupby(panel["channel"].values).aggregate("first")
panel = _clean_panel(panel) # ilastik column may be nullable uint8 now
ilastik_mask = panel["ilastik"].fillna(False).astype(bool)
panel["ilastik"] = pd.Series(dtype=pd.UInt8Dtype())
panel.loc[ilastik_mask, "ilastik"] = range(1, ilastik_mask.sum() + 1)
return panel
def create_panel_from_mcd_files(
mcd_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for mcd_file in mcd_files:
with MCDFile(mcd_file) as f:
for slide in f.slides:
for acquisition in slide.acquisitions:
panel = _create_panel_from_acquisition(acquisition)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def create_panel_from_txt_files(
txt_files: Sequence[Union[str, PathLike]]
) -> pd.DataFrame:
panels = []
for txt_file in txt_files:
with TXTFile(txt_file) as f:
panel = _create_panel_from_acquisition(f)
panels.append(panel)
panel = pd.concat(panels, ignore_index=True, copy=False)
return _clean_panel(panel)
def filter_hot_pixels(img: np.ndarray, thres: float) -> np.ndarray:
kernel = np.ones((1, 3, 3), dtype=bool)
kernel[0, 1, 1] = False
max_neighbor_img = maximum_filter(img, footprint=kernel, mode="mirror")
return np.where(img - max_neighbor_img > thres, max_neighbor_img, img)
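# Minimal sketch (not part of the package API) of the hot-pixel filter above:
# a pixel is replaced by the maximum of its 8 in-plane neighbours whenever it
# exceeds that maximum by more than `thres`; image and threshold are arbitrary.
def _filter_hot_pixels_demo() -> bool:
    img = np.ones((1, 5, 5), dtype=np.float32)
    img[0, 2, 2] = 100.0  # hot pixel: 99 above its neighbourhood maximum of 1
    filtered = filter_hot_pixels(img, thres=50.0)
    return filtered[0, 2, 2] == 1.0  # the outlier is pulled down to 1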
def preprocess_image(
img: np.ndarray, hpf: Optional[float] = None
) -> np.ndarray:
img = img.astype(np.float32)
if hpf is not None:
img = filter_hot_pixels(img, hpf)
return io._to_dtype(img, io.img_dtype)
def try_preprocess_images_from_disk(
mcd_files: Sequence[Union[str, PathLike]],
txt_files: Sequence[Union[str, PathLike]],
channel_names: Optional[Sequence[str]] = None,
hpf: Optional[float] = None,
) -> Generator[
Tuple[Path, Optional["Acquisition"], np.ndarray, Optional[Path], bool],
None,
None,
]:
unmatched_txt_files = list(txt_files)
for mcd_file in mcd_files:
try:
with MCDFile(mcd_file) as f_mcd:
for slide in f_mcd.slides:
for acquisition in slide.acquisitions:
matched_txt_file = _match_txt_file(
mcd_file, acquisition, unmatched_txt_files
)
if matched_txt_file is not None:
unmatched_txt_files.remove(matched_txt_file)
channel_ind = None
if channel_names is not None:
channel_ind = _get_channel_indices(
acquisition, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found for "
f"acquisition {acquisition.id} in file "
"{mcd_file}; skipping acquisition"
)
continue
img = None
recovered = False
try:
img = f_mcd.read_acquisition(acquisition)
except IOError:
_logger.warning(
f"Error reading acquisition {acquisition.id} "
f"from file {mcd_file}"
)
if matched_txt_file is not None:
_logger.warning(
f"Restoring from file {matched_txt_file}"
)
try:
with TXTFile(matched_txt_file) as f_txt:
img = f_txt.read_acquisition()
if channel_names is not None:
channel_ind = _get_channel_indices(
f_txt, channel_names
)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} "
"not found in file "
f"{matched_txt_file}; "
"skipping acquisition"
)
continue
recovered = True
except IOError:
_logger.exception(
"Error reading file "
f"{matched_txt_file}"
)
if img is not None: # exceptions ...
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield (
Path(mcd_file),
acquisition,
img,
Path(matched_txt_file)
if matched_txt_file is not None
else None,
recovered,
)
del img
except:
_logger.exception(f"Error reading file {mcd_file}")
while len(unmatched_txt_files) > 0:
txt_file = unmatched_txt_files.pop(0)
try:
channel_ind = None
with TXTFile(txt_file) as f:
if channel_names is not None:
channel_ind = _get_channel_indices(f, channel_names)
if isinstance(channel_ind, str):
_logger.warning(
f"Channel {channel_ind} not found in file "
f"{txt_file}; skipping acquisition"
)
continue
img = f.read_acquisition()
if channel_ind is not None:
img = img[channel_ind, :, :]
img = preprocess_image(img, hpf=hpf)
yield Path(txt_file), None, img, None, False
del img
except:
_logger.exception(f"Error reading file {txt_file}")
def _create_panel_from_acquisition(
acquisition: "AcquisitionBase",
) -> pd.DataFrame:
panel = pd.DataFrame(
data={
"channel": acquisition.channel_names,
"name": acquisition.channel_labels,
"keep": True,
"ilastik": range(1, acquisition.num_channels + 1),
"deepcell": np.nan,
},
)
panel["channel"] = panel["channel"].astype(pd.StringDtype())
panel["name"] = panel["name"].astype(pd.StringDtype())
panel["keep"] = panel["keep"].astype(pd.BooleanDtype())
panel["ilastik"] = panel["ilastik"].astype( | pd.UInt8Dtype() | pandas.UInt8Dtype |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from linearmodels import PanelOLS
import statsmodels.api as sm
import econtools as econ
import econtools.metrics as mt
import math
from statsmodels.stats.outliers_influence import variance_inflation_factor
from auxiliary.prepare import *
from auxiliary.table2 import *
from auxiliary.table3 import *
from auxiliary.table4 import *
from auxiliary.table5 import *
from auxiliary.table6 import *
from auxiliary.table7 import *
from auxiliary.extension import *
from auxiliary.table_formula import *
def calc_vif(X):
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
return(vif)
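# Usage sketch (column choice is illustrative):
#   calc_vif(df[['reserve_price', 'fiscal_efficiency', 'fpsb_auction']])
# returns one VIF per regressor; values far above ~10 are the usual warning
# sign of strong multicollinearity.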
def table5_setting(data):
df = data
df = df[((df['turin_co_sample']==1) | (df['turin_pr_sample']==1)) & ((df['post_experience']>=5)|(df['post_experience'].isnull()==True)) & ((df['pre_experience']>=5)|(df['pre_experience'].isnull()==True))& (df['missing']==0)]
df = df[(df['ctrl_pop_turin_co_sample']==1) | (df['ctrl_pop_turin_pr_sample']==1) | (df['ctrl_exp_turin_co_sample']==1) | (df['ctrl_exp_turin_pr_sample']==1) | (df['ctrl_pop_exp_turin_co_sample']==1) | (df['ctrl_pop_exp_turin_pr_sample']==1)]
df = df.reset_index()
#re-construct trend-pa: setting
id_auth_remained = df['id_auth'].unique()
id_auth_remained_df = pd.DataFrame({'id_auth': [], 'group_num': []})
for i in range(len(id_auth_remained)):
id_auth_remained_df.loc[i,'id_auth'] = id_auth_remained[i]
id_auth_remained_df.loc[i,'group_num'] = i+1
for i in range(len(df)):
for j in range(len(id_auth_remained_df)):
if df.loc[i, 'id_auth'] == id_auth_remained_df.loc[j, 'id_auth']:
df.loc[i, 'id_auth_remained'] = j+1
id_auth_remained_dum = pd.get_dummies(df['id_auth_remained']).rename(columns=lambda x: 'id_auth_remained' + str(x))
df = pd.concat([df, id_auth_remained_dum],axis = 1)
#re-construct trend-pa
for i in range(len(id_auth_remained_dum.columns)):
df['trend_pa_remained_'+str(i+1)] = 0
for j in range(len(df)):
if df.loc[j, id_auth_remained_dum.columns[i]]==1 and df.loc[j, 'authority_code']!=3090272 and df.loc[j, 'authority_code']!=3070001:
df.loc[j,'trend_pa_remained_'+str(i+1)] = 1
df.drop([id_auth_remained_dum.columns[i]],axis = 1)
return(df)
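# table5_setting keeps only the Turin county/province comparison samples and
# builds one administration-specific dummy (trend_pa_remained_1 ... _N) per
# remaining authority; the even-numbered specifications below include a subset
# of these as administration-specific trend controls.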
def table5_PanelA_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelA_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('year_dum_2006.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = | pd.DataFrame((ci_1,ci_2)) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def get_model_name(estimator):
"""
Given an estimator object, return a string
with the name of its class.
"""
return str(estimator.__class__)[1:-1].split()[1][1:-1].split('.')[-1]
def cross_val_for_estimators(estimators, X, y, **cross_val_score_kwargs):
"""
Run cross-validation on a set of estimators
and the (X, y) dataset.
Return the result as a Pandas dataframe in which each row
corresponds to the respective estimator (indexed 0, 1, ..., n - 1),
and the columns comprise a set of fold_1, fold_2, ..., fold_n,
and cv_score_mean (mean value across all folds for the given
esimator).
"""
cv_scores = []
for est in estimators:
current_scores = cross_val_score(est, X, y, **cross_val_score_kwargs)
cv_scores.append(current_scores)
n_folds = len(cv_scores[0])
col_names = ['fold_{}'.format(i+1) for i in range(n_folds)]
summary = | pd.DataFrame(cv_scores, columns=col_names) | pandas.DataFrame |
'''
Joining the data using SQL can be extremely slow with only the date as the index.
A possible way I found to cope with the 15-minute return data, which has more than 200 million rows,
is to use pandas to cut it into chunks, read one part at a time, do the join, and then
use concat to combine the results.
'''
import pandas as pd
timebars = pd.read_csv('timebars_with_news-v3-20180624.csv')
del timebars['Unnamed: 0']
timebars['date'] = pd.to_datetime(timebars['date'])
reader = pd.read_csv('returns_15min.csv', iterator=True)
loop = True
chunkSize = 100000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
except StopIteration:
loop = False
print ("Iteration is stopped.")
merged_data = []
step = 100
for i in range(0, len(chunks), step):
if i <= len(chunks) - step:
bar_data1 = | pd.concat(chunks[i:i + step], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
#
"""
Filename: extract_traffic_types.py
Date: Fri Oct 19 17:47:53 2018
Name: <NAME>
Description:
    - Read all CSV files containing labels
- Group by traffic type (label)
- Generate one CSV file (incl. label) per traffic type
"""
import pandas as pd
from datetime import datetime
import numpy as np
def get_typelist(df):
"""
Extract traffic type from a pandas data frame containing IDS2017 CSV
file with labelled traffic
Parameter
---------
df: DataFrame
Pandas DataFrame corresponding to the content of a CSV file
Return
------
traffic_type_list: list
List of traffic types contained in the DataFrame
"""
traffic_type_list = df[' Label'].value_counts().index.tolist()
return traffic_type_list
def string2index(string):
"""
Convert a string to int so that it can be used as index in an array
Parameter
---------
string: string
string to be converted
Return
------
index: int
index corresponding to the string
"""
if string == 'BENIGN':
index = 0
elif string == 'FTP-Patator':
index = 1
elif string == 'SSH-Patator':
index = 2
elif string == 'DoS Hulk':
index = 3
elif string == 'DoS GoldenEye':
index = 4
elif string == 'DoS slowloris':
index = 5
elif string == 'DoS Slowhttptest':
index = 6
elif string == 'Heartbleed':
index = 7
elif string == 'Web Attack \x96 Brute Force':
index = 8
elif string == 'Web Attack \x96 XSS':
index = 9
elif string == 'Web Attack \x96 Sql Injection':
index = 10
elif string == 'Infiltration':
index = 11
elif string == 'Bot':
index = 12
elif string == 'PortScan':
index = 13
elif string == 'DDoS':
index = 14
else:
print("[ERROR] Cannot convert ", string)
index = -1
return index
def index2string(index):
"""
Convert an int to string
Parameter
---------
index: int
index to be converted
Return
------
string: string
string corresponding to the string
"""
if index == 0:
string = 'BENIGN'
elif index == 1:
string = 'FTP-Patator'
elif index == 2:
string = 'SSH-Patator'
elif index == 3:
string = 'DoS Hulk'
elif index == 4:
string = 'DoS GoldenEye'
elif index == 5:
string = 'DoS slowloris'
elif index == 6:
string = 'DoS Slowhttptest'
elif index == 7:
string = 'Heartbleed'
elif index == 8:
string = 'Web Attack Brute Force'
elif index == 9:
string = 'Web Attack XSS'
elif index == 10:
string = 'Web Attack Sql Injection'
elif index == 11:
string = 'Infiltration'
elif index == 12:
string = 'Bot'
elif index == 13:
string = 'PortScan'
elif index == 14:
string = 'DDoS'
else:
print("[ERROR] Cannot convert {}".format(index))
string = 'Error'
return string
def get_dataframe_ofType(df, traffic_type):
"""
Analyze traffic distribution of pandas data frame containing IDS2017 CSV
file with labelled traffic
Parameter
---------
df: DataFrame
Pandas DataFrame corresponding to the content of a CSV file
traffic_type: string
name corresponding to traffic type
Return
------
req_df: DataFrame
Pandas DataFrame containing only the requested traffic type
"""
req_df = df.loc[df[' Label'] == traffic_type]
# don't keep original indexes
req_df = req_df.reset_index()
return req_df
def remove_empty_lines(df):
"""
Remove empty lines imported from csv files into Pandas DataFrame as NaN.
For a fast processing, only FlowID is checked. If NaN, then the line is
dropped.
Parameters
----------
df: DataFrame
Pandas DataFrame to be inspected
Returns
-------
df_clean: DataFrame
Pandas DataFrame after clean-up
"""
df.replace([''], np.nan, inplace=True)
df_clean = df.dropna(subset=['Flow ID'], inplace=False)
n_removed = df.shape[0]-df_clean.shape[0]
if n_removed != 0:
print("[INFO] Empty lines removed: {}".
format(df.shape[0]-df_clean.shape[0]))
return df_clean
def detect_drop_outliers(df):
"""
Detect and drop NaN rows of a DataFrame
Parameters
----------
df: DataFrame
pandas DataFrame containing data
Returns
-------
clean_df: DataFrame
pandas DataFrame without rows containing NaN
"""
df.replace(['+Infinity', '-Infinity', 'Infinity'], np.nan,
inplace=True)
clean_df = df
lbl_list = df.columns.values
lbl_idx, = np.where(lbl_list == 'Flow Bytes/s')
clean_df['Flow Bytes/s'] = np.array(
clean_df.iloc[:, lbl_idx]).astype(float)
lbl_idx, = np.where(lbl_list == ' Flow Packets/s')
clean_df[' Flow Packets/s'] = np.array(
clean_df.iloc[:, lbl_idx]).astype(float)
null_columns = clean_df.columns[clean_df.isna().any()]
nan_cnt = clean_df[null_columns].isnull().sum()
if nan_cnt.empty is False:
print("\t\tNaN detected and dropped: ")
print(nan_cnt)
clean_df = clean_df.dropna(axis=0)
print("\t\tPrev shape: {} - New shape: {}".format(df.shape,
clean_df.shape))
return clean_df
def main():
# declare useful variables
input_path = "./cicids2017/csv_files/"
filelist = ("Monday-WorkingHours.pcap_ISCX.csv",
"Tuesday-WorkingHours.pcap_ISCX.csv",
"Wednesday-workingHours.pcap_ISCX.csv",
"Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv",
"Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv",
"Friday-WorkingHours-Morning.pcap_ISCX.csv",
"Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv",
"Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
output_path = "./cicids2017/traffic_types/"
pq_list = ["BENIGN.parquet",
"FTP-Patator.parquet",
"SSH-Patator.parquet",
"DoS Hulk.parquet",
"DoS GoldenEye.parquet",
"DoS slowloris.parquet",
"DoS Slowhttptest.parquet",
"Heartbleed.parquet",
"Web Attack Brute Force.parquet",
"Web Attack XSS.parquet",
"Web Attack Sql Injection.parquet",
"Infiltration.parquet",
"Bot.parquet",
"PortScan.parquet",
"DDoS.parquet"]
dflist = [pd.DataFrame(), # benign
pd.DataFrame(), # ftp_patator
pd.DataFrame(), # ssh_patator
| pd.DataFrame() | pandas.DataFrame |
"""
##############################################################################
#
# Filter ATtRACT database and save motifs in a MotEvo-compatible format
#
# AUTHOR: Maciej_Bak
# AFFILIATION: University_of_Basel
# AFFILIATION: Swiss_Institute_of_Bioinformatics
# CONTACT: <EMAIL>
# CREATED: 23-11-2021
# LICENSE: Apache_2.0
#
##############################################################################
"""
# imports
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
import re
import os
import numpy as np
import pandas as pd
def parse_arguments():
"""Parser of the command-line arguments."""
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
default="ERROR",
help="Verbosity/Log level. Defaults to ERROR",
)
parser.add_argument(
"-l", "--logfile", dest="logfile", help="Store log to this file."
)
parser.add_argument(
"--dbpath", dest="dbpath", required=True, help="Path to the database textfile.",
)
parser.add_argument(
"--pwmpath",
dest="pwmpath",
required=True,
help="Path to the database pwm file.",
)
parser.add_argument(
"--outdir", dest="outdir", required=True, help="Path for the output directory.",
)
return parser
##############################################################################
# custom function adapted from the initial work for:
# https://github.com/gruber-sciencelab/SMEAGOL
def read_pms_from_file(file, value_col="probs", lengths=False, transpose=False):
"""Function to read position matrices from a fasta-like file in Attract format.
Args:
pm_file (str): file containing PMs
value_col (str): name for column containing PM values
lengths (bool): lengths are provided in the file
transpose (bool): transpose the matrix
Returns:
pandas dataframe containing PMs
"""
# Read file
pms = list(open(file, "r"))
pms = [x.strip().split("\t") for x in pms]
# Get matrix start and end positions
starts = np.where([x[0].startswith(">") for x in pms])[0]
assert starts[0] == 0
ends = np.append(starts[1:], len(pms))
# Get matrix IDs and values
pm_ids = [l[0].strip(">") for l in pms if l[0].startswith(">")]
if lengths:
lens = np.array([l[1] for l in pms if l[0].startswith(">")]).astype("int")
assert np.all(lens == ends - starts - 1)
pms = [pms[start + 1 : end] for start, end in zip(starts, ends)]
if transpose:
pms = [np.transpose(np.array(x).astype("float")) for x in pms]
else:
pms = [np.array(x).astype("float") for x in pms]
# Make dataframe
return pd.DataFrame({"Matrix_id": pm_ids, value_col: pms})
# custom function adapted from:
# https://github.com/gruber-sciencelab/SMEAGOL
def trim_ppm(probs, information_content):
"""Function to trim non-informative columns from ends of a PPM.
Args:
probs (np.array): array containing PPM probability values
frac_threshold (float): threshold (0-1) to filter out non-informative columns.
Returns:
result (np.array): array containing trimmed PPM.
"""
pos_ic = position_wise_ic(probs, axis=1)
to_trim = pos_ic <= information_content
positions = list(range(probs.shape[0]))
assert len(to_trim) == len(positions)
# Trim from start
while to_trim[0]:
positions = positions[1:]
to_trim = to_trim[1:]
# Trim from end
while to_trim[-1]:
positions = positions[:-1]
to_trim = to_trim[:-1]
result = probs[positions, :]
return result
# custom function adapted from:
# https://github.com/gruber-sciencelab/SMEAGOL
def position_wise_ic(probs, axis=1):
"""Function to calculate information content of each column in a PPM.
Args:
probs (np.array): array containing PPM probability values
Returns:
result (np.array): information content of each column in probs.
"""
position_wise_entropy = np.apply_along_axis(entropy, axis=axis, arr=probs)
result = 2 - position_wise_entropy
return result
# custom function adapted from:
# https://github.com/gruber-sciencelab/SMEAGOL
def entropy(probs):
"""Function to calculate entropy of a PPM or column of a PPM.
Args:
probs (np.array): Array containing probability values
Returns:
result (float): Entropy value
"""
result = -np.sum(probs * np.log2(probs))
return result
def main():
"""Main body of the script."""
# read in the motif annotation file
db = | pd.read_csv(options.dbpath, sep="\t") | pandas.read_csv |
import pandas as pd
import re
import Methods as m
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from spellchecker import SpellChecker
from nltk.tokenize.treebank import TreebankWordDetokenizer
from sklearn.feature_extraction.text import CountVectorizer
spell = SpellChecker(distance = 1)
text_set = []
corpus = []# Final corpus
#----collect dataSet----
print("reading dataset 1")
dataSet1 = pd.read_csv('venv/Data/newUpdate.csv', names=['id', 'text'], header=1)
for text in dataSet1["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 2")
dataSet2 = pd.read_csv('venv/Data/protest.csv', names=['id', 'text'], header=1)
for text in dataSet2["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 3")
dataSet3 = pd.read_csv('venv/Data/corona.csv', names=['id', 'text'], header=1)
for text in dataSet3["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset b")
dataSeta4 = pd.read_csv('venv/Data/datar.csv', names=['id', 'text'], header=1)
for text in dataSeta4["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 5")
dataSet5 = pd.read_csv('venv/Data/fashion.csv', names=['id', 'text'], header=1)
for text in dataSet5["text"]:
text_set.append(text)
print("size of data" , len(text_set))
print("reading dataset 6")
dataSet6 = | pd.read_csv('venv/Data/Data.csv', names=['ID', 'TEXT'], header=1) | pandas.read_csv |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The trainer program for Adelaide_EA."""
import os
import torch
import logging
import json
import pandas as pd
import numpy as np
from copy import deepcopy
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.metrics.pytorch import calc_model_flops_params
from vega.core.common import FileOps, Config
from vega.search_space.networks import NetworkDesc
from vega.search_space import SearchSpace
from vega.search_space.codec import Codec
from vega.core.trainer.callbacks import Callback
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AdelaideEATrainerCallback(Callback):
"""Construct the trainer of Adelaide-EA."""
def before_train(self, logs=None):
"""Be called before the training process."""
self.cfg = self.trainer.cfg
self.trainer.auto_save_ckpt = False
self.trainer.auto_save_perf = False
self.worker_id = self.trainer.worker_id
self.local_base_path = self.trainer.local_base_path
self.local_output_path = self.trainer.local_output_path
self.result_path = FileOps.join_path(self.local_output_path, self.cfg.step_name)
FileOps.make_dir(self.result_path)
count_input = torch.FloatTensor(1, 3, 192, 192).cuda()
flops_count, params_count = calc_model_flops_params(
self.trainer.model, count_input)
GFlops, KParams = flops_count * 1e-9, params_count * 1e-3
logger.info("Flops: {:.2f} G, Params: {:.1f} K".format(GFlops, KParams))
if GFlops > 0.6:
logger.info("Flop too large!")
self.trainer.skip_train = True
self._copy_needed_file()
def make_batch(self, batch):
"""Make batch for each training step."""
input = batch["data"]
target = batch["mask"]
if self.cfg.cuda:
input = input.cuda()
target = target.cuda()
return input, target
def after_epoch(self, epoch, logs=None):
"""Be called after one epoch training."""
self.performance = logs.get('summary_perfs', None)
gflops = self.performance['gflops']
kparams = self.performance['kparams']
cur_valid_perfs = self.performance['cur_valid_perfs']
best_valid_perfs = self.performance['best_valid_perfs']
perfs = {'gflops': gflops, 'kparams': kparams,
'cur_valid_perf': list(cur_valid_perfs.values())[0],
'best_valid_perf': list(best_valid_perfs.values())[0]
}
best_changed = self.performance['best_valid_perfs_changed']
if best_changed:
self._save_checkpoint(perfs)
def after_train(self, logs=None):
"""Be called after the whole train process."""
gflops = self.performance['gflops']
kparams = self.performance['kparams']
cur_valid_perfs = self.performance['cur_valid_perfs']
best_valid_perfs = self.performance['best_valid_perfs']
perfs = {'gflops': gflops, 'kparams': kparams,
'cur_valid_perf': list(cur_valid_perfs.values())[0],
'best_valid_perf': list(best_valid_perfs.values())[0]
}
self._save_checkpoint(perfs, "latest.pth")
self._save_performance(perfs, self.trainer.model.desc)
if self.cfg.get('save_model_desc', False):
self._save_model_desc()
def _copy_needed_file(self):
if "pareto_front_file" in self.cfg and self.cfg.pareto_front_file is not None:
init_pareto_front_file = self.cfg.pareto_front_file.replace("{local_base_path}", self.local_base_path)
self.pareto_front_file = FileOps.join_path(self.result_path, "pareto_front.csv")
FileOps.copy_file(init_pareto_front_file, self.pareto_front_file)
if "random_file" in self.cfg and self.cfg.random_file is not None:
init_random_file = self.cfg.random_file.replace("{local_base_path}", self.local_base_path)
self.random_file = FileOps.join_path(self.local_output_path, self.cfg.step_name, "random.csv")
FileOps.copy_file(init_random_file, self.random_file)
def _save_model_desc(self):
search_space = SearchSpace()
codec = Codec(self.cfg.codec, search_space)
pareto_front_df = pd.read_csv(FileOps.join_path(self.result_path, "pareto_front.csv"))
codes = pareto_front_df['Code']
for i in range(len(codes)):
search_desc = Config()
search_desc.custom = deepcopy(search_space.search_space.custom)
search_desc.modules = deepcopy(search_space.search_space.modules)
code = codes.loc[i]
search_desc.custom.code = code
search_desc.custom.method = 'full'
codec.decode(search_desc.custom)
self.trainer.output_model_desc(i, search_desc)
def _save_checkpoint(self, performance=None, model_name="best.pth"):
"""Save the trained model.
:param performance: dict of all the result needed
:param model_name: name of the result file
:return: the path of the saved file
"""
model_save_path = FileOps.join_path(self.trainer.get_local_worker_path(), model_name)
torch.save({
'model_state_dict': self.trainer.model.state_dict(),
**performance
}, model_save_path)
torch.save(self.trainer.model.state_dict(), model_save_path)
logger.info("model saved to {}".format(model_save_path))
return model_save_path
def _save_performance(self, performance, model_desc=None):
"""Save result of the model, and calculate pareto front.
:param performance: The dict that contains all the result needed
:param model_desc: config of the model
"""
performance_str = json.dumps(performance, indent=4, sort_keys=True)
self.trainer._save_performance(performance_str)
method = model_desc.method
code = model_desc.code
result_file_name = FileOps.join_path(self.result_path, "{}.csv".format(method))
header = "Code,GFlops,KParams,mIoU,Best mIoU,Worker_id\n"
if not os.path.exists(result_file_name):
with open(result_file_name, 'w') as file:
file.write(header)
with open(result_file_name, 'a') as file:
file.write('{},{},{},{},{},{}\n'.format(
code, performance['gflops'], performance['kparams'],
performance["cur_valid_perf"], performance["best_valid_perf"],
self.worker_id
))
logger.info("Model result saved to {}".format(result_file_name))
self._save_pareto_front("GFlops", "Best mIoU")
def _save_pareto_front(self, metric_x, metric_y):
"""Save pareto front of the searched models.
:param metric_x: x axis of pareto front
:param metric_y: y axis of pareto front
"""
df_all = pd.read_csv(FileOps.join_path(self.result_path, "random.csv"))
mutate_csv = FileOps.join_path(self.result_path, 'mutate.csv')
if os.path.exists(mutate_csv):
df_mutate = | pd.read_csv(mutate_csv) | pandas.read_csv |
from datetime import datetime
import pandas as pd
import numpy as np
import openpyxl
import schedule
import time
import config
DESIGN_PERCENTAGE_PRESSURE_LEFT_UNDERSLUICE = {
'EP-A': [28.47], 'EP-B': [19.50], 'EP-C': [18.59], 'EP-D': [31.78], 'EP-E': [5.62]
}
DESIGN_PERCENTAGE_PRESSURE_MAIN_WEIR = {
'EP-A': [70.63], 'EP-B': [56.27], 'EP-C': [43.07], 'EP-D': [31.78], 'EP-E': [5.62]
}
DESIGN_PERCENTAGE_PRESSURE_OLD_RIGHT_UNDERSLUICE = {
'EP-A': [36.59], 'EP-B': [33.34], 'EP-C': [24.64], 'EP-D': [20.61], 'EP-E': [3.19]
}
DESIGN_PERCENTAGE_PRESSURE_ADDITIONAL_BAYS = {
'EP-A': [43.59], 'EP-B': [41.60], 'EP-C': [35.34], 'EP-D': [24.04], 'EP-E': [7.41]
}
DESIGN_PERCENTAGE_PRESSURE_NEW_RIGHT_UNDERSLUICE = {
'EP-A': [43.59], 'EP-B': [41.60], 'EP-C': [35.34], 'EP-D': [24.04], 'EP-E': [6.73]
}
left_undersluice_columns = ['EP(1)', 'EP(2)', 'EP(3)']
main_weir_columns = ['EP(4)', 'EP(5)', 'EP(6)', 'EP(7)', 'EP(8)', 'EP(11)', 'EP(12)', 'EP(13)', 'EP(14)', 'EP(15)', 'EP(16)', 'EP(17)',
'EP(18)', 'EP(19)', 'EP(20)', 'EP(21)', 'EP(22)', 'EP(23)', 'EP(9)', 'EP(9)', 'EP(26)', 'EP(27)', 'EP(28)', 'EP(29)', 'EP(30)']
old_right_undersluice_columns = [
'EP(31)', 'EP(32)', 'EP(33)', 'EP(34)', 'EP(35)']
additional_bays_columns = ['EP(36)', 'EP(37)', 'EP(38)', 'EP(39)', 'EP(40)']
new_right_undersluice_columns = [
'EP(41)', 'EP(42)', 'EP(43)', 'EP(44)', 'EP(45)']
SAFE_HGL_LEVEL = []
path_excel = config.excel_path
path_loc = config.dat_path
conn = config.conn
def generate_report():
data = str(datetime.now())
u_date = data[:19]
#
left_undersluice(u_date)
main_weir(u_date)
old_right_undersluice(u_date)
additional_bays(u_date)
left_undersluice(u_date)
SAFE_HGL_LEVEL.append(u_date)
append_safe_hgl_level()
# context = {
# 'form': DateForm(), 'date': date, 'time': time,
# 'RL_left': left_rp, 'DPP_left': left_dpp, 'WL_left': left_wl,
# 'RL_main': main_rp, 'DPP_main': main_dpp, 'WL_main': main_wl, 'bay_no': [1, 9, 18, 28, 37],
# 'RL_old': old_rp, 'DPP_old': old_dpp, 'WL_old': old_wl,
# 'RL_additional': additional_rp, 'DPP_additional': additional_dpp, 'WL_additional': additional_wl,
# 'RL_nr': new_right_rp, 'DPP_nr': new_right_dpp, 'WL_nr': new_right_wl
# }
# return render(request, 'report/report.html', context)
###############################################################################################
def left_undersluice(u_date):
wl = left_undersluice_wl_builder(u_date)
dpp = left_undersluice_dpp_builder()
ep_list = get_ep_list(u_date, left_undersluice_columns)
report_list = left_undersluice_report_builder(wl, dpp, ep_list)
return report_list.values, dpp, wl
def main_weir(u_date):
wl = main_weir_wl_builder(u_date)
dpp = main_weir_dpp_builder()
ep_list = get_ep_list(u_date, main_weir_columns)
report_list = main_weir_report_builder(wl, dpp, ep_list)
chunk_list = np.split(report_list.values, 5)
return chunk_list, dpp, wl
def old_right_undersluice(u_date):
wl = old_right_undersluice_wl_builder(u_date)
dpp = old_right_undersluice_dpp_builder()
ep_list = get_ep_list(u_date, old_right_undersluice_columns)
report_list = old_right_undersluice_report_builder(wl, dpp, ep_list)
return report_list.values, dpp, wl
def new_right_undersluice(u_date):
wl = new_right_undersluice_wl_builder(u_date)
dpp = new_right_undersluice_dpp_builder()
ep_list = get_ep_list(u_date, new_right_undersluice_columns)
report_list = new_right_undersluice_report_builder(wl, dpp, ep_list)
return report_list.values, dpp, wl
def additional_bays(u_date):
wl = additional_bays_wl_builder(u_date)
dpp = additional_bays_dpp_builder()
ep_list = get_ep_list(u_date, additional_bays_columns)
report_list = additional_bays_report_builder(wl, dpp, ep_list)
return report_list.values, dpp, wl
###################################################################################################
def left_undersluice_wl_builder(u_input):
df = pd.read_sql_query(f'SELECT * FROM SC.dbo.WLS', conn)
df['dt'] = df['dt'].values.astype('<M8[m]')
filt = (df['dt'] == str(u_input))
df = df[filt][['W5', 'W4']]
df['Average_up'] = round((df['W5']), 2)
df['Average_down'] = round((df['W4']), 2)
df['Head Across'] = round((df['Average_up'] - df['Average_down']), 2)
return df.values[0]
def left_undersluice_dpp_builder():
| pd.DataFrame(DESIGN_PERCENTAGE_PRESSURE_LEFT_UNDERSLUICE) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default beahviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": | pd.Series([5, 3, None, 7, 9, 2], dtype="Int64") | pandas.Series |
#!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2019 Mundi Web Services
# Licensed under the 3-Clause BSD License; you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
#
# Author : Dr. <NAME>
#
# Contact email: <EMAIL>
# =============================================================================
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import folium
from folium import plugins
import datetime
import xarray
import branca
import geojsoncontour
from mpl_toolkits.basemap import Basemap
import ipywidgets as widgets
import ftputil
import pandas as pd
import os
from ipywidgets import interact, interactive, fixed, interact_manual
import math
from pathlib import Path
from collections.abc import Iterable  # 'Iterable' lives in collections.abc on Python 3.3+
from PIL import Image
### Cmems Functions ##############################################################
class Cmems:
################################# MODEL AND SATELLITE PRODUCTS #################################################
################################################################################################################
############################################################################################
#------------------------------------------------------------------------------------------
# DOWNLOAD THE FILE (For MODEL AND SATELLITE PRODUCTS)
#------------------------------------------------------------------------------------------
###########################################################################################
@staticmethod
def download_Product(user,password,Product):
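        # Interactive browser/downloader for the CMEMS FTP catalog ('nrt.cmems-du.eu').
        #   user, password : CMEMS account credentials used for the FTP connection
        #   Product        : 'Model' or 'Satellite', selects which part of the catalog is listed
        # Returns the nested update() callback; its last return value holds the current
        # widget selection and is used later by read_File() to locate the downloaded file.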
########## CASE 1 (Product=='Model') : Get the list of all model products offered by the cmems catalog
if Product=='Model':
Model_products=[]
# connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list=[]
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
# conditions to select only model products
if 'OBSERVATIONS' not in items and 'MULTIOBS' not in items and 'INSITU' not in items:
Model_products.append(product)
data = {'MODEL PRODUCTS': []}
#-----------------------------------------------------------------------------------------------------
########## CASE 2 (Product=='Satellite') : Get the list of all satellite products offered by the cmems catalog
elif Product=='Satellite':
Model_products=[]
# connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list=[]
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
# conditions to select only satellite products
                    if ('MULTIOBS' in items or 'OBSERVATIONS' in items) and 'INSITU' not in items:
Model_products.append(product)
data = {'SATELLITE OBSERVATION PRODUCTS': []}
#-----------------------------------------------------------------------------------------------------
########## Initialize the widgets ------------------------------------------------------------------
style = {'description_width': 'initial'}
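        # NOTE: the default widget values below are picked by fixed list indices
        # (e.g. Model_products[4], product_list3[3]); these are arbitrary defaults and
        # may raise an IndexError if the catalog ordering or size changes.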
if Product=='Model':
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=Model_products,
value=Model_products[4],
description='Product:',
disabled=False)
elif Product=='Satellite':
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=Model_products,
value=Model_products[52],
description='Product:',
disabled=False)
product_name=x_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name)
product_list2 = ftp_host.listdir(ftp_host.curdir)
y_widget = widgets.RadioButtons(layout={'width': 'initial'},options=product_list2,value=product_list2[0],description='Available data type :',style=style)
product_name2=y_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2)
product_list3 = ftp_host.listdir(ftp_host.curdir)
z_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list3,value=product_list3[3],description='Year:')
product_name3=z_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3)
product_list4 = ftp_host.listdir(ftp_host.curdir)
w_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list4,value=product_list4[5],description='Month:')
product_name4=w_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3+'/'+product_name4)
product_list5 = ftp_host.listdir(ftp_host.curdir)
i_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list5,value=product_list5[3],description='File:')
#-----------------------------------------------------------------------------------------------------
############# Define a function that updates the content of (y_widget,z_widget,w_widget,i_widget) based on what we select for x_widget
def update(*args):
product_name=x_widget.value
# Get the list of the available data offered by the selected product
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name)
product_list2 = ftp_host.listdir(ftp_host.curdir)
# Get the content of y_widget based on x_widget.value
y_widget.options=product_list2
product_name2=y_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2)
product_list3 = ftp_host.listdir(ftp_host.curdir)
# Get the content of the widgets based on our selection for different cases:
# case 1 : Get the content of the widgets based on the value of y_widget
if 'nc' in product_list3[1]:
z_widget.options=product_list3
z_widget.description='File'
w_widget.options=['']
i_widget.options=['']
w_widget.description='option'
i_widget.description='option'
netcdf_files=[]
netcdf_files=product_list3
else:
z_widget.options=product_list3
z_widget.description='Year'
product_name3=z_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3)
product_list4 = ftp_host.listdir(ftp_host.curdir)
# case 2 : Get the content of the widgets based on the value of z_widget
if 'nc' in product_list4[1]:
w_widget.options=product_list4
w_widget.description='File'
i_widget.options=['']
netcdf_files=[]
netcdf_files=product_list4
else:
w_widget.options=product_list4
w_widget.description='Month'
product_name4=w_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3+'/'+product_name4)
product_list5 = ftp_host.listdir(ftp_host.curdir)
# case 3 : Get the content of the widgets based on the value of w_widget
if 'nc' in product_list5[1]:
i_widget.options=product_list5#['List of netCdf Files']
i_widget.description='File'
netcdf_files=[]
netcdf_files=product_list5
else:
i_widget.options=product_list5
i_widget.description='day'
return (z_widget.value,w_widget.value,i_widget.value)
# update the content of the widgets according to our selection
x_widget.observe(update,'value')
y_widget.observe(update,'value')
z_widget.observe(update,'value')
w_widget.observe(update,'value')
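        # update() is also returned at the end of this method so that read_File() can
        # query the final widget selection once the user has made a choice.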
####################-------------------------------------------------------------------------------------------------
######## Define the download procedure using the ftp protocol
def random_function(x, y, z, w, i):
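            # x..i are the current values of the five selection widgets (product, data type,
            # year/file, month/file, day/file); empty and placeholder entries are dropped
            # below to rebuild the FTP path of the selected netCDF file.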
###### get the downloading path
path=[x,y,z,w,i]
path_new=[]
file=[]
for i in path:
if i != 'List of netCdf Files' and i != '':
path_new.append(i)
file=path_new[-1]
path2 = "Core"
for i in range(len(path_new)):
path2 = path2+'/'+str(path_new[i])
filepath2= path2
ncdf_file_name2=file
#-----------------------------------------
# define the downloading button
button = widgets.Button(description='''Download The File''')
out = widgets.Output()
def on_button_clicked(_):
# "linking function with output"
with out:
#try:
output_directory=[]
# set the output_directory of the file
if Product=='Model':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/01_Model_product'
else:
output_directory=os.getcwd()+'/cmems_data/01_Model_product'
elif Product=='Satellite':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/02_Satellite_product'
else:
output_directory=os.getcwd()+'/cmems_data/02_Satellite_product'
#--------------------------------------------------------------------
# creating a folder using the output_directory
p = Path(output_directory)
p.mkdir(parents=True, exist_ok=True)
# downloading the file using the ftp protocol
host = 'nrt.cmems-du.eu'
                    print(f"Downloading the file '{ncdf_file_name2}' to {output_directory}")
with ftputil.FTPHost(host, user, password) as ftp_host:
cwd = os.getcwd()
os.chdir(output_directory)
try:
ftp_host.download(filepath2, ncdf_file_name2) # remote, local
print("Done")
except:
print("Downloading can't be done for this file, please run the function again and choose a netCDF file")
os.chdir(cwd)
#except:
#print("Downloading can't be done, please run the function again and choose a netCDF file")
return(ncdf_file_name2)
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
aa=widgets.VBox([button,out])
return(aa)
#----------------------------------------------------------------------------------------
# display the interaction between the widgets
display(pd.DataFrame(data=data))
interact(random_function,
x = x_widget,
y = y_widget,
z = z_widget,
w = w_widget,
i = i_widget);
return(update)
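    # Example usage (a minimal sketch; 'my_user' / 'my_password' are placeholder CMEMS
    # credentials, not real ones):
    #   update = Cmems.download_Product('my_user', 'my_password', 'Model')
    #   # ...select a netCDF file with the widgets and click "Download The File"...
    #   ds, params, names, path = Cmems.read_File(update, 'Model')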
###############################################################################################################
#--------------------------------------------------------------------------------------------
# READ THE DOWNLOADED FILE (For MODEL AND SATELLITE PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def read_File(update,Product):
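        # Opens the file previously fetched by download_Product() with xarray.
        #   update  : the callback returned by download_Product (its return value
        #             contains the name of the selected netCDF file)
        #   Product : must match the value used in download_Product ('Model'/'Satellite')
        # Returns (dataset, parameter_keys, parameter_names, local_file_path).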
# get the name of the selected file
for i in update():
if 'nc' in i:
file=i
# get the current directory of the file
if Product=='Model':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/01_Model_product'
else:
output_directory=os.getcwd()+'/cmems_data/01_Model_product'
elif Product=='Satellite':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/02_Satellite_product'
else:
output_directory=os.getcwd()+'/cmems_data/02_Satellite_product'
# reading the netcdf file
dataset = output_directory+f'/{file}'
ds = xarray.open_dataset(dataset)
# get the list of the parameters of the netcdf file
list_parm_deleted=['time','lat','lon','depth','grid_mapping','x','y','longitude','latitude','LONGITUDE','LATITUDE','time_bnds']
full_list=list(ds.variables)
selected_list=[]
selected_list_name=[]
for i in full_list:
            if i not in list_parm_deleted:
selected_list.append(i)
try:
selected_list_name.append(ds[i].attrs['standard_name'])
except:
try:
selected_list_name.append(ds[i].attrs['long_name'])
except:
selected_list_name.append(i)
return(ds,selected_list,selected_list_name,dataset)
################################################################################################################
#--------------------------------------------------------------------------------------------
# DISPLAY THE PARAMETERS OF THE FILE (For MODEL PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def display_param_model(ds,selected_list,selected_list_name):
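        # Builds the parameter / longitude / latitude selection widgets for a model
        # dataset returned by read_File(); selected_list / selected_list_name are the
        # variable keys and their human-readable names.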
########## Initialize the widgets ------------------------------------------------------------------
dictionary = dict(zip(selected_list_name, selected_list))
if len(selected_list_name) < 4:
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list_name,
value=selected_list_name[0],
description='Parameters:',
disabled=False)
else:
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list_name, #selected_list,
value=selected_list_name[4], #selected_list[0],
description='Parameters:',
disabled=False)
style = {'description_width': 'initial'}
varb=dictionary[x_widget.value]
if len(ds[dictionary[x_widget.value]].shape) < 4:
n_widget = widgets.Label(value="This parameter does not allow a depth analysis")
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Longitude:')
z_widget = widgets.Dropdown(layout={'width': 'initial'},description='Latitude:')
else:
n_widget = widgets.Label(value="Please select a specific (Longitude,Latitude) to build also a depth analysis figure")
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Longitude:')
z_widget = widgets.Dropdown(layout={'width': 'initial'},description='Latitude:')
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))[0]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))[0]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))[0]
#---------------------------------------------------------------------------------------------------
############# Define a function that updates the content of (y_widget,z_widget,n_widget) based on what we select for x_widget
def update_2(*args):
param_name=dictionary[x_widget.value]
varb=param_name
if len(ds[varb].shape) < 4:
y_widget.options=['']
z_widget.options=['']
n_widget.value="This parameter does not allow a depth analysis"
variable = ds.variables[varb][:]
vmin=variable[0,:,:].min()
vmax=variable[0,:,:].max()
else:
n_widget.value="Please select a specific (Longitude,Latitude) to build also a depth analysis figure"
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))
variable = ds.variables[varb][:]
vmin=variable[0,0,:,:].min()
vmax=variable[0,0,:,:].max()
return(vmin,vmax)
# update the content of the widgets according to our selection
x_widget.observe(update_2,'value')
n_widget.observe(update_2,'value')
y_widget.observe(update_2,'value')
z_widget.observe(update_2,'value')
#--------------------------------------------------------------------------------------------------------
####### configure the display according to the selected parameter
def random_function(x,n,y,z):
param_name=dictionary[x]
param_lon=y
param_lat=z
style = {'description_width': 'initial'}
button = widgets.Button(description="Display The Parameter",style=style)
out = widgets.Output()
def on_button_clicked(_):
# "linking function with output"
with out:
try:
varb=param_name
var_lon=param_lon
var_lat=param_lat
# define the longitude (max,min) and latitude (max,min) for the displaying
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['lon'][:].max()
lon_min=ds.variables['lon'][:].min()
lat_max=ds.variables['lat'][:].max()
lat_min=ds.variables['lat'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['lon'][:]
lats = ds.variables['lat'][:]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['longitude'][:].max()
lon_min=ds.variables['longitude'][:].min()
lat_max=ds.variables['latitude'][:].max()
lat_min=ds.variables['latitude'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['longitude'][:]
lats = ds.variables['latitude'][:]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['LONGITUDE'][:].max()
lon_min=ds.variables['LONGITUDE'][:].min()
lat_max=ds.variables['LATITUDE'][:].max()
lat_min=ds.variables['LATITUDE'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['LONGITUDE'][:]
lats = ds.variables['LATITUDE'][:]
if lon_min <-180 or lat_min < -90 or lon_max > 180 or lat_max >90:
lon_min,lat_min,lon_max,lat_max= (-180,-90,180,90)
#---------------------------------------------------------------------
# case 1 : display the selected parameter on a map without a depth analysis
if len(ds[varb].shape) < 3:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(20,7))
plt.subplot(121)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[:,:],cmap=plt.cm.jet,vmin=variable[:,:].min(), vmax=variable[:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT', fontsize=15)
plt.show()
# case 2 : display the selected parameter on a map without a depth analysis
elif 2 < len(ds[varb].shape) < 4:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(20,7))
plt.subplot(121)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[0,:,:],cmap=plt.cm.jet,vmin=variable[0,:,:].min(), vmax=variable[0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT', fontsize=15)
plt.show()
# case 3 : display the selected parameter on a map with a depth analysis
else:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(30,7))
plt.subplot(131)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[0,0,:,:],cmap=plt.cm.jet,vmin=variable[0,0,:,:].min(), vmax=variable[0,0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT (For Depth = 0) [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT (For Depth = 0)', fontsize=15)
# add the display of the depth analysis
plt.subplot(132)
# Get indexes for a Given Point (latitude = var_lat and longitude = var_lon)
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['lon'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['lat'])[:] == var_lat)
lons_test=ds[varb]['lon'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['lat'][np.asscalar(np.asarray(list(vd2)))]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['longitude'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['latitude'])[:] == var_lat)
lons_test=ds[varb]['longitude'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['latitude'][np.asscalar(np.asarray(list(vd2)))]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['LONGITUDE'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['LATITUDE'])[:] == var_lat)
lons_test=ds[varb]['LONGITUDE'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['LATITUDE'][np.asscalar(np.asarray(list(vd2)))]
indx_lat=np.asscalar(np.asarray(list(vd2)))
indx_lon=np.asscalar(np.asarray(list(vd)))
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
s= 200*np.ones(1)
if math.isnan(np.array([ds[varb][0,0,indx_lat,indx_lon]])) == True:
cs3=map.scatter(np.array([lons_test]),np.array([lats_test]) , c=np.array([0]),s=s,cmap=plt.cm.gist_gray)
else:
cs3=map.scatter(np.array([lons_test]),np.array([lats_test]) , c=np.array([ds[varb][0,0,indx_lat,indx_lon]]),s=s,cmap=plt.cm.jet,vmin=variable[0,0,:,:].min(), vmax=variable[0,0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar3=map.colorbar(cs3,location='bottom',pad="5%")
cbar3.set_label(f'{variable_title}', fontsize=15)
plt.title(f'Selected Point', fontsize=20)
plt.subplot(133)
ds[varb][0,:,indx_lat,indx_lon].plot.line(y='depth',ylim=(110,0),yincrease=False)
plt.show()
except:
try:
varb=param_name
if len(ds[varb].shape) < 3:
ds[varb].plot()
elif 2 < len(ds[varb].shape) < 4:
ds[varb][0,:,:].plot()
else:
ds[varb][0,0,:,:].plot()
except:
print ("Displaying doesn't work, please choose another parameter or product (example : BALTICSEA_ANALYSIS_FORECAST_PHY_003_006) ")
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
a=widgets.VBox([button,out])
return(a)
#----------------------------------------------------------------------------------------------------------------
# display the interaction between the widgets
interact(random_function,
x = x_widget,
n = n_widget,
y = y_widget,
z = z_widget);
return(update_2)
###############################################################################################################
#--------------------------------------------------------------------------------------------
# DISPLAY THE PARAMETERS OF THE FILE (For SATELLITE PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def display_param_satellite(ds2,selected_list2,file_name2,scale_min,scale_max,scale):
########## Initialize the widget ------------------------------------------------------------------
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list2,
value=selected_list2[5],
description='Parameters:',
disabled=False)
#-------------------------------------------------------------------------------------------------
## Define a function that updates the value of x_widget
def update_3(*args):
param_name=x_widget.value
x_widget.observe(update_3)
#-------------------------------------------------------------------------------------------------
####### configure the display according to the selected parameter
def random_function(x):
param_name=x
if param_name == 'sea_surface_temperature' or param_name == 'adjusted_sea_surface_temperature':
ds2 = xarray.open_dataset(file_name2)
ds2[param_name][0,:,:]=ds2[param_name][0,:,:]-273.15
else:
ds2 = xarray.open_dataset(file_name2)
ds2[param_name]=ds2[param_name]
button = widgets.Button(description='''Display The Param''')
out = widgets.Output()
# define the displaying button
def on_button_clicked(_):
# "linking function with output"
with out:
try:
varb=param_name
# a condition to see if there is a variable that represents the depth in this parameter
if len(ds2[varb].shape) < 4:
# display the selected parameter on a map
lons2 = ds2.variables['lon'][:]
lats2 = ds2.variables['lat'][:]
variable_ds2 = ds2.variables[varb][:]
variable_name = varb
lon2, lat2 = np.meshgrid(lons2, lats2)
plt.figure(figsize=(30,30))
plt.subplot(121)
map = Basemap(llcrnrlon=-40,llcrnrlat=20,urcrnrlon=60,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 800, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
#--------------------------------------------------------
# display the selected parameter for a zoomed area of the image
plt.subplot(122)
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1200, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
plt.show()
#------------------------------------------------------------------
else:
# display the selected parameter on a map
lons2 = ds2.variables['lon'][:]
lats2 = ds2.variables['lat'][:]
variable_ds2 = ds2.variables[varb][:]
variable_name = varb
lon2, lat2 = np.meshgrid(lons2, lats2)
plt.figure(figsize=(30,30))
plt.subplot(121)
map = Basemap(llcrnrlon=-40,llcrnrlat=20,urcrnrlon=60,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 800, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
#------------------------------------------------------------------
# display the selected parameter for a zoomed area of the image
plt.subplot(122)
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1200, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
plt.show()
#-----------------------------------------------------------------------------------
except:
print ("Displaying doesn't work, please choose another product (example : SST_EUR_SST_L3S_NRT_OBSERVATIONS_010_009_a) ")
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
a=widgets.VBox([button,out])
return(a)
#----------------------------------------------------------------------------------------------------------------
# display the interaction between the widget
interact(random_function,
x = x_widget);
############################# INSITU PRODUCT ###########################################
########################################################################################
############################################################################################
#--------------------------------------------------------------------------------------------
# DOWNLOAD THE FILES
#--------------------------------------------------------------------------------------------
############################################################################################
@staticmethod
def Insitu_Products_download(host,user,password):
# Get the list of all Insitu products offered by the cmems catalog
data = {'In Situ NRT products': []}
NRT_products = []
#connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
if 'INSITU' in items:
NRT_products.append(product)
#------------------------------------------------------------------
########## Initialize the widgets ------------------------------------------------------------------
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=NRT_products,
value=NRT_products[1],
description='Product:',
disabled=False)
product_name=x_widget.value
index_file = 'index_latest.txt' #type aimed index file (index_latest - index_monthly - index_history )
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
def flatten(items):
"""Yield items from any nested iterable"""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x
items=[]
for i in range(len(raw_index_info.parameters)):
items.append(raw_index_info.parameters[i].split(' '))
items=list(flatten(items))
items = list(set(items))
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Parameter:')
y_widget.options=items
try:
y_widget.value=items[items.index("TEMP")]
except:
y_widget.value=items[0]
style = {'description_width': 'initial'}
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
z_widget = widgets.Text(layout={'width': 'initial'},value='2019-06-03T23:00:00Z',description=f'Enter an initial date between {raw_index_info.time_coverage_start[0]} and {raw_index_info.time_coverage_start[len(raw_index_info.time_coverage_start)-1]} : ',style=style)
w_widget = widgets.Text(layout={'width': 'initial'},value='2019-06-04T22:59:59Z',description=f'Enter an end date between {raw_index_info.time_coverage_end[0]} and {raw_index_info.time_coverage_end[len(raw_index_info.time_coverage_end)-1]} : ',style=style)
display(pd.DataFrame(data=data))
#-----------------------------------------------------------------------------------------------------
####### Define a function that updates the content of (y_widget,w_widget,z_widget) based on what we select for x_widget
def update4(*args):
product_name=x_widget.value
index_file = 'index_latest.txt' #type aimed index file (index_latest - index_monthly - index_history )
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
                    raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
import pandas as pd
import openpyxl
import numpy as np
import os
import string
import glob
''' This program compiles all (individual) saved excel files to compare different models in one environment
'''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
path_core = __location__+ "/Results/Train/"
print("OK")
# SELECT THE ENVIRONMENTS
# env_path_list = ["Env_1",
# "Env_2",
# "Env_3",
# "Env_8",
# "Env_9",
# "Env_10",
# "Env_11"]
env_path_list = ["Env_1",
"Env_2",
"Env_3",
"Env_4"]
env_path_list = ["Env_1"]
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO', 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ']
list_sheets = ["Run_Conf", "Score", "Percent", "Loss", "Time"]
for env_path in env_path_list:
file_path_list = []
path = path_core + env_path + "/Train_Env_1_DQN*.xlsx"
for fname in sorted(glob.glob(path)):
file_path_list.append(fname)
print("LEN(FILE_PATH_LIST):", len(file_path_list))
load_path = __location__+ "/Results/Train/Compare_Models.xlsx"
excel_data_base = pd.ExcelFile(load_path)
load_path_new = __location__+ "/Results/Train/" + env_path + "/Compare_Models_new_" + env_path + ".xlsx"
excel_writer_to_append = pd.ExcelWriter(load_path_new)
workbook = excel_writer_to_append.book
excel_data_base_col = pd.read_excel(excel_data_base, sheetname="Run_Conf")
df_Run_Conf_list = pd.DataFrame()
df_Score_list = pd.DataFrame()
df_Percent_list = pd.DataFrame()
df_Loss_list = pd.DataFrame()
df_Time_list = pd.DataFrame()
for i in range(len(file_path_list)):
print("File:", i)
excel_file = pd.ExcelFile(file_path_list[i])
# print("excel_file ", excel_file )
df_Run_Conf = pd.read_excel(excel_file, sheetname=list_sheets[0], converters={'A': str})
df_Run_Conf = df_Run_Conf.set_index(list_sheets[0])
df_Score = pd.read_excel(excel_file, sheetname=list_sheets[1], parse_cols="A:B")
df_Score = df_Score.set_index(list_sheets[1])
df_Percent = pd.read_excel(excel_file, sheetname=list_sheets[2], parse_cols="A:B")
df_Percent = df_Percent.set_index(list_sheets[2])
df_Loss = pd.read_excel(excel_file, sheetname=list_sheets[3], parse_cols="A:B")
df_Loss = df_Loss.set_index(list_sheets[3])
df_Time = pd.read_excel(excel_file, sheetname=list_sheets[4], parse_cols="A:B")
df_Time = df_Time.set_index(list_sheets[4])
df_Run_Conf_list = pd.concat([df_Run_Conf_list, df_Run_Conf], axis=1, join="outer")
df_Score_list = pd.concat([df_Score_list, df_Score], axis=1, join="outer")
df_Percent_list = pd.concat([df_Percent_list, df_Percent], axis=1, join="outer")
df_Loss_list = pd.concat([df_Loss_list, df_Loss], axis=1, join="outer")
        df_Time_list = pd.concat([df_Time_list, df_Time], axis=1, join="outer")
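    # Sketch (not executed): one way to write the collected frames out with the
    # writer created above; the sheet names are assumed to mirror `list_sheets`.
    #     df_Run_Conf_list.to_excel(excel_writer_to_append, sheet_name=list_sheets[0])
    #     df_Score_list.to_excel(excel_writer_to_append, sheet_name=list_sheets[1])
    #     df_Percent_list.to_excel(excel_writer_to_append, sheet_name=list_sheets[2])
    #     df_Loss_list.to_excel(excel_writer_to_append, sheet_name=list_sheets[3])
    #     df_Time_list.to_excel(excel_writer_to_append, sheet_name=list_sheets[4])
    #     excel_writer_to_append.save()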
import pandas as pd
import numpy as np
from math import sqrt
from functools import partial
import plotnine as p9
import re
import os
import xml.etree.ElementTree as ET
_rows = np.array([chr(x) for x in range(65, 91)]
+ ['A' + chr(x) for x in range(65, 71)])
def welln2well(wells, form):
form = int(form)
if form not in [96, 384, 1536]:
raise ValueError('Only formats 96, 384 and 1536 supported.')
n_cols = int(sqrt(form/2*3))
    wells = wells if type(wells) == np.ndarray else np.array(wells, dtype=int)
if np.any(wells >= form) or np.any(wells < 0):
raise ValueError('welln out of range')
rr = _rows[wells // n_cols]
cc = (wells % n_cols + 1).astype(str)
return np.core.defchararray.add(rr, cc)
welln2well_96 = partial(welln2well, form=96)
welln2well_384 = partial(welln2well, form=384)
welln2well_1536 = partial(welln2well, form=1536)
def well2welln(wells, form):
form = int(form)
if form not in [96, 384, 1536]:
raise ValueError('Only formats 96, 384 and 1536 supported.')
n_cols = int(sqrt(form/2*3))
    wells = wells if type(wells) == np.ndarray else np.array(wells, dtype=str)
    _well_regex = re.compile(r'^([A-Z]{1,2})(\d{1,2})')
def _w2wn(well, n_cols):
match = _well_regex.match(well)
if not match:
raise ValueError('Well not recognized: "%s"' % well)
rr, cc = match.group(1), match.group(2)
rrn = ord(rr) - 65 if len(rr) == 1 else ord(rr[1]) - 39
ccn = int(cc) - 1
return rrn * n_cols + ccn
_vw2wn = np.vectorize(_w2wn, excluded=('n_cols'))
wns = _vw2wn(wells, n_cols)
if np.any(wns >= form) or np.any(wns < 0):
raise ValueError('welln out of range')
return wns
well2welln_96 = partial(well2welln, form=96)
well2welln_384 = partial(well2welln, form=384)
well2welln_1536 = partial(well2welln, form=1536)
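# Usage sketch for the converters above (a 384-well plate has 16 rows A-P and 24 columns):
#     welln2well_384(np.array([0, 1, 24, 383]))      # -> ['A1', 'A2', 'B1', 'P24']
#     well2welln_384(np.array(['A1', 'B1', 'P24']))  # -> [0, 24, 383]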
def plot_picklist(picklist, form, fill='v', show='target', **kwargs):
if type(picklist) == str:
picklist = read_picklist(picklist)
if show == 'source':
plate = 's_plate'
elif show == 'target':
plate = 't_plate'
elif show == 'sourcesum':
picklist = picklist.groupby(['s_well', 's_plate'])['v'].sum().reset_index()
plate, show = 's_plate', 'source'
elif show == 'targetsum':
picklist = picklist.groupby(['t_well', 't_plate'])['v'].sum().reset_index()
plate, show = 't_plate', 'target'
else:
raise ValueError('Parametr "show" must be in {source, target, sourcesum, targetsum}')
for barcode, pl in picklist.groupby(plate):
gg = plot_plate(pl, form, fill=fill, alpha=.5, show=show, **kwargs)
gg += p9.ggtitle(barcode)
gg.draw()
def read_picklist(fn):
return pd.read_csv(fn, names=['s_plate', 's_well', 't_plate', 'v', 't_well'])
def write_picklist(df, fn):
if 's_well' not in df.columns:
# df['s_well'] = we don't know the format!
pass
(df[['s_plate', 's_well', 't_plate', 'v', 't_well']]
.to_csv(fn, header=None, line_terminator='\r\n', index=False))
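# Round-trip sketch (columns as defined in read_picklist above: source plate/well,
# target plate, volume, target well; the TODO above means a missing s_well is not yet
# derived automatically, so it must be supplied):
#     pl = pd.DataFrame({'s_plate': ['Src1'], 's_well': ['A1'],
#                        't_plate': ['Dst1'], 'v': [25.0], 't_well': ['B2']})
#     write_picklist(pl, 'picklist.csv')
#     pl2 = read_picklist('picklist.csv')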
def read_survey(fn):
tree = ET.parse(fn)
root = tree.getroot()
    form = int(re.match(r'^\d+', root.attrib['name']).group())
ll = [{'well': child.attrib['n'], 'v': float(child.attrib['vl'])}
for child in root if float(child.attrib['vl']) > 0.0]
    df = pd.DataFrame(ll)
# -*- coding: utf-8 -*-
"""Master_NBA_Predictive_Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16mdsw4rUN3jcKETlA4rHSXlp1Hjr4raK
"""
from argparse import ArgumentParser
import pandas as pd
import random as rnd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
MULTIPLE_SEASON = pd.read_excel('data/output_multiple_season.xlsx')
def calculate(home_team, away_team):
pd.options.display.float_format = '{:.2f}'.format
homeCourtAdvantage = rnd.randint(3, 5)
url = "https://www.basketball-reference.com/teams/{}/2020_games.html".format(
home_team)
dfs = pd.read_html(url)
    home_team_df = pd.concat(dfs)
import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = pd.DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = pd.DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = pd.DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
        # Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = pd.DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = pd.DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
pd.Timedelta("2 hours"),
pd.Timedelta("3 hours"),
pd.Timedelta("4 hours"),
pd.Timedelta("5 hours"),
],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (
" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00"
)
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH#21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({"s1": s1, "s2": s2})
expected = DataFrame(
{
"s1": [5, 2, 0, 1, 2, 3, 4, 1.581139],
"s2": [
5,
Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
s2[1],
s2[2],
s2[3],
end.tz_localize(tz),
np.nan,
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
def test_describe_percentiles_integer_idx(self):
# GH#26660
        df = pd.DataFrame({"x": [1]})
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import datetime
# from datetime import datetime
dire = '../../data/'
start = datetime.datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/insert_action_train2.csv', encoding='utf-8')
city = pd.read_csv(dire + 'train/city.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/insert_action_test2.csv', encoding='utf-8')
# """
############# 3.action feature_3 #############
"""
# 1. 全部浏览记录中0-9出现的次数
# 2. 对应浏览记录中0-9出现的次数
# 3. 全部浏览记录浏览时间
# 4. 对应浏览记录浏览时间
# 5. 对应浏览记录是否出现5 6
# """
# counts of each action type (1-9) over all browsing records
def count_56789(orderFuture, action):
action_1 = action[action['actionType'] == 1]
action_2 = action[action['actionType'] == 2]
action_3 = action[action['actionType'] == 3]
action_4 = action[action['actionType'] == 4]
action_5 = action[action['actionType'] == 5]
action_6 = action[action['actionType'] == 6]
action_7 = action[action['actionType'] == 7]
action_8 = action[action['actionType'] == 8]
action_9 = action[action['actionType'] == 9]
    action_1 = action_1.groupby(action_1.userid)['actionType'].count().reset_index()  # total type-1 actions per user
    action_2 = action_2.groupby(action_2.userid)['actionType'].count().reset_index()  # total type-2 actions per user
    action_3 = action_3.groupby(action_3.userid)['actionType'].count().reset_index()  # total type-3 actions per user
    action_4 = action_4.groupby(action_4.userid)['actionType'].count().reset_index()  # total type-4 actions per user
    action_5 = action_5.groupby(action_5.userid)['actionType'].count().reset_index()  # total type-5 actions per user
    action_6 = action_6.groupby(action_6.userid)['actionType'].count().reset_index()  # total type-6 actions per user
    action_7 = action_7.groupby(action_7.userid)['actionType'].count().reset_index()  # total type-7 actions per user
    action_8 = action_8.groupby(action_8.userid)['actionType'].count().reset_index()  # total type-8 actions per user
    action_9 = action_9.groupby(action_9.userid)['actionType'].count().reset_index()  # total type-9 actions per user
    action_all = action.groupby(action.userid)['actionType'].count().reset_index()  # total actions per user
action_1.rename(columns={'actionType': 'action_1'}, inplace=True)
action_2.rename(columns={'actionType': 'action_2'}, inplace=True)
action_3.rename(columns={'actionType': 'action_3'}, inplace=True)
action_4.rename(columns={'actionType': 'action_4'}, inplace=True)
action_5.rename(columns={'actionType': 'action_5'}, inplace=True)
action_6.rename(columns={'actionType': 'action_6'}, inplace=True)
action_7.rename(columns={'actionType': 'action_7'}, inplace=True)
action_8.rename(columns={'actionType': 'action_8'}, inplace=True)
action_9.rename(columns={'actionType': 'action_9'}, inplace=True)
action_all.rename(columns={'actionType': 'action_all'}, inplace=True)
orderFuture = pd.merge(orderFuture, action_1, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_2, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_3, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_4, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_5, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_6, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_7, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_8, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_9, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_all, on='userid', how='left')
    orderFuture['action_1_rate'] = orderFuture['action_1']/orderFuture['action_all']  # share of type-1 actions in the user's total
    orderFuture['action_2_rate'] = orderFuture['action_2']/orderFuture['action_all']  # share of type-2 actions in the user's total
    orderFuture['action_3_rate'] = orderFuture['action_3']/orderFuture['action_all']  # share of type-3 actions in the user's total
    orderFuture['action_4_rate'] = orderFuture['action_4']/orderFuture['action_all']  # share of type-4 actions in the user's total
    orderFuture['action_5_rate'] = orderFuture['action_5']/orderFuture['action_all']  # share of type-5 actions in the user's total
    orderFuture['action_6_rate'] = orderFuture['action_6']/orderFuture['action_all']  # share of type-6 actions in the user's total
    orderFuture['action_7_rate'] = orderFuture['action_7']/orderFuture['action_all']  # share of type-7 actions in the user's total
    orderFuture['action_8_rate'] = orderFuture['action_8']/orderFuture['action_all']  # share of type-8 actions in the user's total
    orderFuture['action_9_rate'] = orderFuture['action_9']/orderFuture['action_all']  # share of type-9 actions in the user's total
# print(orderFuture)
return orderFuture
orderFuture_train = count_56789(orderFuture_train, action_train)
orderFuture_test = count_56789(orderFuture_test, action_test)
# counts of each action type (1-9) over the corresponding browsing records (orderid is null)
def count_1_9(orderFuture, action):
action_1 = action[(action['actionType'] == 1) & (action.orderid.isnull())]
action_2 = action[(action['actionType'] == 2) & (action.orderid.isnull())]
action_3 = action[(action['actionType'] == 3) & (action.orderid.isnull())]
action_4 = action[(action['actionType'] == 4) & (action.orderid.isnull())]
action_5 = action[(action['actionType'] == 5) & (action.orderid.isnull())]
action_6 = action[(action['actionType'] == 6) & (action.orderid.isnull())]
action_7 = action[(action['actionType'] == 7) & (action.orderid.isnull())]
action_8 = action[(action['actionType'] == 8) & (action.orderid.isnull())]
action_9 = action[(action['actionType'] == 9) & (action.orderid.isnull())]
action_all = action[action.orderid.isnull()]
    action_1 = action_1.groupby(action_1.userid)['actionType'].count().reset_index()  # total type-1 actions per user
    action_2 = action_2.groupby(action_2.userid)['actionType'].count().reset_index()  # total type-2 actions per user
    action_3 = action_3.groupby(action_3.userid)['actionType'].count().reset_index()  # total type-3 actions per user
    action_4 = action_4.groupby(action_4.userid)['actionType'].count().reset_index()  # total type-4 actions per user
    action_5 = action_5.groupby(action_5.userid)['actionType'].count().reset_index()  # total type-5 actions per user
    action_6 = action_6.groupby(action_6.userid)['actionType'].count().reset_index()  # total type-6 actions per user
    action_7 = action_7.groupby(action_7.userid)['actionType'].count().reset_index()  # total type-7 actions per user
    action_8 = action_8.groupby(action_8.userid)['actionType'].count().reset_index()  # total type-8 actions per user
    action_9 = action_9.groupby(action_9.userid)['actionType'].count().reset_index()  # total type-9 actions per user
    action_all = action_all.groupby(action_all.userid)['actionType'].count().reset_index()  # total actions per user
action_1.rename(columns={'actionType': 'action_1_c'}, inplace=True)
action_2.rename(columns={'actionType': 'action_2_c'}, inplace=True)
action_3.rename(columns={'actionType': 'action_3_c'}, inplace=True)
action_4.rename(columns={'actionType': 'action_4_c'}, inplace=True)
action_5.rename(columns={'actionType': 'action_5_c'}, inplace=True)
action_6.rename(columns={'actionType': 'action_6_c'}, inplace=True)
action_7.rename(columns={'actionType': 'action_7_c'}, inplace=True)
action_8.rename(columns={'actionType': 'action_8_c'}, inplace=True)
action_9.rename(columns={'actionType': 'action_9_c'}, inplace=True)
action_all.rename(columns={'actionType': 'action_all_c'}, inplace=True)
orderFuture = pd.merge(orderFuture, action_1, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_2, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_3, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_4, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_5, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_6, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_7, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_8, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_9, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_all, on='userid', how='left')
    orderFuture['action_1_rate_c'] = orderFuture['action_1_c']/orderFuture['action_all_c']  # share of type-1 actions in the user's total
    orderFuture['action_2_rate_c'] = orderFuture['action_2_c']/orderFuture['action_all_c']  # share of type-2 actions in the user's total
    orderFuture['action_3_rate_c'] = orderFuture['action_3_c']/orderFuture['action_all_c']  # share of type-3 actions in the user's total
    orderFuture['action_4_rate_c'] = orderFuture['action_4_c']/orderFuture['action_all_c']  # share of type-4 actions in the user's total
    orderFuture['action_5_rate_c'] = orderFuture['action_5_c']/orderFuture['action_all_c']  # share of type-5 actions in the user's total
    orderFuture['action_6_rate_c'] = orderFuture['action_6_c']/orderFuture['action_all_c']  # share of type-6 actions in the user's total
    orderFuture['action_7_rate_c'] = orderFuture['action_7_c']/orderFuture['action_all_c']  # share of type-7 actions in the user's total
    orderFuture['action_8_rate_c'] = orderFuture['action_8_c']/orderFuture['action_all_c']  # share of type-8 actions in the user's total
    orderFuture['action_9_rate_c'] = orderFuture['action_9_c']/orderFuture['action_all_c']  # share of type-9 actions in the user's total
# print(orderFuture)
return orderFuture
orderFuture_train = count_1_9(orderFuture_train, action_train)
orderFuture_test = count_1_9(orderFuture_test, action_test)
# total browsing time (first to last action) over all browsing records
def action_time(orderFuture, action):
first_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).first().reset_index()
last_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).last().reset_index()
first_action['action_time'] = last_action['actionTime'] - first_action['actionTime']
orderFuture = pd.merge(orderFuture, first_action[['userid', 'action_time']], on='userid', how='left')
return orderFuture
orderFuture_train = action_time(orderFuture_train, action_train)
orderFuture_test = action_time(orderFuture_test, action_test)
# browsing time (first to last action) over the corresponding browsing records
def action_time_c(orderFuture, action):
action = action[action.orderid.isnull()]
first_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).first().reset_index()
last_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).last().reset_index()
first_action['action_time_c'] = last_action['actionTime'] - first_action['actionTime']
orderFuture = pd.merge(orderFuture, first_action[['userid', 'action_time_c']], on='userid', how='left')
return orderFuture
orderFuture_train = action_time_c(orderFuture_train, action_train)
orderFuture_test = action_time_c(orderFuture_test, action_test)
# occurrences of the consecutive pairs 5-6, 6-7, 7-8, 8-9 over all browsing records
def appear_56(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_56_count', 'action_67_count', 'action_78_count', 'action_89_count'])
userid = []
action_56_count = []
action_67_count = []
action_78_count = []
action_89_count = []
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count56 = 0
count67 = 0
count78 = 0
count89 = 0
for i in range(len(action1)):
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6)
and (action1['actionType_time'][i] < 1800)):
count56 = count56 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7)
and (action1['actionType_time'][i] < 1800)):
count67 = count67 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8)
and (action1['actionType_time'][i] < 1800)):
count78 = count78 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 8) and (action1['actionType'][i + 1] == 9)
and (action1['actionType_time'][i] < 1800)):
count89 = count89 + 1
userid.append(row.userid)
action_56_count.append(count56)
action_67_count.append(count67)
action_78_count.append(count78)
action_89_count.append(count89)
count['userid'] = userid
count['action_56_count'] = action_56_count
count['action_67_count'] = action_67_count
count['action_78_count'] = action_78_count
count['action_89_count'] = action_89_count
orderFuture = pd.merge(orderFuture, count[['userid', 'action_56_count', 'action_67_count', 'action_78_count', 'action_89_count']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_56(orderFuture_train, action_train)
orderFuture_test = appear_56(orderFuture_test, action_test)
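# Note: the pair-counting loops above walk each user's actions in pure Python. The
# sketch below (illustrative only, not called by this script) shows an equivalent
# vectorized formulation under the same assumptions: the table is sorted per user in
# time order and actionType_time holds the gap to the next action.
def _count_pair_vectorized(action, a, b, max_gap=1800):
    """Per-user count of consecutive (a, b) action pairs closer than max_gap seconds."""
    nxt = action.groupby('userid')['actionType'].shift(-1)
    mask = (action['actionType'] == a) & (nxt == b) & (action['actionType_time'] < max_gap)
    return mask.groupby(action['userid']).sum()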
# occurrences of the consecutive pairs 5-6, 6-7, 7-8, 8-9 over the corresponding browsing records
def appear_56_c(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_56_count_c', 'action_67_count_c', 'action_78_count_c', 'action_89_count_c'])
userid = []
action_56_count_c = []
action_67_count_c = []
action_78_count_c = []
action_89_count_c = []
action = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count56 = 0
count67 = 0
count78 = 0
count89 = 0
for i in range(len(action1)):
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6)
and (action1['actionType_time'][i] < 1800)):
count56 = count56 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7)
and (action1['actionType_time'][i] < 1800)):
count67 = count67 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8)
and (action1['actionType_time'][i] < 1800)):
count78 = count78 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 8) and (action1['actionType'][i + 1] == 9)
and (action1['actionType_time'][i] < 1800)):
count89 = count89 + 1
userid.append(row.userid)
action_56_count_c.append(count56)
action_67_count_c.append(count67)
action_78_count_c.append(count78)
action_89_count_c.append(count89)
count['userid'] = userid
count['action_56_count_c'] = action_56_count_c
count['action_67_count_c'] = action_67_count_c
count['action_78_count_c'] = action_78_count_c
count['action_89_count_c'] = action_89_count_c
orderFuture = pd.merge(orderFuture, count[['userid', 'action_56_count_c', 'action_67_count_c', 'action_78_count_c', 'action_89_count_c']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_56_c(orderFuture_train, action_train)
orderFuture_test = appear_56_c(orderFuture_test, action_test)
# occurrences of the triples 5-6-7, 6-7-8, 7-8-9, 5-6-6 over all browsing records
def appear_567(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_567_count', 'action_678_count', 'action_789_count', 'action_566_count'])
userid = []
action_567_count = []
action_678_count = []
action_789_count = []
action_566_count = []
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count567 = 0
count678 = 0
count789 = 0
count566 = 0
for i in range(len(action1)):
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count567 = count567 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7) and (action1['actionType'][i + 2] == 8)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count678 = count678 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8) and (action1['actionType'][i + 2] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count789 = count789 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 6)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count566 = count566 + 1
userid.append(row.userid)
action_567_count.append(count567)
action_678_count.append(count678)
action_789_count.append(count789)
action_566_count.append(count566)
count['userid'] = userid
count['action_567_count'] = action_567_count
count['action_678_count'] = action_678_count
count['action_789_count'] = action_789_count
count['action_566_count'] = action_566_count
orderFuture = pd.merge(orderFuture, count[['userid', 'action_567_count', 'action_678_count', 'action_789_count', 'action_566_count']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_567(orderFuture_train, action_train)
orderFuture_test = appear_567(orderFuture_test, action_test)
# occurrences of the triples 5-6-7, 6-7-8, 7-8-9, 5-6-6 over the corresponding browsing records
def appear_567_c(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_567_count_c', 'action_678_count_c', 'action_789_count_c', 'action_566_count_c'])
userid = []
action_567_count_c = []
action_678_count_c = []
action_789_count_c = []
action_566_count_c = []
action = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count567 = 0
count678 = 0
count789 = 0
count566 = 0
for i in range(len(action1)):
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count567 = count567 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7) and (action1['actionType'][i + 2] == 8)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count678 = count678 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8) and (action1['actionType'][i + 2] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count789 = count789 + 1
if (((i + 2) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 6)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800)):
count566 = count566 + 1
userid.append(row.userid)
action_567_count_c.append(count567)
action_678_count_c.append(count678)
action_789_count_c.append(count789)
action_566_count_c.append(count566)
count['userid'] = userid
count['action_567_count_c'] = action_567_count_c
count['action_678_count_c'] = action_678_count_c
count['action_789_count_c'] = action_789_count_c
count['action_566_count_c'] = action_566_count_c
orderFuture = pd.merge(orderFuture, count[['userid', 'action_567_count_c', 'action_678_count_c', 'action_789_count_c', 'action_566_count_c']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_567_c(orderFuture_train, action_train)
orderFuture_test = appear_567_c(orderFuture_test, action_test)
# Counts of the 5678 / 6789 patterns in the full action history
def appear_5678(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_5678_count', 'action_6789_count'])
userid = []
action_5678_count = []
action_6789_count = []
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count5678 = 0
count6789 = 0
for i in range(len(action1)):
if (((i + 3) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7) and (action1['actionType'][i + 3] == 8)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and (action1['actionType_time'][i + 2] < 1800)):
count5678 = count5678 + 1
if (((i + 3) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7) and (action1['actionType'][i + 2] == 8) and (action1['actionType'][i + 3] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and (action1['actionType_time'][i + 2] < 1800)):
count6789 = count6789 + 1
userid.append(row.userid)
action_5678_count.append(count5678)
action_6789_count.append(count6789)
count['userid'] = userid
count['action_5678_count'] = action_5678_count
count['action_6789_count'] = action_6789_count
orderFuture = pd.merge(orderFuture, count[['userid', 'action_5678_count', 'action_6789_count']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_5678(orderFuture_train, action_train)
orderFuture_test = appear_5678(orderFuture_test, action_test)
# Counts of the 5678 / 6789 patterns in the corresponding (orderid-null) actions
def appear_5678_c(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_5678_count_c', 'action_6789_count_c'])
userid = []
action_5678_count_c = []
action_6789_count_c = []
action = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count5678 = 0
count6789 = 0
for i in range(len(action1)):
if (((i + 3) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7) and (action1['actionType'][i + 3] == 8)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and (action1['actionType_time'][i + 2] < 1800)):
count5678 = count5678 + 1
if (((i + 3) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7) and (action1['actionType'][i + 2] == 8) and (action1['actionType'][i + 3] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and (action1['actionType_time'][i + 2] < 1800)):
count6789 = count6789 + 1
userid.append(row.userid)
action_5678_count_c.append(count5678)
action_6789_count_c.append(count6789)
count['userid'] = userid
count['action_5678_count_c'] = action_5678_count_c
count['action_6789_count_c'] = action_6789_count_c
orderFuture = pd.merge(orderFuture, count[['userid', 'action_5678_count_c', 'action_6789_count_c']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_5678_c(orderFuture_train, action_train)
orderFuture_test = appear_5678_c(orderFuture_test, action_test)
# Count of the 56789 pattern in the full action history
def appear_56789(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_56789_count'])
userid = []
action_56789_count = []
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count56789 = 0
for i in range(len(action1)):
if (((i + 4) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7) and (action1['actionType'][i + 3] == 8) and (action1['actionType'][i + 4] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and (action1['actionType_time'][i + 2] < 1800) and (action1['actionType_time'][i + 3] < 1800)):
count56789 = count56789 + 1
userid.append(row.userid)
action_56789_count.append(count56789)
count['userid'] = userid
count['action_56789_count'] = action_56789_count
orderFuture = pd.merge(orderFuture, count[['userid', 'action_56789_count']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_56789(orderFuture_train, action_train)
orderFuture_test = appear_56789(orderFuture_test, action_test)
# Count of the 56789 pattern in the corresponding (orderid-null) actions
def appear_56789_c(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_56789_count_c'])
userid = []
action_56789_count_c = []
action = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count56789 = 0
for i in range(len(action1)):
if (((i + 4) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6) and (action1['actionType'][i + 2] == 7) and (action1['actionType'][i + 3] == 8) and (action1['actionType'][i + 4] == 9)
and (action1['actionType_time'][i] < 1800) and (action1['actionType_time'][i + 1] < 1800) and ( action1['actionType_time'][i + 2] < 1800) and (action1['actionType_time'][i + 3] < 1800)):
count56789 = count56789 + 1
userid.append(row.userid)
action_56789_count_c.append(count56789)
count['userid'] = userid
count['action_56789_count_c'] = action_56789_count_c
orderFuture = pd.merge(orderFuture, count[['userid', 'action_56789_count_c']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_56789_c(orderFuture_train, action_train)
orderFuture_test = appear_56789_c(orderFuture_test, action_test)
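# The pattern counters above loop over every user in Python. A faster, equivalent idea
# (a sketch, not the original author's code): shift actionType within each user group and
# count matches in one vectorized pass. It assumes the same columns used throughout this
# file (userid, actionTime, actionType, actionType_time); the helper name is illustrative.
def count_pattern_vectorized(action, pattern, max_gap=1800):
    df = action.sort_values(['userid', 'actionTime'])
    mask = pd.Series(True, index=df.index)
    for offset, step in enumerate(pattern):
        mask &= (df.groupby('userid')['actionType'].shift(-offset) == step)
        if offset < len(pattern) - 1:
            # actionType_time is the gap to the next action; require it to be below max_gap
            mask &= (df.groupby('userid')['actionType_time'].shift(-offset) < max_gap)
    name = 'action_' + ''.join(str(s) for s in pattern) + '_count_vec'
    return df[mask].groupby('userid').size().rename(name).reset_index()
# Example (merged like the hand-written features; users with no match come out as NaN):
# orderFuture_train = pd.merge(orderFuture_train,
#                              count_pattern_vectorized(action_train, [5, 6, 7]),
#                              on='userid', how='left')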
############# 3.action feature_5 #############
"""
# 1. Number of actions with type greater than 6
# 2. Ratio of the sum of clicks 2-4 to the sum of clicks 5-9 (corresponding records)
# 3. Ratio of the sum of clicks 2-4 to the sum of clicks 5-9 (all records)
# 4. Average time per action type 1-9 (corresponding records)
# 5. Average time per action type 1-9 (all records)
"""
# Number of actions with type greater than 6 (i.e. types 7-9)
def greater_6_c(orderFuture):
action_7_c = orderFuture['action_7_c'].fillna(0)
action_8_c = orderFuture['action_8_c'].fillna(0)
action_9_c = orderFuture['action_9_c'].fillna(0)
orderFuture['action_greater_7_c'] = action_7_c + action_8_c + action_9_c
return orderFuture
orderFuture_train = greater_6_c(orderFuture_train)
orderFuture_test = greater_6_c(orderFuture_test)
# Ratio of the sum of clicks 2-4 to the sum of clicks 5-9 (corresponding records)
def rate_24_59_c(orderFuture):
action = orderFuture.fillna(0)
orderFuture['rate_1_59_c'] = (action['action_1_c'])/(action['action_5_c'] + action['action_6_c'] + action['action_7_c'] + action['action_8_c'] + action['action_9_c'])
orderFuture['rate_24_59_c'] = (action['action_2_c'] + action['action_3_c'] + action['action_4_c'])/(action['action_5_c'] + action['action_6_c'] + action['action_7_c'] + action['action_8_c'] + action['action_9_c'])
# orderFuture['rate_time_1_59_c'] = (action['time_1_c'])/(action['time_5_c'] + action['time_6_c'] + action['time_7_c'] + action['time_8_c'] + action['time_9_c'])
return orderFuture
orderFuture_train = rate_24_59_c(orderFuture_train)
orderFuture_test = rate_24_59_c(orderFuture_test)
# Ratio of the sum of clicks 2-4 to the sum of clicks 5-9 (all records)
def rate_24_59(orderFuture):
action = orderFuture.fillna(0)
orderFuture['rate_1_59'] = (action['action_1'])/(action['action_5'] + action['action_6'] + action['action_7'] + action['action_8'] + action['action_9'])
orderFuture['rate_24_59'] = (action['action_2'] + action['action_3'] + action['action_4'])/(action['action_5'] + action['action_6'] + action['action_7'] + action['action_8'] + action['action_9'])
# orderFuture['rate_time_1_59'] = (action['time_1'])/(action['time_5'] + action['time_6'] + action['time_7'] + action['time_8'] + action['time_9'])
return orderFuture
orderFuture_train = rate_24_59(orderFuture_train)
orderFuture_test = rate_24_59(orderFuture_test)
# Type of the last action in the full action history
def latest_actionType(orderFuture, action):
latest = action.groupby(['userid']).last().reset_index()
latest.rename(columns={'actionType': 'latest_actionType'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'latest_actionType']], on='userid', how='left')
return orderFuture
orderFuture_train = latest_actionType(orderFuture_train, action_train)
orderFuture_test = latest_actionType(orderFuture_test, action_test)
# Types of the 2nd- to 6th-from-last actions in the full action history
def latest2_actionType(orderFuture, action):
userid = []
latest_2_actionType = []
latest_3_actionType = []
latest_4_actionType = []
latest_5_actionType = []
latest_6_actionType = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
if(row.userid == action['userid'][row.actionTime-1]):
latest_2_actionType.append(action['actionType'][row.actionTime-1])
else:
latest_2_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 2]):
latest_3_actionType.append(action['actionType'][row.actionTime - 2])
else:
latest_3_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 3]):
latest_4_actionType.append(action['actionType'][row.actionTime - 3])
else:
latest_4_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 4]):
latest_5_actionType.append(action['actionType'][row.actionTime - 4])
else:
latest_5_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 5]):
latest_6_actionType.append(action['actionType'][row.actionTime - 5])
else:
latest_6_actionType.append(None)
latest_2['latest_2_actionType'] = latest_2_actionType
latest_2['latest_3_actionType'] = latest_3_actionType
latest_2['latest_4_actionType'] = latest_4_actionType
latest_2['latest_5_actionType'] = latest_5_actionType
latest_2['latest_6_actionType'] = latest_6_actionType
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_2_actionType', 'latest_3_actionType',
'latest_4_actionType', 'latest_5_actionType', 'latest_6_actionType']], on='userid', how='left')
return orderFuture
orderFuture_train = latest2_actionType(orderFuture_train, action_train)
orderFuture_test = latest2_actionType(orderFuture_test, action_test)
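# A loop-free sketch of the same "last k action types" idea (assuming action is sorted by
# userid and actionTime, as above); position 1 corresponds to the latest action, and the
# _vec-suffixed column names are illustrative only.
def last_k_action_types(action, k=6):
    tail = action.sort_values(['userid', 'actionTime']).groupby('userid').tail(k).copy()
    tail['pos'] = tail.groupby('userid').cumcount(ascending=False) + 1
    wide = tail.pivot(index='userid', columns='pos', values='actionType')
    wide.columns = ['latest_%d_actionType_vec' % c for c in wide.columns]
    return wide.reset_index()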
# Time intervals between actions
# Intervals before the last 1, 2, 3, 4 actions
# Mean, minimum, maximum and variance of the intervals
def time_interval(orderFuture, action):
# 1 2 3 4 5 6
userid = []
latest_1_time_interval = []
latest_2_time_interval = []
latest_3_time_interval = []
latest_4_time_interval = []
latest_5_time_interval = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
# 1
latest_1_time_interval.append(action['actionType_time'][row.max_index - 1])
# 2
if (row.userid == action['userid'][row.max_index - 2]):
latest_2_time_interval.append(action['actionType_time'][row.max_index - 2])
else:
latest_2_time_interval.append(None)
# 3
if (row.userid == action['userid'][row.max_index - 3]):
latest_3_time_interval.append(action['actionType_time'][row.max_index - 3])
else:
latest_3_time_interval.append(None)
# 4
if (row.userid == action['userid'][row.max_index - 4]):
latest_4_time_interval.append(action['actionType_time'][row.max_index - 4])
else:
latest_4_time_interval.append(None)
# 5
if (row.userid == action['userid'][row.max_index - 5]):
latest_5_time_interval.append(action['actionType_time'][row.max_index - 5])
else:
latest_5_time_interval.append(None)
latest_2['latest_1_time_interval'] = latest_1_time_interval
latest_2['latest_2_time_interval'] = latest_2_time_interval
latest_2['latest_3_time_interval'] = latest_3_time_interval
latest_2['latest_4_time_interval'] = latest_4_time_interval
latest_2['latest_5_time_interval'] = latest_5_time_interval
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_1_time_interval', 'latest_2_time_interval', 'latest_3_time_interval',
'latest_4_time_interval', 'latest_5_time_interval']], on='userid', how='left')
    # mean
latest = action.groupby(['userid'])['actionType_time'].mean().reset_index()
latest.rename(columns={'actionType_time': 'actionType_time_mean'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_mean']], on='userid', how='left')
    # variance
latest = action.groupby(['userid'])['actionType_time'].agg({'actionType_time_var': 'var'}).reset_index()
orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_var']], on='userid', how='left')
    # minimum
latest = action.groupby(['userid'])['actionType_time'].min().reset_index()
latest.rename(columns={'actionType_time': 'actionType_time_min'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_min']], on='userid', how='left')
return orderFuture
orderFuture_train = time_interval(orderFuture_train, action_train)
orderFuture_test = time_interval(orderFuture_test, action_test)
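# A sketch of getting the same interval statistics with a single grouped aggregation
# (named aggregation needs pandas >= 0.25); kept commented so it does not duplicate the
# merges done inside time_interval above.
# interval_stats = (action_train.groupby('userid')['actionType_time']
#                   .agg(actionType_time_mean='mean',
#                        actionType_time_var='var',
#                        actionType_time_min='min')
#                   .reset_index())
# orderFuture_train = pd.merge(orderFuture_train, interval_stats, on='userid', how='left')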
# Variance (and mean) of the timestamps of the last 2, 3, 4, 5, 6 actions
def var_actionTime(orderFuture, action):
userid = []
latest_3_actionTime_var = []
latest_4_actionTime_var = []
latest_5_actionTime_var = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
# 2
if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
(row.userid == action['userid'][row.actionTime - 2])):
var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2]
]).var()
latest_3_actionTime_var.append(var)
else:
latest_3_actionTime_var.append(None)
# 3
if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
(row.userid == action['userid'][row.actionTime - 2]) and (row.userid == action['userid'][row.actionTime - 3])):
var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2],
action['actionTime'][row.actionTime-2]-action['actionTime'][row.actionTime-3]
]).var()
latest_4_actionTime_var.append(var)
else:
latest_4_actionTime_var.append(None)
# 4
if ((row.userid == action['userid'][row.actionTime]) and (row.userid == action['userid'][row.actionTime - 1]) and
(row.userid == action['userid'][row.actionTime - 2]) and (row.userid == action['userid'][row.actionTime - 3]) and
(row.userid == action['userid'][row.actionTime - 4])):
var = pd.Series([action['actionTime'][row.actionTime]-action['actionTime'][row.actionTime-1],
action['actionTime'][row.actionTime-1]-action['actionTime'][row.actionTime-2],
action['actionTime'][row.actionTime-2]-action['actionTime'][row.actionTime-3],
action['actionTime'][row.actionTime-3]-action['actionTime'][row.actionTime-4]
]).var()
latest_5_actionTime_var.append(var)
else:
latest_5_actionTime_var.append(None)
latest_2['latest_3_actionTime_var'] = latest_3_actionTime_var
latest_2['latest_4_actionTime_var'] = latest_4_actionTime_var
latest_2['latest_5_actionTime_var'] = latest_5_actionTime_var
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_3_actionTime_var', 'latest_4_actionTime_var',
'latest_5_actionTime_var']], on='userid', how='left')
return orderFuture
orderFuture_train = var_actionTime(orderFuture_train, action_train)
orderFuture_test = var_actionTime(orderFuture_test, action_test)
# Average browsing time per active day for the corresponding records (could be limited to the most recent days)
def sum_actionType_time(orderFuture, action):
action = action[action.orderid.isnull()]
action1 = action.groupby(['userid'])['actionType_time'].sum().reset_index()
action1.rename(columns={'actionType_time': 'actionType_time_sum'}, inplace=True)
action2 = action.groupby(['userid', 'action_date']).count().reset_index()
action3 = action2.groupby(['userid'])['action_date'].count().reset_index()
action3.rename(columns={'action_date': 'days_action'}, inplace=True)
action3 = pd.merge(action1, action3, on='userid', how='left')
print(action3)
action3['actionType_time_day_avg'] = action3['actionType_time_sum']/action3['days_action']
orderFuture = pd.merge(orderFuture, action3[['userid', 'actionType_time_day_avg']], on='userid', how='left')
return orderFuture
orderFuture_train = sum_actionType_time(orderFuture_train, action_train)
orderFuture_test = sum_actionType_time(orderFuture_test, action_test)
# Average time spent on each action type 1-9 (corresponding records)
def avg_time_action_c(orderFuture, action, k):
time_k = []
select = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
print(index)
action_k = select[(select['actionType'] == k) & (select['userid'] == row.userid)]
if (len(action_k) == 0):
time = None
else:
time = 0
for index1, row1 in action_k.iterrows():
if(((index1 + 1) < len(action)) and (row1.userid == action['userid'][index1+1])):
time = time + (action['actionTime'][index1+1] - row1.actionTime)
time_k.append(time)
orderFuture['time_'+ str(k) +'_c'] = time_k
orderFuture['time_'+ str(k) +'_c'] = orderFuture['time_'+ str(k) +'_c']/orderFuture['action_'+ str(k) +'_c']
return orderFuture
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 9)
# Average time spent on each action type 1-9 (all records)
def avg_time_action(orderFuture, action, k):
time_k = []
for index, row in orderFuture.iterrows():
print(index)
action_k = action[(action['actionType'] == k) & (action['userid'] == row.userid)]
if (len(action_k) == 0):
time = None
else:
time = 0
for index1, row1 in action_k.iterrows():
if(((index1 + 1) < len(action)) and (row1.userid == action['userid'][index1+1])):
time = time + (action['actionTime'][index1+1] - row1.actionTime)
time_k.append(time)
orderFuture['time_'+ str(k)] = time_k
orderFuture['time_'+ str(k)] = orderFuture['time_'+ str(k)]/orderFuture['action_'+ str(k)]
return orderFuture
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 9)
############# 3.action feature_7 #############
"""
# 1.
# 2.
# 3.
# 4.
# 5.
# """
# Total usage time over the last 7 days (eval-auc: 0.963724)
def latest_7day_count(orderFuture, action):
userid = []
latest_7day_actionType_time = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
        # last 7 days
action1 = action[
(action['userid'] == row.userid) & (action['actionTime'] > action['actionTime'][row.max_index] - 604800)]
latest_7day_actionType_time.append(np.sum(action1.actionType_time))
latest_2['userid'] = userid
latest_2['latest_7day_actionType_time'] = latest_7day_actionType_time
print(latest_2)
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_7day_actionType_time']], on='userid', how='left')
return orderFuture
# orderFuture_train = latest_7day_count(orderFuture_train, action_train)
# orderFuture_test = latest_7day_count(orderFuture_test, action_test)
# Counts of each action type (1-9) on the most recent day
def latest_1day_actionType_count(orderFuture, action):
userid = []
latest_1day_actionType1_count = []
latest_1day_actionType2_count = []
latest_1day_actionType3_count = []
latest_1day_actionType4_count = []
latest_1day_actionType5_count = []
latest_1day_actionType6_count = []
latest_1day_actionType7_count = []
latest_1day_actionType8_count = []
latest_1day_actionType9_count = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
print(index)
userid.append(row.userid)
        # speed-up: filter by userid first, then by date
action0 = action[action['userid'] == row.userid]
action_1 = action0[action0['action_date'] == action0['action_date'][row.max_index]]
action1 = action_1[action_1['actionType'] == 1]
action2 = action_1[action_1['actionType'] == 2]
action3 = action_1[action_1['actionType'] == 3]
action4 = action_1[action_1['actionType'] == 4]
action5 = action_1[action_1['actionType'] == 5]
action6 = action_1[action_1['actionType'] == 6]
action7 = action_1[action_1['actionType'] == 7]
action8 = action_1[action_1['actionType'] == 8]
action9 = action_1[action_1['actionType'] == 9]
latest_1day_actionType1_count.append(len(action1))
latest_1day_actionType2_count.append(len(action2))
latest_1day_actionType3_count.append(len(action3))
latest_1day_actionType4_count.append(len(action4))
latest_1day_actionType5_count.append(len(action5))
latest_1day_actionType6_count.append(len(action6))
latest_1day_actionType7_count.append(len(action7))
latest_1day_actionType8_count.append(len(action8))
latest_1day_actionType9_count.append(len(action9))
latest_2['userid'] = userid
latest_2['latest_1day_actionType1_count'] = latest_1day_actionType1_count
latest_2['latest_1day_actionType2_count'] = latest_1day_actionType2_count
latest_2['latest_1day_actionType3_count'] = latest_1day_actionType3_count
latest_2['latest_1day_actionType4_count'] = latest_1day_actionType4_count
latest_2['latest_1day_actionType5_count'] = latest_1day_actionType5_count
latest_2['latest_1day_actionType6_count'] = latest_1day_actionType6_count
latest_2['latest_1day_actionType7_count'] = latest_1day_actionType7_count
latest_2['latest_1day_actionType8_count'] = latest_1day_actionType8_count
latest_2['latest_1day_actionType9_count'] = latest_1day_actionType9_count
print(latest_2)
orderFuture = pd.merge(orderFuture, latest_2[['userid',
'latest_1day_actionType1_count', 'latest_1day_actionType2_count', 'latest_1day_actionType3_count',
'latest_1day_actionType4_count', 'latest_1day_actionType5_count', 'latest_1day_actionType6_count',
'latest_1day_actionType7_count', 'latest_1day_actionType8_count', 'latest_1day_actionType9_count'
]], on='userid', how='left')
return orderFuture
# orderFuture_train = latest_1day_actionType_count(orderFuture_train, action_train)
# orderFuture_test = latest_1day_actionType_count(orderFuture_test, action_test)
# Counts of each action type (1-9) over the last 2/3/4/5/6/7 days
def latest_2day_actionType_count(orderFuture, action, k):
print(k)
userid = []
latest_2day_actionType1_count = []
latest_2day_actionType2_count = []
latest_2day_actionType3_count = []
latest_2day_actionType4_count = []
latest_2day_actionType5_count = []
latest_2day_actionType6_count = []
latest_2day_actionType7_count = []
latest_2day_actionType8_count = []
latest_2day_actionType9_count = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
        # speed-up: filter by userid first, then by date
action0 = action[action['userid'] == row.userid]
action0['action_date'] = pd.to_datetime(action0['action_date'])
action_2 = action0[action0['action_date'] >= (action0['action_date'][row.max_index] - datetime.timedelta(days=(k-1)))]
action1 = action_2[action_2['actionType'] == 1]
action2 = action_2[action_2['actionType'] == 2]
action3 = action_2[action_2['actionType'] == 3]
action4 = action_2[action_2['actionType'] == 4]
action5 = action_2[action_2['actionType'] == 5]
action6 = action_2[action_2['actionType'] == 6]
action7 = action_2[action_2['actionType'] == 7]
action8 = action_2[action_2['actionType'] == 8]
action9 = action_2[action_2['actionType'] == 9]
latest_2day_actionType1_count.append(len(action1))
latest_2day_actionType2_count.append(len(action2))
latest_2day_actionType3_count.append(len(action3))
latest_2day_actionType4_count.append(len(action4))
latest_2day_actionType5_count.append(len(action5))
latest_2day_actionType6_count.append(len(action6))
latest_2day_actionType7_count.append(len(action7))
latest_2day_actionType8_count.append(len(action8))
latest_2day_actionType9_count.append(len(action9))
latest_2['userid'] = userid
latest_2['latest_' + str(k) + 'day_actionType1_count'] = latest_2day_actionType1_count
latest_2['latest_' + str(k) + 'day_actionType2_count'] = latest_2day_actionType2_count
latest_2['latest_' + str(k) + 'day_actionType3_count'] = latest_2day_actionType3_count
latest_2['latest_' + str(k) + 'day_actionType4_count'] = latest_2day_actionType4_count
latest_2['latest_' + str(k) + 'day_actionType5_count'] = latest_2day_actionType5_count
latest_2['latest_' + str(k) + 'day_actionType6_count'] = latest_2day_actionType6_count
latest_2['latest_' + str(k) + 'day_actionType7_count'] = latest_2day_actionType7_count
latest_2['latest_' + str(k) + 'day_actionType8_count'] = latest_2day_actionType8_count
latest_2['latest_' + str(k) + 'day_actionType9_count'] = latest_2day_actionType9_count
orderFuture = pd.merge(orderFuture, latest_2[['userid',
'latest_' + str(k) + 'day_actionType1_count', 'latest_' + str(k) + 'day_actionType2_count', 'latest_' + str(k) + 'day_actionType3_count',
'latest_' + str(k) + 'day_actionType4_count', 'latest_' + str(k) + 'day_actionType5_count', 'latest_' + str(k) + 'day_actionType6_count',
'latest_' + str(k) + 'day_actionType7_count', 'latest_' + str(k) + 'day_actionType8_count', 'latest_' + str(k) + 'day_actionType9_count'
]], on='userid', how='left')
return orderFuture
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 2)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 2)
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 3)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 3)
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 4)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 4)
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 5)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 5)
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 6)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 6)
# orderFuture_train = latest_2day_actionType_count(orderFuture_train, action_train, 7)
# orderFuture_test = latest_2day_actionType_count(orderFuture_test, action_test, 7)
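# A sketch of the same "counts per type over the last k days" feature without the per-user
# Python loop (assuming action_date parses with pd.to_datetime, as in the loop above);
# the produced column names carry a _vec suffix to mark them as illustrative.
def latest_kday_counts_vectorized(action, k):
    df = action.copy()
    df['action_date'] = pd.to_datetime(df['action_date'])
    last_date = df.groupby('userid')['action_date'].transform('max')
    recent = df[df['action_date'] >= last_date - pd.Timedelta(days=k - 1)]
    counts = pd.crosstab(recent['userid'], recent['actionType'])
    counts.columns = ['latest_%dday_actionType%d_count_vec' % (k, c) for c in counts.columns]
    return counts.reset_index()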
# Distance (number of intervening actions) to the most recent occurrence of each type 1-9; only 5-9 are kept downstream
def min_distance_k(orderFuture, action):
userid = []
min_distance_1 = []
min_distance_2 = []
min_distance_3 = []
min_distance_4 = []
min_distance_5 = []
min_distance_6 = []
min_distance_7 = []
min_distance_8 = []
min_distance_9 = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
print(index)
# print(row.userid)
# print(last_max_index)
# print(row.max_index)
userid.append(row.userid)
# 1
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 1):
min_distance_1.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 1):
min_distance_1.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_1.append(None)
break
else:
min_distance_1.append(None)
break
# 2
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 2):
min_distance_2.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 2):
min_distance_2.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_2.append(None)
break
else:
min_distance_2.append(None)
break
# 3
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 3):
min_distance_3.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 3):
min_distance_3.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_3.append(None)
break
else:
min_distance_3.append(None)
break
# 4
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 4):
min_distance_4.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 4):
min_distance_4.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_4.append(None)
break
else:
min_distance_4.append(None)
break
# 5
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 5):
min_distance_5.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 5):
min_distance_5.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_5.append(None)
break
else:
min_distance_5.append(None)
break
# 6
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 6):
min_distance_6.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 6):
min_distance_6.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_6.append(None)
break
else:
min_distance_6.append(None)
break
# 7
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 7):
min_distance_7.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 7):
min_distance_7.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_7.append(None)
break
else:
min_distance_7.append(None)
break
# 8
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 8):
min_distance_8.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 8):
min_distance_8.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_8.append(None)
break
else:
min_distance_8.append(None)
break
# 9
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 9):
min_distance_9.append(1)
break
if (action['actionType'][row.max_index - (i + 1)] == 9):
min_distance_9.append(i + 2)
break
else:
if ((i + 1) == row.max_index):
min_distance_9.append(None)
break
else:
min_distance_9.append(None)
break
latest_2['userid'] = userid
latest_2['min_distance_1'] = min_distance_1
latest_2['min_distance_2'] = min_distance_2
latest_2['min_distance_3'] = min_distance_3
latest_2['min_distance_4'] = min_distance_4
latest_2['min_distance_5'] = min_distance_5
latest_2['min_distance_6'] = min_distance_6
latest_2['min_distance_7'] = min_distance_7
latest_2['min_distance_8'] = min_distance_8
latest_2['min_distance_9'] = min_distance_9
print(latest_2)
orderFuture = pd.merge(orderFuture,
latest_2[['userid', 'min_distance_1', 'min_distance_2', 'min_distance_3', 'min_distance_4',
'min_distance_5', 'min_distance_6', 'min_distance_7', 'min_distance_8',
'min_distance_9']], on='userid', how='left')
return orderFuture
# orderFuture_test = min_distance_k(orderFuture_test, action_test)
# orderFuture_train = min_distance_k(orderFuture_train, action_train)
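# min_distance_k above walks backwards from each user's last action in a Python loop, once
# per action type. A sketch of the same feature computed in one pass (assuming the sort
# order used elsewhere in this file): number actions from the end of each user's history and
# take, per (userid, actionType), the smallest such reverse position (1 = the last action).
def min_distance_vectorized(action):
    df = action.sort_values(['userid', 'actionTime']).copy()
    df['rev_pos'] = df.groupby('userid').cumcount(ascending=False) + 1
    dist = (df.groupby(['userid', 'actionType'])['rev_pos'].min()
              .unstack('actionType'))
    dist.columns = ['min_distance_%d_vec' % c for c in dist.columns]
    return dist.reset_index()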
# Time since the most recent occurrence of each action type 1-9
def min_time_k(orderFuture, action):
userid = []
min_time_1 = []
min_time_2 = []
min_time_3 = []
min_time_4 = []
min_time_5 = []
min_time_6 = []
min_time_7 = []
min_time_8 = []
min_time_9 = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
# print(latest)
for index, row in latest.iterrows():
print(index)
# print(row.userid)
# print(last_max_index)
# print(row.max_index)
userid.append(row.userid)
# 1
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 1):
min_time_1.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 1):
min_time_1.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_1.append(None)
break
else:
min_time_1.append(None)
break
# 2
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 2):
min_time_2.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 2):
min_time_2.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_2.append(None)
break
else:
min_time_2.append(None)
break
# 3
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 3):
min_time_3.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 3):
min_time_3.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_3.append(None)
break
else:
min_time_3.append(None)
break
# 4
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 4):
min_time_4.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 4):
min_time_4.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_4.append(None)
break
else:
min_time_4.append(None)
break
# 5
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 5):
min_time_5.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 5):
min_time_5.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_5.append(None)
break
else:
min_time_5.append(None)
break
# 6
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 6):
min_time_6.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 6):
min_time_6.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_6.append(None)
break
else:
min_time_6.append(None)
break
# 7
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 7):
min_time_7.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 7):
min_time_7.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_7.append(None)
break
else:
min_time_7.append(None)
break
# 8
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 8):
min_time_8.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 8):
min_time_8.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_8.append(None)
break
else:
min_time_8.append(None)
break
# 9
for i in range(row.max_index):
if (row.userid == action['userid'][row.max_index - (i + 1)]):
if (action['actionType'][row.max_index] == 9):
min_time_9.append(0)
break
if (action['actionType'][row.max_index - (i + 1)] == 9):
min_time_9.append(action['actionTime'][row.max_index]-action['actionTime'][row.max_index - (i + 1)])
break
else:
if ((i + 1) == row.max_index):
min_time_9.append(None)
break
else:
min_time_9.append(None)
break
latest_2['userid'] = userid
latest_2['min_time_1'] = min_time_1
latest_2['min_time_2'] = min_time_2
latest_2['min_time_3'] = min_time_3
latest_2['min_time_4'] = min_time_4
latest_2['min_time_5'] = min_time_5
latest_2['min_time_6'] = min_time_6
latest_2['min_time_7'] = min_time_7
latest_2['min_time_8'] = min_time_8
latest_2['min_time_9'] = min_time_9
print(latest_2)
orderFuture = pd.merge(orderFuture, latest_2[['userid'
, 'min_time_1', 'min_time_2', 'min_time_3'
, 'min_time_4', 'min_time_5', 'min_time_6'
, 'min_time_7', 'min_time_8', 'min_time_9'
]], on='userid', how='left')
return orderFuture
orderFuture_train = min_time_k(orderFuture_train, action_train)
orderFuture_test = min_time_k(orderFuture_test, action_test)
# Total time spent on 5->6, 6->7, 7->8, 8->9 transitions (all records)
def time_56_all(orderFuture, action):
count = pd.DataFrame(
columns=['userid', 'time_56_all', 'time_67_all', 'time_78_all', 'time_89_all'])
userid = []
time_56_all = []
time_67_all = []
time_78_all = []
time_89_all = []
for index, row in orderFuture.iterrows():
print(index)
action1 = action[action['userid'] == row.userid].reset_index()
count56 = 0
count67 = 0
count78 = 0
count89 = 0
for i in range(len(action1)):
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6)):
count56 = count56 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7)):
count67 = count67 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8)):
count78 = count78 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 8) and (action1['actionType'][i + 1] == 9)):
count89 = count89 + (action1['actionTime'][i+1] - action1['actionTime'][i])
userid.append(row.userid)
time_56_all.append(count56)
time_67_all.append(count67)
time_78_all.append(count78)
time_89_all.append(count89)
count['userid'] = userid
count['time_56_all'] = time_56_all
count['time_67_all'] = time_67_all
count['time_78_all'] = time_78_all
count['time_89_all'] = time_89_all
    count.loc[count['time_56_all'] == 0, 'time_56_all'] = None
    count.loc[count['time_67_all'] == 0, 'time_67_all'] = None
    count.loc[count['time_78_all'] == 0, 'time_78_all'] = None
    count.loc[count['time_89_all'] == 0, 'time_89_all'] = None
orderFuture = pd.merge(orderFuture, count[['userid', 'time_56_all', 'time_67_all', 'time_78_all', 'time_89_all']],
on='userid', how='left')
return orderFuture
orderFuture_train = time_56_all(orderFuture_train, action_train)
orderFuture_test = time_56_all(orderFuture_test, action_test)
# Total time spent on 5->6, 6->7, 7->8, 8->9 transitions (corresponding records)
def time_56_c(orderFuture, action):
count = pd.DataFrame(
        columns=['userid', 'time_56_c', 'time_67_c', 'time_78_c', 'time_89_c'])
userid = []
time_56_all = []
time_67_all = []
time_78_all = []
time_89_all = []
action = action[action.orderid.isnull()]
for index, row in orderFuture.iterrows():
print(index)
action1 = action[action['userid'] == row.userid].reset_index()
count56 = 0
count67 = 0
count78 = 0
count89 = 0
for i in range(len(action1)):
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6)):
count56 = count56 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7)):
count67 = count67 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8)):
count78 = count78 + (action1['actionTime'][i+1] - action1['actionTime'][i])
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 8) and (action1['actionType'][i + 1] == 9)):
count89 = count89 + (action1['actionTime'][i+1] - action1['actionTime'][i])
userid.append(row.userid)
time_56_all.append(count56)
time_67_all.append(count67)
time_78_all.append(count78)
time_89_all.append(count89)
count['userid'] = userid
count['time_56_c'] = time_56_all
count['time_67_c'] = time_67_all
count['time_78_c'] = time_78_all
count['time_89_c'] = time_89_all
    count.loc[count['time_56_c'] == 0, 'time_56_c'] = None
    count.loc[count['time_67_c'] == 0, 'time_67_c'] = None
    count.loc[count['time_78_c'] == 0, 'time_78_c'] = None
    count.loc[count['time_89_c'] == 0, 'time_89_c'] = None
orderFuture = pd.merge(orderFuture, count[['userid', 'time_56_c', 'time_67_c', 'time_78_c', 'time_89_c']],
on='userid', how='left')
return orderFuture
orderFuture_train = time_56_c(orderFuture_train, action_train)
orderFuture_test = time_56_c(orderFuture_test, action_test)
# First actionTime per user
def first_actionTime(orderFuture, action):
first = action.groupby(['userid']).first().reset_index()
first.rename(columns={'actionTime': 'first_actionTime'}, inplace=True)
    orderFuture = pd.merge(orderFuture, first[['userid', 'first_actionTime']], on='userid', how='left')
import pandas as pd
import numpy as np
from statsmodels.stats.proportion import proportion_confint
import os, yaml
from datetime import date, datetime as dt
from typing import List, Dict, Optional
import plotly.graph_objects as go, plotly.express as px, plotly.figure_factory as ff
from plotly.offline import init_notebook_mode
from plotly.subplots import make_subplots
with open(os.getcwd() + '/utils/config.yaml') as file:
config = yaml.load(file, Loader=yaml.Loader)
surface_colors = config['surface_colors']
def timer(f, *args, **kwargs):
'''
Timer decorator for functions
'''
def wrapper(*args, **kwargs):
tic = dt.now()
result = f(*args, **kwargs)
toc = dt.now()
print(f'@{f.__name__} took {toc-tic}')
return result
return wrapper
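# Usage sketch for the decorator above; the decorated function and path are made up.
# @timer
# def load_matches(path):
#     return pd.read_parquet(path)
#
# load_matches('data/matches.parquet')   # prints something like "@load_matches took 0:00:01.234567"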
def get_player_name(name):
return '. '.join(['.'.join([e[0] for e in name.split(' ')[:-1]]), name.split(' ')[-1]])
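# Examples of the abbreviation above:
# get_player_name('Roger Federer')          -> 'R. Federer'
# get_player_name('Juan Martin del Potro')  -> 'J.M.d. Potro'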
class TennisDataLoader:
'''
Loads tennis matches data and players details data given both paths and stores them in
self.matches and self.players
Attributes
----------
self.matches: pd.DataFrame
dataframe containing all the players matches
self.players: pd.DataFrame
dataframe containing all the players details
self.matches_path: str
path where self.matches is read from
self.players_path: str
path where self.players is read from
'''
def __init__(self, data_path: str, source='parquet', sep=','):
'''
Loads and stores matches and players data
Parameters
----------
data_path: str
path of data
type: str, default 'parquet'
Type of extention: one between 'parquet' or 'csv'.
sep: str, default ','
Field delimiter for the input files if type='csv'
'''
if source == 'parquet':
self.matches = pd.read_parquet(data_path + '/matches.parquet')
self.players = pd.read_parquet(data_path + '/players.parquet').dropna()
elif source == 'csv':
self.matches = pd.read_csv(data_path + '/matches.csv', sep=sep)
self.players = pd.read_csv(data_path + '/players.csv', sep=sep).dropna()
else:
raise Exception('Can only load parquet and csv format')
def __repr__(self):
n_matches = self.matches.shape[0]
n_players = self.players.shape[0]
return f'TennisDataLoader storing {n_matches} matches and {n_players} players data'
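# Usage sketch (the data directory is illustrative):
# tdl = TennisDataLoader('data/atp', source='parquet')
# print(tdl)                                  # TennisDataLoader storing ... matches and ... players data
# matches, players = tdl.matches, tdl.players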
class PlayerDataLoader:
'''
    Creates static attributes for a given tennis player
    Attributes
    ----------
    player_name: str
        Tennis player name
    player_matches: pd.DataFrame
        Dataframe containing all matches the player played in his/her career
    n_matches: int
        Number of matches the player played in his/her career
    player_rank: pd.DataFrame
        Dataframe containing the player's best (minimum) rank per year
player_details: pd.DataFrame
Dataframe containing player details
Methods
-------
get_rank
        Calculates the player's best (minimum) rank per year
get_player_details
Retrieves player details from matches and players information dataframes
'''
def __init__(self,
player_name: str,
player_matches: pd.DataFrame,
player_details: pd.DataFrame
):
'''
Parameters
----------
player_name: str
Tennis player name
        player_matches: pd.DataFrame
            Dataframe containing all players' matches (as loaded by TennisDataLoader)
        player_details: pd.DataFrame
            Dataframe containing all players' details (as loaded by TennisDataLoader)
'''
self.player_name = player_name
player_details = player_details[player_details['player_name']==player_name]
self.player_id = player_details.iloc[0]['id']
self.player_matches = (player_matches.loc[player_matches['id']==self.player_id]
.sort_values(['tourney_date', 'match_num'])
)
self.n_matches = self.player_matches.shape[0]
self.player_rank = self.get_rank()
self.player_details = self.get_player_details(player_details)
def get_rank(self):
'''
        Calculate the player's best (minimum) rank per year
        Returns
        -------
        rank_df: pd.DataFrame
            best rank per year
'''
rank_df = (self.player_matches
.groupby('year')
.agg(rank = ('rank', np.min))
.dropna()
.astype(int)
.reset_index()
)
return rank_df
def get_player_details(self, player_details):
'''
Retrieves player details
Returns
-------
player_details: pd.DataFrame
Dataframe containing player details
'''
        age = np.round((date.today() - pd.to_datetime(player_details['birthdate'])
import numpy as np
import pandas as pd
from scipy import signal, stats
from sklearn.linear_model import LinearRegression
from obspy.signal.trigger import classic_sta_lta
def sequence_generator(data, xcol="acoustic_data", ycol="time_to_failure", size=150000):
"""Generator that extracts segments of the signal from the data.
Parameters
----------
data: pd.DataFrame,
The data with all observations. Must have two columns: one with the measurement
of the signal and one with the target, i.e., time to the next earthquake.
xcol: str, optional (default: "acoustic_data"),
The column referring to the the signal data.
ycol: str, optional (default: "time_to_failure"),
The column referring to the target value.
size: int, optional (default: 150,000),
The number of observations to include in a single sequence. Should be left at
its default to generate sequences similar to the test data.
Returns
-------
A generator object that generates tuples like:
(sequence of 'size' observations of xcol, last corresponding value of ycol).
"""
while True:
indices = np.random.randint(0, len(data) - size - 1, 10000)
for idx in indices:
y = data[ycol].iloc[idx + size - 1]
x = data[idx:(idx + size)][xcol].values
yield x, y
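# Usage sketch for the generator above (file name and dtypes are illustrative, following the
# LANL-style training frame this module assumes):
# train = pd.read_csv('train.csv', dtype={'acoustic_data': np.int16, 'time_to_failure': np.float64})
# gen = sequence_generator(train, size=150000)
# x, y = next(gen)   # x: array of 150000 signal samples, y: time to failure at the end of the segment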
class FeatureComputer():
"""Class that computes features over a given array of observations.
This is done in a class so that it can be initialized once and can then be used throughout the
train-validate-test sequence without specifying all the parameters everytime.
Parameters
----------
minimum, maximum, mean, median, std: boolean, optional (default: True),
Whether to include the corresponding feature.
quantiles: list of floats,
The quantiles to compute.
abs_min, abs_max, abs_mean, abs_median, abs_std: boolean, optional (default: True),
The same features as above, but calculated over the absolute signal.
abs_quantiles: list of floats,
The quantiles to compute over the absolute signal.
mean_abs_delta, mean_rel_delta: boolean, optional (default: True),
Whether to compute the average change per observation. For 'mean_rel_delta' it is divided
by the value of the previous observation, which leads to a change proportion.
max_to_min: boolean, optional (default: True),
Whether to compute the rate between the absolute maximum and the absolute minimum.
count_abs_big: list of floats,
The thresholds for which it is counted how many times the absolute signal
exceeds the threshold.
abs_trend: boolean, optional (default: True),
Whether to calculate the linear trend of the time series.
mad: boolean, optional (default: True),
Whether to calculate the mean absolute deviation of the time series.
skew: boolean, optional (default: True),
Whether to calculate the skewness of the time series.
abs_skew: boolean, optional (default: True),
Whether to calculate the skewness of the absolute values of the time series.
kurtosis: boolean, optional (default: True),
        Whether to calculate the kurtosis of the time series. The kurtosis
        measures the tailedness of a time series.
    abs_kurtosis: boolean, optional (default: True),
        Whether to calculate the kurtosis of the absolute values of the time series.
        The kurtosis measures the tailedness of a time series.
hilbert: boolean, optional (default: True),
        Whether to calculate the absolute mean in Hilbert-transformed space.
    hann: boolean, optional (default: True),
        Whether to calculate the absolute mean within a Hann window.
    stalta: list of (int, int) tuples,
        The (short window, long window) lengths for which the mean STA/LTA
        (short-time average over long-time average) is calculated.
    stalta_window: list of (int, int) tuples,
        The (short window, long window) lengths for which the mean STA/LTA
        is calculated per window.
exp_mov_ave: list of floats,
        The span values for which the mean of the exponential
        moving average is calculated.
    exp_mov_ave_window: list of floats,
        The span values for which the mean of the exponential
        moving average is calculated per window.
window: int or None, optional (default: None),
If given, calculates the features over subsequences of size 'window'.
array_length: int, optional (default: 150000),
The array length to expect. Only needed if window is not None.
Returns
-------
result: np.array,
The specified features of the given array.
Notes
-----
In order to see which value in the result refers to which feature, see 'self.feature_names'.
"""
feats = ["minimum", "maximum", "mean", "median", "std", "abs_min", "abs_max", "abs_mean",
"abs_median", "abs_std", "mean_abs_delta", "mean_rel_delta", "max_to_min", "abs_trend",
"mad", "skew", "abs_skew", "kurtosis", "abs_kurtosis", "hilbert", "hann"]
def __init__(self, minimum=True, maximum=True, mean=True, median=True, std=True, quantiles=None,
abs_min=True, abs_max=True, abs_mean=True, abs_median=True, abs_std=True, abs_quantiles=None,
mean_abs_delta=True, mean_rel_delta=True, max_to_min=True, count_abs_big=None,
abs_trend=True, mad=True, skew=True, abs_skew=True, kurtosis=True, abs_kurtosis=True,
hilbert=True, hann=True, stalta=None, stalta_window=None, exp_mov_ave=None, exp_mov_ave_window=None,
window=None, array_length=150000):
self.minimum = minimum
self.maximum = maximum
self.mean = mean
self.median = median
self.std = std
self.abs_min = abs_min
self.abs_max = abs_max
self.abs_mean = abs_mean
self.abs_median = abs_median
self.abs_std = abs_std
self.mean_abs_delta = mean_abs_delta
self.mean_rel_delta = mean_rel_delta
self.max_to_min = max_to_min
self.abs_trend = abs_trend
self.mad = mad
self.skew = skew
self.abs_skew = abs_skew
self.kurtosis = kurtosis
self.abs_kurtosis = abs_kurtosis
self.hilbert = hilbert
self.hann = hann
if quantiles is None:
self.quantiles = []
else:
self.quantiles = quantiles
if abs_quantiles is None:
self.abs_quantiles = []
else:
self.abs_quantiles = abs_quantiles
self.window = window
if count_abs_big is None:
self.count_abs_big = []
else:
self.count_abs_big = count_abs_big
if stalta is None:
self.stalta = []
else:
self.stalta = stalta
if stalta_window is None:
self.stalta_window = []
else:
self.stalta_window = stalta_window
if exp_mov_ave is None:
self.exp_mov_ave = []
else:
self.exp_mov_ave = exp_mov_ave
if exp_mov_ave_window is None:
self.exp_mov_ave_window = []
else:
self.exp_mov_ave_window = exp_mov_ave_window
if self.window is not None:
self.indicators = np.array(([np.ones(window)*i for i in range(int(np.ceil(array_length/window)))]),
dtype=int).flatten()
self.indicators = self.indicators[:array_length]
assert len(self.indicators) == array_length, "Lengths do not match"
self.feature_names = self._infer_names()
self.n_features = len(self.feature_names)
def _infer_names(self):
"""Infer the names of the features that will be calculated."""
quantile_names = [str(q) + "-quantile" for q in self.quantiles]
abs_quantile_names = [str(q) + "-abs_quantile" for q in self.abs_quantiles]
count_abs_big_names = [str(q) + "-count_big" for q in self.count_abs_big]
stalta_names = ["all_stalta-" + str(q[0]) + "-" + str(q[1]) for q in self.stalta]
exp_mov_ave_names = ["all_exp_mov_ave-" + str(q) for q in self.exp_mov_ave]
if self.window is not None:
stalta_names_window = ["stalta-" + str(q[0]) + "-" + str(q[1]) for q in self.stalta_window]
exp_mov_ave_names_window = ["exp_mov_ave-" + str(q) for q in self.exp_mov_ave_window]
names = np.array(self.feats)[[self.minimum, self.maximum, self.mean, self.median, self.std,
self.abs_min, self.abs_max, self.abs_mean, self.abs_median,
self.abs_std, self.mean_abs_delta, self.mean_rel_delta,
self.max_to_min, self.abs_trend, self.mad, self.skew, self.abs_skew,
self.kurtosis, self.abs_kurtosis, self.hilbert, self.hann]]
names = names.tolist() + quantile_names + abs_quantile_names + count_abs_big_names
if self.window is not None:
all_names = [str(i) + "_" + name for i in np.unique(self.indicators) for name in names + stalta_names_window + exp_mov_ave_names_window]
self.result_template_window = np.zeros(int(len(all_names) / len(np.unique(self.indicators))))
all_names = all_names + ["all_" + name for name in names] + stalta_names + exp_mov_ave_names
self.result_template = np.zeros(len(names + stalta_names + exp_mov_ave_names))
return all_names
else:
all_names = names + stalta_names + exp_mov_ave_names
self.result_template = np.zeros(len(all_names))
return all_names
def compute(self, arr):
if self.window is None:
return self._compute_features(arr)
else:
df = pd.DataFrame({"arr": arr, "indicator": self.indicators})
values = (df.groupby("indicator")["arr"]
.apply(lambda x: self._compute_features(x, window=True))
.apply(pd.Series)
.values
.flatten())
# include values over the whole segment
overall_values = self._compute_features(arr)
return np.concatenate([values, overall_values])
def _compute_features(self, arr, window=False):
if window:
result = np.zeros_like(self.result_template_window)
else:
result = np.zeros_like(self.result_template)
i = 0
if self.minimum:
result[i] = np.min(arr)
i += 1
if self.maximum:
result[i] = np.max(arr)
i += 1
if self.mean:
result[i] = np.mean(arr)
i += 1
if self.median:
result[i] = np.median(arr)
i += 1
if self.std:
result[i] = np.std(arr)
i += 1
if self.abs_min:
result[i] = np.min(np.abs(arr))
i += 1
if self.abs_max:
result[i] = np.max(np.abs(arr))
i += 1
if self.abs_mean:
result[i] = np.mean(np.abs(arr))
i += 1
if self.abs_median:
result[i] = np.median(np.abs(arr))
i += 1
if self.abs_std:
result[i] = np.std(np.abs(arr))
i += 1
if self.mean_abs_delta:
result[i] = np.mean(np.diff(arr))
i += 1
if self.mean_rel_delta:
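            # note: as written, this is the mean of the indices of the nonzero relative
            # changes, not the mean relative change described in the class docstring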
result[i] = np.mean(np.nonzero((np.diff(arr) / arr[:-1]))[0])
i += 1
if self.max_to_min:
result[i] = np.max(arr) / np.abs(np.min(arr))
i += 1
if self.abs_trend:
idx = np.array(range(len(arr)))
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), np.abs(arr))
result[i] = lr.coef_[0]
i += 1
if self.mad: # mean absolute deviation
result[i] = np.mean(np.abs(arr - np.mean(arr)))
i += 1
if self.skew:
result[i] = stats.skew(arr)
i += 1
if self.abs_skew:
result[i] = stats.skew(np.abs(arr))
i += 1
if self.kurtosis: # measure of tailedness
result[i] = stats.kurtosis(arr)
i += 1
if self.abs_kurtosis: # measure of tailedness
result[i] = stats.kurtosis(np.abs(arr))
i += 1
if self.hilbert: # abs mean in hilbert tranformed space
result[i] = np.mean(np.abs(signal.hilbert(arr)))
i += 1
if self.hann: # mean in hann window
result[i] = np.mean(signal.convolve(arr, signal.hann(150), mode='same') / np.sum(signal.hann(150)))
i += 1
if self.quantiles is not None:
result[i:i + len(self.quantiles)] = np.quantile(arr, q=self.quantiles)
i += len(self.quantiles)
if self.abs_quantiles is not None:
result[i:i + len(self.abs_quantiles)] = np.quantile(np.abs(arr), q=self.abs_quantiles)
i += len(self.abs_quantiles)
if self.count_abs_big is not None:
result[i: i + len(self.count_abs_big)] = np.array([len(arr[np.abs(arr) > q]) for q in self.count_abs_big])
i += len(self.count_abs_big)
if self.stalta:
if window:
result[i:i + len(self.stalta_window)] = np.array(
[np.mean(classic_sta_lta(arr, q[0], q[1])) for q in self.stalta_window])
i += len(self.stalta_window)
else:
result[i:i + len(self.stalta)] = np.array(
[np.mean(classic_sta_lta(arr, q[0], q[1])) for q in self.stalta])
i += len(self.stalta)
if self.exp_mov_ave:
if window:
result[i:i + len(self.exp_mov_ave_window)] = np.array(
                [np.mean(pd.Series(arr).ewm(span=q).mean()) for q in self.exp_mov_ave_window])
i += len(self.exp_mov_ave_window)
else:
result[i:i + len(self.exp_mov_ave)] = np.array(
                [np.mean(pd.Series(arr).ewm(span=q).mean()) for q in self.exp_mov_ave])
i += len(self.exp_mov_ave)
return result
def create_feature_dataset(data, feature_computer, xcol="acoustic_data", ycol="time_to_failure", n_samples=100,
stft=False, stft_feature_computer=None):
"""Samples sequences from the data, computes features for each sequence, and stores the result
in a new dataframe.
Parameters
----------
data: pd.DataFrame,
The data with all observations. Must have two columns: one with the measurement
of the signal and one with the target, i.e., time to the next earthquake.
feature_computer: FeatureComputer object or similar,
A class that implements a method '.compute()' that takes an array and returns
features. It must also have an attribute 'feature_names' that shows the corresponding
names of the features.
xcol: str, optional (default: "acoustic_data"),
        The column referring to the signal data.
ycol: str, optional (default: "time_to_failure"),
The column referring to the target value.
n_samples: int, optional (default: 100),
The number of sequences to process and return.
stft: bool, optional (default: False),
Whether to calculate the Short Time Fourier Transform.
stft_feature_computer: FeatureComputer object or None,
The computer for stft features.
Returns
-------
feature_data: pd.DataFrame,
A new dataframe of shape (n_samples, number of features) with the new features per sequence.
"""
if (stft is True) and (stft_feature_computer is None):
        assert feature_computer.window is None, ("If stft is True, feature_computer must have window=None or "
                                                 "a separate stft_feature_computer must be provided.")
stft_feature_computer = feature_computer
new_data = pd.DataFrame({feature: np.zeros(n_samples) for feature in feature_computer.feature_names})
targets = np.zeros(n_samples)
data_gen = sequence_generator(data, xcol=xcol, ycol=ycol, size=150000)
if stft:
new_data_stft = pd.DataFrame({feature + '_stft': np.zeros(n_samples) for feature in stft_feature_computer.feature_names})
for i in range(n_samples):
x, y = next(data_gen)
new_data.iloc[i, :] = feature_computer.compute(x)
targets[i] = y
if stft:
_, _, zxx = signal.stft(x)
x_stft = np.sum(np.abs(zxx), axis=0)
new_data_stft.iloc[i, :] = stft_feature_computer.compute(x_stft)
    if stft:
        new_data = pd.concat([new_data, new_data_stft], axis=1)
    # Assumed final step: attach the targets and return the per-sequence feature frame
    # promised by the docstring.
    new_data[ycol] = targets
    return new_data
import logging
from config import Config as cfg
import util.db as db
from util.Constants import Constants as cs
import pickle as pkl
import os
import matplotlib.pyplot as plt
import re
import numpy as np
import pandas as pd
import shutil
logging.basicConfig(format='%(levelname)s:%(message)s', level=cfg.logging_level)
def list_to_dict(card_list):
card_dict = {}
for card in card_list:
if card not in card_dict:
card_dict[card] = 1
else:
card_dict[card] += 1
return card_dict
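# For reference: list_to_dict(['9H', 'QS', '9H']) -> {'9H': 2, 'QS': 1}
# (equivalent to dict(collections.Counter(card_list))).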
def print_divider():
logging.debug(cs.DIVIDER)
def generate_run_id():
if cfg.run_id == 'TEST':
return cfg.run_id + '_' + str(db.get_global_max_id())
else:
return cfg.run_id
def get_pretty_time(duration, num_digits=2):
# Duration is assumed to be in seconds. Returns a string with the appropriate suffix (s/m/h)
if duration > 60**2:
return str(round(duration / 60**2, num_digits)) + 'h'
if duration > 60:
return str(round(duration / 60, num_digits)) + 'm'
else:
return str(round(duration, num_digits)) + 's'
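# For reference: get_pretty_time(42.123) -> '42.12s', get_pretty_time(90) -> '1.5m',
# get_pretty_time(5400) -> '1.5h'.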
def save_config(config, path=None):
path = 'latest' if path is None else path
    os.makedirs(cfg.config_folder, exist_ok=True)
with open(os.path.join(cfg.config_folder, path + '.pkl'), 'wb') as f:
pkl.dump(config, f)
def get_config(path=None):
path = 'latest' if path is None else path
with open(os.path.join(cfg.config_folder, path + '.pkl'), 'rb') as f:
config = pkl.load(f)
return config
def setup_file_logger(name, filename='run_log', level=cfg.logging_level):
log_setup = logging.getLogger(name)
os.makedirs(cfg.log_folder, exist_ok=True)
fileHandler = logging.FileHandler(os.path.join(cfg.log_folder, filename + '.log'), mode='a')
formatter = logging.Formatter('%(levelname)s: %(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
fileHandler.setFormatter(formatter)
log_setup.setLevel(level)
log_setup.addHandler(fileHandler)
def convert_np_hist_to_plot(np_hist):
"""
Converts a numpy representation of a histogram into a matplotlib.pyplot object
:param np_hist: tuple generated by np.histogram(vec)
:return: Returns a matplotlib.pyplot bar plot object
"""
height, bins = np_hist
width = (bins.max() - bins.min())/(len(bins) - 1)
return plt.bar(height=height, x=bins[:-1], width=width)
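# Illustrative only: build a histogram of random data and render it with the helper above.
def _example_hist_plot():
    values = np.random.randn(1000)
    bar = convert_np_hist_to_plot(np.histogram(values, bins=30))
    plt.show()
    return bar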
def get_checkpoint_model_name(cycle):
# Returns a formatted string for the title of a checkpointed model
return f'cycle_{cycle}_checkpoint'
def get_max_checkpoint_cycle(run_id):
# Returns the current maximum cycle value for all checkpointed models of a specific run id
path = os.path.join(cfg.checkpoint_folder, run_id)
    checkpoints = [int(re.search(r'\d+', cp)[0]) for cp in os.listdir(path)]
return np.max(checkpoints)
def get_model_checkpoint(run_id, cycle=-1):
# cycle of -1 (default) implies the user wants the largest checkpoint value available
if cycle == -1:
cycle = get_max_checkpoint_cycle(run_id=run_id)
path = os.path.join(cfg.checkpoint_folder, run_id, get_checkpoint_model_name(cycle=cycle) + '.pkl')
with open(path, 'rb') as f:
return pkl.load(f)
def get_model_history(run_id):
path = os.path.join(cfg.history_folder, run_id + '_history.pkl')
with open(path, 'rb') as f:
return pkl.load(f)
def get_trick_reward(trick_score, player, winner):
# Returns reward for player based on trick score and trick winner
# Positive if winner, negative if loser.
return trick_score * -1 if player != winner else trick_score
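# For reference: get_trick_reward(3, player=0, winner=0) ->  3
#                get_trick_reward(3, player=1, winner=0) -> -3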
def get_experiment_file(file):
return pd.read_csv(os.path.join(cfg.experiment_folder, file), dtype={'episodes_per_cycle': int})
def overwrite_cfg(exp, config):
config.logging_level = logging.INFO
    for key, value in exp.items():
        if not pd.isna(value):
            # Assumed behaviour: copy each non-null experiment setting onto the config.
            setattr(config, key, value)
"""
Name : c7_31_mrege_01.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import scipy as sp
import pandas as pd
#
x= pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
y = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K6'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
print(sp.shape(x))
print(sp.shape(y))
print(x)
print(y)
result = | pd.merge(x,y, on='key') | pandas.merge |
# This module loads and prepares the data
import torch, time, sys, re
import pandas as pd
from torch.nn import functional as F
from torch.utils.data import DataLoader
import numpy as np
ALPHABET = 'ACDEFGHIKLMNPQRSTVWXYZ-'
SEQ2IDX = dict(map(reversed, enumerate(ALPHABET)))
def compute_weights(data):
#Only remove '-'
msk_idx = 999
#Compute similarity matrix from windows and flatten encoding:
sim = data.flatten(1)@data.flatten(1).T
#Ensure that we do not have '-' and define seq_len from this:
    seq_len = data.argmax(dim=1) != msk_idx
# normalization factor seq_len
seq_len = seq_len.sum(-1).unsqueeze(-1)
#Compute weights:
weights = 1.0 / ((sim/seq_len)>0.8).sum(1).float()
neff = weights.sum()
return weights, neff
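def _example_compute_weights():
    """Illustrative only: a random one-hot 'alignment' of shape (N, len(ALPHABET), L)
    stands in for real data; the shapes and values are assumptions for demonstration."""
    idx = torch.randint(0, len(ALPHABET), (8, 50))        # 8 sequences of length 50
    one_hot = F.one_hot(idx, num_classes=len(ALPHABET))   # (8, 50, 23)
    data = one_hot.permute(0, 2, 1).float()               # (8, 23, 50)
    weights, neff = compute_weights(data)
    return weights, neff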
def fasta(file_path):
"""This function parses a subset of the FASTA format
https://en.wikipedia.org/wiki/FASTA_format"""
print(f"Parsing fasta '{file_path}'")
data = {
'ur_up_': [], 'accession': [],
'entry_name': [], 'offset': [],
'taxonomy': [], 'sequence': []
}
with open(file_path, 'r') as f:
for i, line in enumerate(f):
line = line.strip()
if line[0] == '>':
key = line[1:]
if i == 0:
name, offset = key.split("/")
ur_up_, acc = None, None
else:
ur_up_, acc, name_offset = key.split("|")
name, offset = name_offset.split('/')
data['ur_up_'].append(ur_up_)
data['accession'].append(acc)
data['entry_name'].append(name)
data['offset'].append(offset)
data['sequence'].append('')
data['taxonomy'].append(name.split('_')[1])
else:
data['sequence'][-1] += line
if i and (i % 50000 == 0):
print(f"Reached: {i}")
return pd.DataFrame(data=data)
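def _example_fasta_roundtrip(tmp_path="example.fasta"):
    """Illustrative only: writes two records in the header formats the parser above
    expects (first record 'name/offset', later records 'ur_up_|accession|name/offset')
    and parses them back; the file name and sequences are made up."""
    with open(tmp_path, "w") as f:
        f.write(">PROT1_ECOLI/1-5\nACDEF\n")
        f.write(">UniRef100|A0A000|PROT2_HUMAN/1-5\nACD-F\n")
    return fasta(tmp_path)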
def labels(labels_file, labels = []):
"""Parses the labels file"""
print(f"Parsing labels '{labels_file}'")
with open(labels_file, 'r') as f:
for i, line in enumerate(f):
labels.append(line.split(':')[-1].strip())
return | pd.Series(labels) | pandas.Series |
"""
Open Power System Data
Timeseries Datapackage
imputation.py : fill functions for imputation of missing data.
"""
from datetime import datetime, date, timedelta
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
def find_nan(df, res_key, headers, patch=False):
'''
Search for missing values in a DataFrame and optionally apply further
functions on each column.
Parameters
----------
df : pandas.DataFrame
DataFrame to inspect and possibly patch
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
patch : bool, default=False
If False, return unaltered DataFrame,
if True, return patched DataFrame
Returns
----------
patched: pandas.DataFrame
original df or df with gaps patched and marker column appended
nan_table: pandas.DataFrame
Contains detailed information about missing data
'''
nan_table = pd.DataFrame()
patched = pd.DataFrame()
marker_col = pd.Series(np.nan, index=df.index)
if df.empty:
return patched, nan_table
# Get the frequency/length of one period of df
one_period = pd.Timedelta(res_key)
for col_name, col in df.iteritems():
col = col.to_frame()
message = '| {:5.5} | {:6.6} | {:10.10} | {:10.10} | {:10.10} | '.format(
res_key, *col_name[0:4])
# make an empty list of NaN regions to use as default
nan_idx = pd.MultiIndex.from_arrays([
[0, 0, 0, 0],
['count', 'span', 'start_idx', 'till_idx']])
nan_list = pd.DataFrame(index=nan_idx, columns=col.columns)
# skip this column if it has no entries at all.
# This will also delete the column from the patched df
if col.empty:
continue
# tag all occurences of NaN in the data with True
# (but not before first or after last actual entry)
col['tag'] = (
(col.index >= col.first_valid_index()) &
(col.index <= col.last_valid_index()) &
col.isnull().transpose().as_matrix()
).transpose()
# make another DF to hold info about each region
nan_regs = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import csv
import time
import sys
import pandas as pd
import numpy as np
import prettyprinter as pp
import matplotlib.pyplot as plt
# pandas options
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# spotify api
from spotipy.oauth2 import SpotifyClientCredentials
from spotipy.oauth2 import SpotifyOAuth
import spotipy
# FILL THIS IN WITH YOUR OWN API CREDENTIALS
client_id = "..."
client_secret = "..."
# In[ ]:
sp = spotipy.Spotify(auth_manager = SpotifyClientCredentials(client_id = client_id,
client_secret = client_secret))
# #### READING in original dataset
# In[ ]:
df = pd.read_csv ('data.csv')
# #### REMOVING songs before year 1950
# In[ ]:
new_df = df[df['year'] > 1950]
# #### PULLING additional song features
# In[ ]:
def get_song_info(s_ids):
n = 0
p = 50
print(n)
print(p)
fields = ['song_id', 'primary_artist_id', 'album_type', 'time_signature']
with open("song_info.csv", 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
for i in range(int(len(s_ids)/50)+1):
print(i)
print("\n")
ids = s_ids[n:p]
n = n + 50
p = p + 50
print(n)
print(p)
urns = ["spotify:track:" + x for x in ids]
tracks_analysis = sp.audio_features(urns)
tracks = sp.tracks(urns)['tracks']
m = 0
for track in tracks:
s_id = track['id']
a_id = track['artists'][0]['id']
album_type = track['album']['album_type']
time_signature = tracks_analysis[m]['time_signature']
m = m + 1
csvwriter.writerow([s_id, a_id, album_type, time_signature])
i = 0
s_ids = list(new_df['id'])
start = time.time()
get_song_info(s_ids)
end = time.time()
print("time:", str(end - start))
# #### PULLING additional artist info
# In[ ]:
def get_artists_info(a_ids):
n = 0
p = 50
print(n)
print(p)
fields = ['artist_id', 'genres', 'followers', 'popularity']
with open("artist_info.csv", 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
for i in range(int(len(a_ids)/50)+1):
print(i)
print("\n")
ids = a_ids[n:p]
n = n + 50
p = p + 50
print(n)
print(p)
urns = ["spotify:artist:" + x for x in ids]
artists = sp.artists(urns)['artists']
for artist in artists:
a_id = artist['id']
followers = artist['followers']['total']
genres = artist['genres']
popularity = artist['popularity']
csvwriter.writerow([a_id, genres, followers, popularity])
start = time.time()
udf = pd.read_csv('song_info.csv')
ua_ids = list(set(udf["primary_artist_id"]))
get_artists_info(ua_ids)
end = time.time()
print("time:", str(end - start))
# #### MERGING pulled data into main df
# In[ ]:
# MERGING SONG DATA
new_df = new_df.rename(columns={"id": "song_id", "popularity": "song_popularity"}, errors="raise")
sdf = pd.read_csv('song_info.csv')
m_df = pd.merge(new_df, sdf, on="song_id")
m_df
# In[ ]:
# MERGING ARTIST INFO
adf = | pd.read_csv('artist_info.csv') | pandas.read_csv |
import pandas as pd
import base64
import pickle
import os
import sys
import json
import re
#Concatenates data_info and data_links.
data_info = pd.read_csv("data_info.csv", names= ['text', 'id', 'url'])
#data_info = data_info.head(x)
print(data_info)
i =0
while i< len(data_info['id']):
item = str(data_info['id'][i])
item = re.sub("\^\^http://www.w3.org/2001/XMLSchema#integer", "", item)
data_info['id'][i] = item
i+=1
names = []
j=0
while j< len(data_info['id']):
names.append(str(re.sub("http://dbpedia.org/resource/", "", data_info['url'][j])))
j+=1
data_info['title'] = names
# Because some of the titles were numbers, the following preprocessing steps couldn't work with those articles. We cast them to strings here.
i = 0
for item in data_info["title"]:
    # Cast purely numeric titles to strings so the string-based steps below work.
    if isinstance(item, (int, float)):
        data_info["title"][i] = str(item)
    i += 1
#data_info = data_info.sort_values('url')
#data_info = data_info.reset_index(drop=True)
print(data_info['url'])
data_links = pd.read_csv('data_links.csv')
#data_links = data_links.head(y)
#data_links = data_links.sort_values('url')
#data_links = data_links.reset_index(drop=True)
print(data_links)
# Merge all internal links for each respective article in a list.
sorted_links = []
sorted_urls= []
k = 0
while k<len(data_links['url']):
current_url_list = []
current_url = data_links['url'][k]
while data_links['url'][k] == current_url:
current_url_list.append(data_links['internal_links'][k])
k+=1
if k > len(data_links['url'])-1:
break
sorted_links.append(current_url_list)
sorted_urls.append(current_url)
#print(current_url_list)
print(sorted_links)
print(current_url)
data_links = | pd.DataFrame(data=sorted_urls, columns=['url']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels in `list` converted to the values pandas gives for MultiIndex levels.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
| assert_frame_equal(targ, res1) | pandas.util.testing.assert_frame_equal |
""" Math attributes
:Author: <NAME> <<EMAIL>>
:Date: 2017-05-10
:Copyright: 2017, Karr Lab
:License: MIT
"""
from .. import core
import json
import numpy
import pandas
__all__ = [
'ArrayAttribute',
'TableAttribute',
]
class ArrayAttribute(core.LiteralAttribute):
""" numpy.ndarray attribute
Attributes:
min_length (:obj:`int`): minimum length
max_length (:obj:`int`): maximum length
default (:obj:`numpy.ndarray`): default value
"""
def __init__(self, min_length=0, max_length=float('inf'), default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
min_length (:obj:`int`, optional): minimum length
max_length (:obj:`int`, optional): maximum length
default (:obj:`numpy.array`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
if default is not None and not isinstance(default, numpy.ndarray):
raise ValueError('`default` must be a `numpy.array` or `None`')
if not isinstance(min_length, (int, float)) or min_length < 0:
raise ValueError('`min_length` must be a non-negative integer')
if not isinstance(max_length, (int, float)) or max_length < min_length:
raise ValueError('`max_length` must be an integer greater than or equal to `min_length`')
super(ArrayAttribute, self).__init__(default=default, none_value=none_value,
verbose_name=verbose_name, description=description,
primary=primary, unique=unique)
if primary:
self.type = numpy.ndarray
else:
self.type = (numpy.ndarray, None.__class__)
self.min_length = min_length
self.max_length = max_length
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): semantically equivalent representation
Returns:
:obj:`tuple` of :obj:`numpy.array`, :obj:`core.InvalidAttribute` or :obj:`None`: tuple of cleaned value and cleaning error
"""
if self.default is not None:
dtype = self.default.dtype.type
else:
dtype = None
if value is None:
value = None
error = None
elif isinstance(value, str) and value == '':
value = None
error = None
elif isinstance(value, str):
try:
value = numpy.array(json.loads(value), dtype)
error = None
except Exception:
value = None
                error = core.InvalidAttribute(
                    self, ['Unable to parse numpy array from string'])
elif isinstance(value, (list, tuple, numpy.ndarray)):
value = numpy.array(value, dtype)
error = None
else:
value = None
error = core.InvalidAttribute(self, [
('ArrayAttribute must be None, an empty string, '
'a JSON-formatted array, a tuple, a list, '
'or a numpy array')
])
return (value, error)
def validate(self, obj, value):
""" Determine if :obj:`value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`numpy.array`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, other return
list of errors as an instance of :obj:`core.InvalidAttribute`
"""
errors = []
if value is not None:
if not isinstance(value, numpy.ndarray):
errors.append('Value must be an instance of `numpy.ndarray`')
elif self.default is not None:
for elem in numpy.nditer(value):
                    if not issubclass(elem.dtype.type, self.default.dtype.type):
errors.append('Array elements must be of type `{}`'.format(self.default.dtype.type.__name__))
break
        if self.min_length and (value is None or len(value) < self.min_length):
            errors.append('Value must have at least {:d} elements'.format(self.min_length))
        if self.max_length and value is not None and len(value) > self.max_length:
            errors.append('Value must have no more than {:d} elements'.format(self.max_length))
if self.primary and (value is None or len(value) == 0):
errors.append('{} value for primary attribute cannot be empty'.format(
self.__class__.__name__))
if errors:
return core.InvalidAttribute(self, errors)
return None
def validate_unique(self, objects, values):
""" Determine if the attribute values are unique
Args:
objects (:obj:`list` of :obj:`Model`): list of :obj:`Model` objects
values (:obj:`list` of :obj:`numpy.array`): list of values
Returns:
:obj:`core.InvalidAttribute` or None: None if values are unique, otherwise return a
list of errors as an instance of :obj:`core.InvalidAttribute`
"""
str_values = []
for v in values:
str_values.append(self.serialize(v))
return super(ArrayAttribute, self).validate_unique(objects, str_values)
def serialize(self, value):
""" Serialize string
Args:
value (:obj:`numpy.array`): Python representation
Returns:
:obj:`str`: simple Python representation
"""
if value is not None:
return json.dumps(value.tolist())
return ''
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`numpy.array`): value of the attribute
Returns:
:obj:`list`: simple Python representation of a value of the attribute
"""
if value is None:
return None
else:
return value.tolist()
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`list`): simple Python representation of a value of the attribute
Returns:
:obj:`numpy.array`: decoded value of the attribute
"""
if json is None:
return None
else:
if self.default is not None:
dtype = self.default.dtype.type
else:
dtype = None
return numpy.array(json, dtype)
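# Illustrative usage sketch (not part of the original module): round-tripping a value
# through an ArrayAttribute. The dtype is inferred from `default`, so a float64 default
# yields float64 arrays on deserialization.
#
#     attr = ArrayAttribute(default=numpy.array([], dtype=numpy.float64))
#     text = attr.serialize(numpy.array([1.0, 2.0, 3.0]))   # '[1.0, 2.0, 3.0]'
#     value, error = attr.deserialize(text)                 # (array([1., 2., 3.]), None)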
class TableAttribute(core.LiteralAttribute):
""" pandas.DataFrame attribute
Attributes:
default (:obj:`pandas.DataFrame`): default value
"""
def __init__(self, default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
default (:obj:`pandas.DataFrame`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
if default is not None and not isinstance(default, pandas.DataFrame):
raise ValueError('`default` must be a `pandas.DataFrame` or `None`')
super(TableAttribute, self).__init__(default=default, none_value=none_value,
verbose_name=verbose_name, description=description,
primary=primary, unique=unique)
if primary:
self.type = pandas.DataFrame
else:
self.type = (pandas.DataFrame, None.__class__)
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): semantically equivalent representation
Returns:
:obj:`tuple` of :obj:`pandas.DataFrame`, :obj:`core.InvalidAttribute` or :obj:`None`: tuple of cleaned value and cleaning error
"""
if self.default is not None:
dtype = self.default.values.dtype.type
else:
dtype = None
if value is None:
value = None
error = None
elif isinstance(value, str) and value == '':
value = None
error = None
elif isinstance(value, str):
try:
dict_value = json.loads(value)
index = dict_value.pop('_index')
value = pandas.DataFrame.from_dict(dict_value, dtype=dtype)
value.index = pandas.Index(index)
error = None
except Exception:
value = None
                error = core.InvalidAttribute(
                    self, ['Unable to parse pandas.DataFrame from string'])
elif isinstance(value, dict):
try:
index = value.pop('_index')
value = pandas.DataFrame(value)
value = value.astype(dtype)
value.index = pandas.Index(index)
error = None
except Exception:
value = None
                error = core.InvalidAttribute(
                    self, ['Unable to parse pandas.DataFrame from dict'])
elif isinstance(value, pandas.DataFrame):
error = None
else:
value = None
error = core.InvalidAttribute(self, [
('TableAttribute must be None, an empty string, '
'a JSON-formatted dict, a dict, '
'or a pandas.DataFrame')
])
return (value, error)
def validate(self, obj, value):
""" Determine if :obj:`value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`pandas.DataFrame`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, other return list of
errors as an instance of :obj:`core.InvalidAttribute`
"""
errors = []
if value is not None:
if not isinstance(value, pandas.DataFrame):
errors.append('Value must be an instance of `pandas.DataFrame`')
elif self.default is not None:
for elem in numpy.nditer(value.values):
if not issubclass(elem.dtype.type, self.default.values.dtype.type):
errors.append('Array elements must be of type `{}`'.format(self.default.values.dtype.type.__name__))
break
if self.primary and (value is None or value.values.size == 0):
errors.append('{} value for primary attribute cannot be empty'.format(
self.__class__.__name__))
if errors:
return core.InvalidAttribute(self, errors)
return None
def validate_unique(self, objects, values):
""" Determine if the attribute values are unique
Args:
objects (:obj:`list` of :obj:`Model`): list of :obj:`Model` objects
values (:obj:`list` of :obj:`pandas.DataFrame`): list of values
Returns:
:obj:`core.InvalidAttribute` or None: None if values are unique, otherwise return a
list of errors as an instance of :obj:`core.InvalidAttribute`
"""
str_values = []
for v in values:
str_values.append(self.serialize(v))
return super(TableAttribute, self).validate_unique(objects, str_values)
def serialize(self, value):
""" Serialize string
Args:
value (:obj:`pandas.DataFrame`): Python representation
Returns:
:obj:`str`: simple Python representation
"""
if value is not None:
dict_value = value.to_dict()
dict_value['_index'] = value.index.values.tolist()
return json.dumps(dict_value)
return ''
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`pandas.DataFrame`): value of the attribute
Returns:
:obj:`dict`: simple Python representation of a value of the attribute
"""
if value is None:
return None
else:
dict_value = value.to_dict()
dict_value['_index'] = value.index.values.tolist()
return dict_value
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`dict`): simple Python representation of a value of the attribute
Returns:
:obj:`pandas.DataFrame`: decoded value of the attribute
"""
if json is None:
return None
else:
if self.default is not None:
dtype = self.default.values.dtype.type
else:
dtype = None
index = json.pop('_index')
value = | pandas.DataFrame.from_dict(json, dtype=dtype) | pandas.DataFrame.from_dict |
import tensorflow as tf
from random import shuffle, sample
import os
import random
import tensorflow.keras
import pandas as pd
import sqlite3
import numpy as np
import pickle
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import utils
#from import utils
from multilabeldirectoryiterator import MultiLabelDirectoryIterator
from fullimagepointcroppingloader import FullImagePointCroppingLoader
class KerasDataset:
SQLITE = "SQLITE"
CSV = "CSV"
def __init__(self,
filepath,
label_key,
image_path_key,
category_limit=10000000,
query=None,
save_path=None,
img_width=256,
img_height=256,
batch_size=16,
patch_sizes=[]):
self.save_path = save_path
self.IMG_WIDTH = img_width
self.IMG_HEIGHT = img_height
self.BATCH_SIZE = batch_size
if ".sqlite" in filepath:
self.X_train, \
self.X_val, \
self.X_test, \
self.y_train, \
self.y_val, \
self.y_test, \
self.classes, \
self.class_weight_dict = self.package_from_sqlite(filepath, query, label_key, image_path_key, category_limit, save_path)
self.mean_image = self.calculate_mean_image(self.X_train)
elif ".csv" in filepath:
self.X_train, \
self.X_val, \
self.X_test, \
self.y_train, \
self.y_val, \
self.y_test, \
self.classes, \
self.class_weight_dict = self.package_from_csv(filepath, label_key, image_path_key, category_limit, save_path)
self.mean_image = self.calculate_mean_image(self.X_train)
else:
self.X_train, \
self.X_val, \
self.X_test, \
self.y_train, \
self.y_val, \
self.y_test, \
self.classes, \
self.class_weight_dict = self.load_saved_data(filepath)
self.mean_image = self.load_mean_image(filepath)
self.training = self.make_train_generator(self.X_train, self.y_train, patch_sizes)
self.validation = self.make_val_generator(self.X_val, self.y_val, patch_sizes)
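    # Illustrative usage sketch (not part of the original class); the CSV path and
    # column names below are hypothetical:
    #
    #     dataset = KerasDataset('photos.csv', label_key='species',
    #                            image_path_key='filepath', save_path='packaged/',
    #                            img_width=256, img_height=256, batch_size=16)
    #     model.fit(dataset.training, validation_data=dataset.validation,
    #               class_weight=dataset.class_weight_dict)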
def train_val_test(self, df, label_key, image_path_key, limit):
LABEL_KEY = label_key
SAMPLE_SIZE = limit
labels = df[LABEL_KEY].unique()
dfs = []
for label in labels:
sub_df = df[df[LABEL_KEY] == label]
if len(sub_df) <= SAMPLE_SIZE:
dfs.append(sub_df)
else:
dfs.append(sub_df.sample(n=SAMPLE_SIZE))
df = pd.concat(dfs)
X = []
y = []
for index, row in df.iterrows():
X.append(row[image_path_key])
y.append(row[LABEL_KEY])
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
        from sklearn.utils import class_weight as sk_class_weight
        class_weights = sk_class_weight.compute_class_weight(
            class_weight='balanced', classes=np.unique(y), y=y)
        class_weight_dict = dict(enumerate(class_weights))
onehot_y = np.zeros((len(y), len(le.classes_)), dtype="float16")
for i, label_index in enumerate(y):
onehot_y[i, label_index] = 1.
y = onehot_y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
return X_train, X_val, X_test, y_train, y_val, y_test, le.classes_, class_weight_dict
def package_from_dataframe(self, df, label_key, image_path_key, category_limit, save_path=None):
X_train, X_val, X_test, y_train, y_val, y_test, classes, class_weight_dict = self.train_val_test(df,
label_key=label_key,
image_path_key=image_path_key,
limit=category_limit)
if save_path is not None:
if not os.path.isdir(save_path):
os.makedirs(save_path)
self.pickle_objects(save_path,
[X_train, X_val, X_test, y_train, y_val, y_test, classes, class_weight_dict],
["X_train", "X_val", "X_test", "y_train", "y_val", "y_test", "classes", "class_weight_dict"])
self.save_labels(save_path, classes)
return X_train, X_val, X_test, y_train, y_val, y_test, classes, class_weight_dict
def package_from_csv(self, csv_file, label_key, image_path_key, category_limit, save_path=None):
all_photos = pd.read_csv(csv_file)
return self.package_from_dataframe(all_photos, label_key=label_key, image_path_key=image_path_key,
category_limit=category_limit, save_path=save_path)
def package_from_sqlite(self, sqlite_file, query, label_key, image_path_key, category_limit, save_path=None):
con = sqlite3.connect(sqlite_file)
all_photos = | pd.read_sql_query(query, con) | pandas.read_sql_query |
#!usr/bin/env ipython
# Functions related to loading, saving, processing datasets
import tensorflow.keras.datasets as datasets
from tensorflow.keras import Model
import numpy as np
import pandas as pd
import os
from pathlib import Path
from scipy.stats import entropy
from scipy.spatial.distance import cosine
from sklearn.random_projection import GaussianRandomProjection
from sklearn.decomposition import PCA
import ipdb
from cfg_utils import load_cfg
from model_utils import build_model
# CONSTANTS
FOREST_PATH = os.path.join('data', 'covtype.data')
ADULT_PATH = os.path.join('data', 'adult.data')
ADULT_TEST_PATH = os.path.join('data', 'adult.test')
CIFAR10_PRETRAIN_PATH = os.path.join('data', 'cifar10_pretrain.npy')
def min_max_rescale(df_train, df_test, good_columns=None):
if good_columns is None:
col_mins = df_train.min(axis=0)
col_maxs = df_train.max(axis=0)
col_ranges = col_maxs - col_mins
good_columns = (col_ranges > 0)
print('Deleting', df_train.shape[1] - sum(good_columns), 'columns for not exhibiting variability')
df_train = df_train[:, good_columns]
df_test = df_test[:, good_columns]
print('Rescaling to [0, 1]...')
col_mins = df_train.min(axis=0)
col_maxs = df_train.max(axis=0)
col_ranges = np.float32(col_maxs - col_mins)
# if there's no variability, basically just mapping it to 0.5
col_ranges[col_ranges == 0] = 2*col_maxs[col_ranges == 0] + 1e-5
df_train = (df_train - col_mins)/col_ranges
df_test = (df_test - col_mins)/col_ranges
assert np.isnan(df_train).sum() == 0
assert np.isnan(df_test).sum() == 0
return df_train, df_test
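# Quick sketch of the behaviour (illustrative): zero-range training columns are dropped,
# the rest are mapped to [0, 1] using training-set minima/maxima, and the same transform
# is applied to the test data.
#
#     train = np.array([[0., 5.], [10., 5.]])   # second column has zero range
#     test = np.array([[5., 5.]])
#     min_max_rescale(train, test)              # -> (array([[0.], [1.]]), array([[0.5]]))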
def load_data(options, replace_index):
# these are shared options
data_type = options['name']
data_privacy = 'all'
print('WARNING: Data privacy is fixed to all right now')
if data_type == 'mnist':
flatten = options['flatten']
binary = options['binary']
if binary:
# only care about doing this for binary classification atm, could just make an option
enforce_max_norm = True
else:
enforce_max_norm = False
if 'preprocessing' in options:
if options['preprocessing'] == 'PCA':
project = True
pca = True
crop = False
elif options['preprocessing'] == 'GRP':
project = True
pca = False
crop = False
elif options['preprocessing'] == 'crop':
project = False
pca = False
crop = True
else:
project = False
pca = False
crop = False
x_train, y_train, x_test, y_test = load_mnist(binary=binary,
enforce_max_norm=enforce_max_norm,
flatten=flatten,
data_privacy=data_privacy,
project=project,
crop=crop,
pca=pca)
elif data_type == 'cifar10':
flatten = options['flatten']
binary = options['binary']
subset = options['subset']
if binary:
enforce_max_norm = True
else:
enforce_max_norm = False
if flatten:
project = True
pca = True
else:
project = False
pca = False
x_train, y_train, x_test, y_test = load_cifar10(binary=binary,
enforce_max_norm=enforce_max_norm,
flatten=flatten,
data_privacy=data_privacy,
project=project,
pca=pca,
subset=subset)
elif data_type == 'cifar10_pretrain':
binary = options['binary']
if binary:
enforce_max_norm = True
else:
enforce_max_norm = False
x_train, y_train, x_test, y_test = load_cifar10_pretrain(binary=binary,
enforce_max_norm=enforce_max_norm)
elif data_type == 'cifar100':
# No options here
x_train, y_train, x_test, y_test = load_cifar100()
elif data_type == 'forest':
x_train, y_train, x_test, y_test = load_forest(data_privacy=data_privacy)
elif data_type == 'adult':
pca = False
if 'preprocessing' in options and options['preprocessing'] == 'PCA':
print('WARNING: When are we doing PCA with adult?')
pca = True
x_train, y_train, x_test, y_test = load_adult(data_privacy=data_privacy, pca=pca)
else:
raise ValueError(data_type)
x_train, y_train, x_vali, y_vali, x_test, y_test = validation_split(x_train, y_train, x_test, y_test, replace_index)
# Convert everything to float32
x_train = np.float32(x_train)
y_train = np.float32(y_train)
x_vali = np.float32(x_vali)
y_vali = np.float32(y_vali)
x_test = np.float32(x_test)
y_test = np.float32(y_test)
return x_train, y_train, x_vali, y_vali, x_test, y_test
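# Illustrative call (the options keys mirror the branches above; the values are
# hypothetical):
#
#     options = {'name': 'mnist', 'flatten': True, 'binary': True, 'preprocessing': 'PCA'}
#     x_tr, y_tr, x_va, y_va, x_te, y_te = load_data(options, replace_index=None)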
def validation_split(x_train, y_train, x_test, y_test, replace_index):
# we need to generate a validation set (do it from the train set)
N = x_train.shape[0]
n_vali = int(0.1*N)
vali_idx = range(n_vali)
train_idx = [i for i in range(N) if i not in vali_idx]
assert len(set(vali_idx).intersection(set(train_idx))) == 0
x_vali = x_train[vali_idx]
y_vali = y_train[vali_idx]
x_train = x_train[train_idx]
y_train = y_train[train_idx]
if replace_index:
replace_index = int(replace_index)
# we always replace with ELEMENT 0 (wlog, ish), then don't use the first row
# (this is to avoid an effect where experiments where the replace_index is low encounter an unusually
# low-variance batch at the start of training!)
special_idx = 0
x_special = x_train[special_idx]
y_special = y_train[special_idx]
x_train[replace_index] = x_special
y_train[replace_index] = y_special
x_train = np.delete(x_train, special_idx, axis=0)
y_train = np.delete(y_train, special_idx, axis=0)
return x_train, y_train, x_vali, y_vali, x_test, y_test
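# Split sketch (illustrative): the first 10% of training rows become the validation set.
# If replace_index is given, that row of the remaining training data is overwritten with
# a copy of row 0 and row 0 is then dropped, so the returned training set is one example
# shorter.
#
#     x_tr, y_tr, x_va, y_va, x_te, y_te = validation_split(x_train, y_train,
#                                                            x_test, y_test, None)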
def load_forest(data_privacy='all'):
path = os.path.join('data', 'forest_' + data_privacy + '.npy')
try:
data = np.load(path, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
except FileNotFoundError:
print('Loading...')
all_data = pd.read_csv(FOREST_PATH, header=None)
# select just types 1 and 2 (these are the most common)
print('Selecting classes 1 and 2')
binary_data = all_data.loc[all_data.iloc[:, -1].isin({1, 2}), :]
# split into features and labels
y = binary_data.iloc[:, -1].values
# rescale to 0 and 1!
y = y - 1
assert set(y) == set([0, 1])
features = binary_data.iloc[:, :-1].values
assert features.shape[1] == 54
N = features.shape[0]
print('Resulting number of examples:', N)
# test-train split
print('Doing test-train split')
train_frac = 0.85
n_train = int(N*train_frac)
train_idx = np.random.choice(N, n_train, replace=False)
test_idx = [x for x in range(N) if x not in train_idx]
print('n train:', n_train, 'n test:', len(test_idx))
x_train = features[train_idx, :]
x_test = features[test_idx, :]
y_train = y[train_idx]
y_test = y[test_idx]
# need to keep this to make sure the columns are all the same... when we do public/private split
x_train_orig = x_train.copy()
# do public/private split
x_train, y_train, x_test, y_test = public_private_split('forest', data_privacy,
x_train, y_train,
x_test, y_test)
# now we need to normalise this
# rescale to 0-1 first
col_mins = x_train_orig.min(axis=0)
col_maxs = x_train_orig.max(axis=0)
col_ranges = col_maxs - col_mins
good_columns = (col_ranges > 0)
del x_train_orig
x_train, x_test = min_max_rescale(x_train, x_test, good_columns=good_columns)
# and NOW we project to the unit sphere
print('Projecting to sphere...')
x_train = x_train / np.linalg.norm(x_train, axis=1).reshape(-1, 1)
x_test = x_test / np.linalg.norm(x_test, axis=1).reshape(-1, 1)
assert np.all(np.abs(np.linalg.norm(x_train, axis=1) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=1) - 1) < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
print('Saving...')
np.save(path, data)
return x_train, y_train, x_test, y_test
def public_private_split(dataset, data_privacy, x_train, y_train, x_test, y_test):
"""
"""
if data_privacy == 'all':
print('Including all data')
else:
print('Splitting data into public/private!')
split_path = os.path.join('data', dataset + '_public_private_split.npy')
try:
split = np.load(split_path, allow_pickle=True).item()
print('Loaded pre-computed split from', split_path)
public_train_idx = split['public_train_idx']
public_test_idx = split['public_test_idx']
private_train_idx = split['private_train_idx']
private_test_idx = split['private_test_idx']
except FileNotFoundError:
print('No pre-defined split found!')
N_train = x_train.shape[0]
N_test = x_test.shape[0]
public_train_idx = np.random.choice(N_train, int(0.5*N_train), replace=False)
public_test_idx = np.random.choice(N_test, int(0.5*N_test), replace=False)
private_train_idx = np.array([i for i in range(N_train) if i not in public_train_idx])
private_test_idx = np.array([i for i in range(N_test) if i not in public_test_idx])
assert len(set(public_train_idx).intersection(set(private_train_idx))) == 0
assert len(set(public_test_idx).intersection(set(private_test_idx))) == 0
split = {'public_train_idx': public_train_idx,
'public_test_idx': public_test_idx,
'private_train_idx': private_train_idx,
'private_test_idx': private_test_idx}
np.save(split_path, split)
print('Saved split to', split_path)
if data_privacy == 'public':
x_train = x_train[public_train_idx]
y_train = y_train[public_train_idx]
x_test = x_test[public_test_idx]
y_test = y_test[public_test_idx]
elif data_privacy == 'private':
x_train = x_train[private_train_idx]
y_train = y_train[private_train_idx]
x_test = x_test[private_test_idx]
y_test = y_test[private_test_idx]
return x_train, y_train, x_test, y_test
def load_mnist(binary=False, enforce_max_norm=False, flatten=True,
data_privacy='all', project=True, pca=False, crop=False):
dataset_identifier = 'mnist' + '_' + data_privacy + '_binary'*binary + '_maxnorm'*enforce_max_norm + '_square'*(not flatten) + '_pca'*pca + '_crop'*crop + '.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
# cant load from file, build it up again
mnist = datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train, x_test, y_test = public_private_split('mnist', data_privacy, x_train, y_train, x_test, y_test)
if binary:
# keep only 3 and 5 (I chose these randomly)
keep_train = (y_train == 3) | (y_train == 5)
keep_test = (y_test == 3) | (y_test == 5)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (5 is 1, 3 is 0)
y_train[y_train == 5] = 1
y_train[y_train == 3] = 0
y_test[y_test == 5] = 1
y_test[y_test == 3] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
if crop:
assert x_train.shape[1:] == (28, 28)
assert x_test.shape[1:] == (28, 28)
x_train = x_train[:, 9:19, 9:19]
x_test = x_test[:, 9:19, 9:19]
side_length = 10
else:
side_length = 28
if flatten:
x_train = x_train.reshape(-1, side_length*side_length)
x_test = x_test.reshape(-1, side_length*side_length)
if project:
# you can only project flattened data
# by default we do gaussian random projections
if pca:
# do PCA down to 50
# in the Abadi paper they do 60 dimensions, but to help comparison with Wu I'd rather do 50 here
transformer = PCA(n_components=50)
else:
# do random projection on MNIST
# in the Wu paper they project to 50 dimensions
transformer = GaussianRandomProjection(n_components=50)
# fit to train data
transformer.fit(x_train)
# transform everything
x_train = transformer.transform(x_train)
x_test = transformer.transform(x_test)
assert x_train.shape[1] == 50
assert x_test.shape[1] == 50
else:
# keeping it not-flat
# just add a sneaky little dimension on there for the CNN
x_train = x_train.reshape(-1, side_length, side_length, 1)
x_test = x_test.reshape(-1, side_length, side_length, 1)
if enforce_max_norm:
# slightly different normalisation to what's normal in MNIST
if len(x_train.shape) == 2:
axis = (1)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1)
elif len(x_train.shape) == 4:
axis = (1, 2)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1, 1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1, 1, 1)
else:
raise ValueError(x_train.shape)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
assert np.all(np.abs(np.linalg.norm(x_train, axis=axis) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=axis) - 1) < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
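# Shape sketch (illustrative): the binary, flattened, PCA-projected variant yields
# 50-dimensional inputs with labels in {0, 1}.
#
#     x_tr, y_tr, x_te, y_te = load_mnist(binary=True, enforce_max_norm=True,
#                                         flatten=True, project=True, pca=True)
#     # x_tr.shape[1] == 50; set(y_tr) == {0, 1}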
def load_cifar10(binary=False, enforce_max_norm=False, flatten=True,
data_privacy='all', project=True, pca=False, crop=False,
subset: bool = True):
"""
copying what i did for mnist, but for cifar10
cropping is also a 10x10 square in the middle
"""
dataset_identifier = 'cifar10' + '_' + data_privacy + '_binary'*binary + '_maxnorm'*enforce_max_norm + '_square'*(not flatten) + '_pca'*pca + '_crop'*crop + '_subset'*subset + '.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
cifar10 = datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = y_train[:, 0]
y_test = y_test[:, 0]
x_train, y_train, x_test, y_test = public_private_split('cifar10', data_privacy,
x_train, y_train,
x_test, y_test)
if binary:
# keep only 3 and 5
# coincidentally, although i chose 3 and 5 randomly for MNIST,
# in CIFAR10 these correspond to cats and dogs, which is a convenient pair
keep_train = (y_train == 0) | (y_train == 2)
keep_test = (y_test == 0) | (y_test == 2)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (2 is 1, 0 is 0)
y_train[y_train == 2] = 1
y_train[y_train == 0] = 0
y_test[y_test == 2] = 1
y_test[y_test == 0] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
if crop:
assert x_train.shape[1:] == (32, 32, 3)
assert x_test.shape[1:] == (32, 32, 3)
x_train = x_train[:, 11:21, 11:21, :]
x_test = x_test[:, 11:21, 11:21, :]
side_length = 10
else:
side_length = 32
if flatten:
# greyscale conversion from RGB
# Y = 0.2989 R + 0.5870 G + 0.1140 B
# greyscale_weights = [0.2989, 0.5870, 0.1140]
# x_train = 1 - np.dot(x_train, greyscale_weights)
# x_test = 1 - np.dot(x_test, greyscale_weights)
x_train = x_train.reshape(-1, side_length*side_length*3)
x_test = x_test.reshape(-1, side_length*side_length*3)
if project:
# you can only project flattened data
n_dim = 50
# by default we do gaussian random projections
if pca:
# do PCA down to 50
# in the Abadi paper they do 60 dimensions, but to help comparison with Wu I'd rather do 50 here
transformer = PCA(n_components=n_dim)
else:
# do random projection on MNIST
# in the Wu paper they project to 50 dimensions
transformer = GaussianRandomProjection(n_components=n_dim)
# fit to train data
transformer.fit(x_train)
# transform everything
x_train = transformer.transform(x_train)
x_test = transformer.transform(x_test)
assert x_train.shape[1] == n_dim
assert x_test.shape[1] == n_dim
else:
# keeping it not-flat
assert len(x_train.shape) == 4
assert len(x_test.shape) == 4
if enforce_max_norm:
if len(x_train.shape) == 2:
axis = (1)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1)
elif len(x_train.shape) == 4:
axis = (1, 2)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1, 1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1, 1, 1)
else:
raise ValueError(x_train.shape)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
assert np.all(np.abs(np.linalg.norm(x_train, axis=axis) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=axis) - 1) < 1e-6)
if subset:
# Copying Yeom, take a random 15,000 samples from the dataset
# and make the train and test splits the same size
# take the train from the train
assert x_train.shape[0] >= 15000
            train_idx_subset = np.random.choice(x_train.shape[0], 15000, replace=False)
            remaining_available = [x for x in range(x_train.shape[0])
                                   if x not in train_idx_subset]
            assert x_test.shape[0] < 15000
            remaining_required = 15000 - x_test.shape[0]
            test_idx_additional = np.random.choice(remaining_available, remaining_required, replace=False)
            for x in test_idx_additional:
                assert x not in train_idx_subset
            # take the extra test examples from the original training pool *before*
            # subsetting it, so the added test rows never overlap the training subset
            x_test_additional = x_train[test_idx_additional]
            y_test_additional = y_train[test_idx_additional]
            x_train = x_train[train_idx_subset]
            y_train = y_train[train_idx_subset]
            x_test = np.concatenate([x_test, x_test_additional])
            y_test = np.concatenate([y_test, y_test_additional])
assert x_train.shape[0] == 15000
assert y_train.shape[0] == 15000
assert x_test.shape[0] == 15000
assert y_test.shape[0] == 15000
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
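# Shape sketch (illustrative): with the default subset=True, the flattened PCA variant
# returns equally sized train and test splits of 15,000 examples x 50 features.
#
#     x_tr, y_tr, x_te, y_te = load_cifar10(binary=False, enforce_max_norm=False,
#                                           flatten=True, project=True, pca=True)
#     # x_tr.shape == (15000, 50); x_te.shape == (15000, 50)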
def load_cifar10_pretrain(binary=False, enforce_max_norm=False):
"""
"""
dataset_identifier = f'cifar10_pretrain{binary*"_binary"}{enforce_max_norm*"_maxnorm"}.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
print(f'Attempting to load data from {CIFAR10_PRETRAIN_PATH}')
try:
cifar10_pretrain = np.load(CIFAR10_PRETRAIN_PATH, allow_pickle=True).item()
x_train = cifar10_pretrain['x_train']
x_test = cifar10_pretrain['x_test']
y_train = cifar10_pretrain['y_train']
y_test = cifar10_pretrain['y_test']
print(f'Loaded pre-processed data from {CIFAR10_PRETRAIN_PATH}')
except FileNotFoundError:
print(f'ERROR: Couldn\'t find {CIFAR10_PRETRAIN_PATH}!')
print('... are you sure you have already preprocessed CIFAR10 using the CIFAR100 model?')
raise FileNotFoundError
if binary:
# Copied from load_cifar10
# keep only 3 and 5
# coincidentally, although i chose 3 and 5 randomly for MNIST,
# in CIFAR10 these correspond to cats and dogs, which is a convenient pair
keep_train = (y_train == 0) | (y_train == 2)
keep_test = (y_test == 0) | (y_test == 2)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (2 is 1, 0 is 0)
y_train[y_train == 2] = 1
y_train[y_train == 0] = 0
y_test[y_test == 2] = 1
y_test[y_test == 0] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
if enforce_max_norm:
assert len(x_train.shape) == 2
train_norms = np.linalg.norm(x_train, axis=1).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=1).reshape(-1, 1)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
# Don't need an abs because it just neesd to be BELOW 1, not equal to q
assert np.all(np.linalg.norm(x_train, axis=1) - 1 < 1e-6)
assert np.all(np.linalg.norm(x_test, axis=1) - 1 < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
def load_cifar100():
"""
We only use CIFAR100 for pretraining a CNN for CIFAR10, so we don't need to
be able to flatten, etc.
"""
dataset_identifier = 'cifar100.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
cifar100 = datasets.cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
y_train = y_train[:, 0]
y_test = y_test[:, 0]
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
# keeping it not-flat
assert len(x_train.shape) == 4
assert len(x_test.shape) == 4
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
def load_adult(data_privacy='all', pca=False):
"""
"""
path = os.path.join('data', 'adult' + '_' + data_privacy + '_pca'*pca + '.npy')
try:
data = np.load(path, allow_pickle=True).item()
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
print('Loaded from file')
except FileNotFoundError:
adult_header = ['age',
'workclass',
'fnlwgt',
'education',
'education-num',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'capital-gain',
'capital-loss',
'hours-per-week',
'native-country',
'label']
df = pd.read_csv(ADULT_PATH, sep=', ', header=None)
df_test = pd.read_csv(ADULT_TEST_PATH, sep=', ', skiprows=1, header=None)
df.columns = adult_header
df_test.columns = adult_header
label_replace_dict = {'>50K': 1, '<=50K': 0,
'>50K.': 1, '<=50K.': 0}
y_train = df['label'].replace(label_replace_dict).values
y_test = df_test['label'].replace(label_replace_dict).values
assert set(y_train) == set([0, 1])
assert set(y_test) == set([0, 1])
x_train = df.iloc[:, :-1]
x_test = df_test.iloc[:, :-1]
# need to one-hot encode
        # pd.get_dummies does this; it is also smart about identifying categorical columns
x_train = pd.get_dummies(x_train, drop_first=True)
        x_test = pd.get_dummies(x_test, drop_first=True)
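        # Note (sketch, not in the original): encoding train and test separately can yield
        # mismatched dummy columns; a common fix is to align the test frame to the train columns:
        # x_test = x_test.reindex(columns=x_train.columns, fill_value=0)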
import numpy as np
import pandas as pd
import pickle
import streamlit as st
from nltk.corpus import stopwords
from sklearn import metrics
stop = stopwords.words('english')
from sklearn.metrics import confusion_matrix #import confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
test = pd.read_csv('drugsComTest_raw.csv')
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
        new_trading_df = new_trading_df.dropna(axis='index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
        raise RuntimeError(
            "Caught exception while retrieving data from Yahoo...", ex)
return None
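# Example usage (sketch; hypothetical ticker and dates, assumes the 'aapl' table exists in the app database):
# df = get_ticker_from_yahoo('AAPL', datetime.datetime(2020, 1, 1), datetime.datetime(2021, 1, 1))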
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest 2 days news healines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
            soup = BeautifulSoup(texts, features="html.parser")
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
        current_app.logger.info("Exception in scrape Finviz: %s", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
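# Example usage (sketch; hypothetical ticker):
# recent_news = get_news_from_finviz('AAPL')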
def prepare_trading_dataset(df):
"""Prepare the trading data set.
    Time series analysis incorporates previous data for future prediction,
    so we need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
window: int, default = 400
        feature engineering window size; at most 400 trading days are used to construct
        features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
        raise RuntimeError(
            "Encountered an error in >>make_dataset.prepare_trading_dataset<<... \
            the trading DataFrame is empty.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).std()*np.sqrt(60)
df['vol_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).std()*np.sqrt(80)
df['vol_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).std()*np.sqrt(100)
df['vol_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).std()*np.sqrt(120)
df['vol_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).std()*np.sqrt(140)
df['vol_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).std()*np.sqrt(160)
df['vol_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).std()*np.sqrt(180)
df['vol_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).std()*np.sqrt(200)
df['vol_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).std()*np.sqrt(220)
df['vol_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).std()*np.sqrt(240)
df['vol_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).std()*np.sqrt(260)
df['vol_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).std()*np.sqrt(280)
df['vol_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).std()*np.sqrt(300)
df['vol_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).std()*np.sqrt(320)
df['vol_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).std()*np.sqrt(340)
df['vol_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).std()*np.sqrt(360)
df['vol_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).std()*np.sqrt(380)
df['vol_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).std()*np.sqrt(400)
df['volume_1w'] = pd.Series(df['intraday_volumes']).rolling(window=5).mean()
df['volume_2w'] = pd.Series(df['intraday_volumes']).rolling(window=10).mean()
df['volume_3w'] = pd.Series(df['intraday_volumes']).rolling(window=15).mean()
df['volume_4w'] = pd.Series(df['intraday_volumes']).rolling(window=20).mean()
df['volume_8w'] = pd.Series(df['intraday_volumes']).rolling(window=40).mean()
df['volume_12w'] = pd.Series(df['intraday_volumes']).rolling(window=60).mean()
df['volume_16w'] = pd.Series(df['intraday_volumes']).rolling(window=80).mean()
        df['volume_20w'] = pd.Series(df['intraday_volumes']).rolling(window=100).mean()
import numpy as np
from matplotlib import pyplot as plt
from pandas import Series
from mpl_toolkits.mplot3d import axes3d
def plotData(X, y):
pos = X[np.where(y == 1, True, False).flatten()]
neg = X[np.where(y == 0, True, False).flatten()]
plt.plot(pos[:, 0], pos[:, 1], 'X', markersize=7, markeredgecolor='black', markeredgewidth=2)
plt.plot(neg[:, 0], neg[:, 1], 'D', markersize=7, markeredgecolor='black', markerfacecolor='springgreen')
def plotDecisionBoundary(theta, X, y):
"""
Plots the data points X and y into a new figure with the decision boundary defined by theta
PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
positive examples and o for the negative examples. X is assumed to be
a either
1) Mx3 matrix, where the first column is an all-ones column for the
intercept.
2) MxN, N>3 matrix, where the first column is all-ones
"""
# Plot Data
plt.figure(figsize=(15, 10))
plotData(X[:, 1:], y)
if X.shape[1] <= 3:
# Only need 2 points to define a line, so choose two endpoints
plot_x = np.array([min(X[:, 2]), max(X[:, 2])])
# Calculate the decision boundary line
plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])
# Plot, and adjust axes for better viewing
plt.plot(plot_x, plot_y)
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z = [
np.array([mapFeature2(u[i], v[j]).dot(theta) for i in range(len(u))])
for j in range(len(v))
]
print(z[0])
plt.contour(u, v, z, levels=[0.0])
# Legend, specific for the exercise
# axis([30, 100, 30, 100])
def mapFeature(X, degree=6):
"""
Feature mapping function to polynomial features
MAPFEATURE(X, degree) maps the two input features
to quadratic features used in the regularization exercise.
Returns a new feature array with more features, comprising of
X1, X2, X1.^2, X2.^2, X1*X2, X1*X2.^2, etc..
"""
quads = Series([X.iloc[0] ** (i-j) * X.iloc[1] ** j for i in range(1, degree + 1) for j in range(i + 1)])
return Series([1]).append([X, quads])
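# Worked count for the mapping above: with degree=6 the comprehension yields 2 + 3 + ... + 7 = 27
# polynomial terms (these already include the raw X1 and X2), so together with the bias term and
# the two appended raw features the returned Series has 30 entries.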
def mapFeature2(X1, X2, degree=6):
"""
Feature mapping function to polynomial features
MAPFEATURE(X, degree) maps the two input features
to quadratic features used in the regularization exercise.
Returns a new feature array with more features, comprising of
X1, X2, X1.^2, X2.^2, X1*X2, X1*X2.^2, etc..
"""
quads = Series([X1 ** (i - j) * X2 ** j for i in range(1, degree + 1) for j in range(i + 1)])
    return Series([1]).append([Series([X1, X2]), quads])
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy.stats as stats
from datetime import datetime, timedelta
import math
import os
import logging
from pathlib import Path
import ast
class ut1000():
'''
Class dedicated to processing ut1000 data only
'''
def __init__(self):
self.study = 'ut1000'
class ut2000():
'''
Class dedicated to processing ut2000 data only
'''
def __init__(self):
self.study = 'ut2000'
def get_beacon_datetime_index(self,df,resample_rate='10T'):
'''
Takes the utc timestamp index, converts it to datetime, sets the index, and resamples
'''
dt = []
for i in range(len(df)):
if isinstance(df.index[i], str):
try:
ts = int(df.index[i])
except ValueError:
ts = int(df.index[i][:-2])
dt.append(datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))
else:
dt.append(datetime.now())
df['datetime'] = dt
df['datetime'] = pd.to_datetime(df['datetime'])
df.set_index('datetime',inplace=True)
        df = df.resample(resample_rate).mean()
return df
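    # Example usage (sketch; assumes a DataFrame indexed by UTC-timestamp strings):
    # resampled = ut2000().get_beacon_datetime_index(raw_df, resample_rate='10T')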
def process_beacon(self,data_dir='../../data/raw/ut2000/beacon/'):
'''
Combines data from all sensors on all beacons
'''
beacons = pd.DataFrame()
measurements = ['pm1.0','pm2.5','pm10','std1.0','std2.5','std10','pc0.3','pc0.5','pc1.0','pc2.5','pc5.0','pc10.0']
for folder in os.listdir(data_dir):
beacon_no = folder[-2:]
if beacon_no in ['07','12','09','03','08','02','01','06','05','10']:
beaconPM = pd.DataFrame()
for file in os.listdir(f'{data_dir}{folder}/bevo/pms5003/'):
if file[-1] == 'v':
temp = pd.read_csv(f'{data_dir}{folder}/bevo/pms5003/{file}',names=measurements,
parse_dates=True,infer_datetime_format=True)
if len(temp) > 1:
                            beaconPM = pd.concat([beaconPM,temp])
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import random
import matplotlib
from collections import OrderedDict
import seaborn as sns
import matplotlib.gridspec as gridspec
from matplotlib.font_manager import FontProperties
plt.rcdefaults()
plt.rc('xtick.major', size = 0, width=0)
plt.rc('ytick.major', size = 0, width=0)
data_dir = r'/home/greydon/Documents/GitHub/afids-clinical/data/input_fid_native'
data_mni_dir = r'/home/greydon/Documents/GitHub/afids-clinical/data/input_fid_MNI_linear_combined'
#data_dir = r'C:\Users\greydon\Documents\github\afids_parkinsons\input\input_fid'
show_only = True
sub_ignore = [146]
fid_dic = {1: 'AC',
2: 'PC',
3: 'ICS',
4: 'PMJ',
5: 'SIPF',
6: 'RSLMS',
7: 'LSLMS',
8: 'RILMS',
9: 'LILMS',
10: 'CUL',
11: 'IMS',
12: 'RMB',
13: 'LMB',
14: 'PG',
15: 'RLVAC',
16: 'LLVAC',
17: 'RLVPC',
18: 'LLVPC',
19: 'GENU',
20: 'SPLE',
21: 'RALTH',
22: 'LALTH',
23: 'RSAMTH',
24: 'LSAMTH',
25: 'RIAMTH',
26: 'LIAMTH',
27: 'RIGO',
28: 'LIGO',
29: 'RVOH',
30: 'LVOH',
31: 'ROSF',
32: 'LOSF'
}
fid_desc = {1: 'AC',
2: 'PC',
3: 'Infracollicular Sulcus',
4: 'PMJ',
5: 'Superior IPF',
6: 'Right Superior LMS',
7: 'Left Superior LMS',
8: 'Right Inferior LMS',
9: 'Left Inferior LMS',
10: 'Culmen',
11: 'Intermammillary Sulcus',
12: 'Right Mammilary Body',
13: 'Left Mammilary Body',
14: 'Pineal Gland',
15: 'Right LV at AC',
16: 'Left LV at AC',
17: 'Right LV at PC',
18: 'Left LV at PC',
19: 'Genu of CC',
20: 'Splenium of CC',
21: 'Right AL Temporal Horn',
22: 'Left AL Tempral Horn',
23: 'R. Sup. AM Temporal Horn',
24: 'L. Sup. AM Temporal Horn',
25: 'R Inf. AM Temp Horn',
26: 'L Inf. AM Temp Horn',
27: 'Right IG Origin',
28: 'Left IG Origin',
29: 'R Ventral Occipital Horn',
30: 'L Ventral Occipital Horn',
31: 'R Olfactory Fundus',
32: 'L Olfactory Fundus'
}
def plot_fiducials(data_plot, expert_mean, data_dir,analysis=2, showOnly=False):
random.seed(1)
color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(raters))]
min_val = -2.5
max_val = 2.5
major_ticks = np.linspace(min_val,max_val, 7)
fig = plt.figure(figsize=(18,8))
handles = {}
data_cnt = 1
for ifid in range(4):
for jfid in range(8):
ax = plt.subplot2grid((4,8),(ifid,jfid), projection='3d')
tempData = data_plot[data_plot['fid'].isin([data_cnt])]
rater_labels = tempData['rater'].values
if analysis == 1:
plot_title = 'Distance from the average of expert raters'
file_name = 'distance_from_expert_mean'
tempData = tempData.loc[:,'x':'z'].values - expert_mean.loc[expert_mean['fid'].isin([data_cnt]),'x':'z'].values
elif analysis == 2:
plot_title = 'Distance from the average of all raters'
file_name = 'distance_from_all_raters_mean'
tempData = tempData.loc[:,'x':'z'].values - tempData.loc[:,'x':'z'].mean().values
elif analysis == 3:
plot_title = 'Distance from average MCP'
file_name = 'distance_from_avg_mcp'
tempData = tempData.loc[:,'x':'z'].values - tempData.loc[:,'x':'z'].mean().values
nov_cnt = 1
exp_cnt = 1
rater_labels_final_tmp = {}
for i in range(len(rater_labels)): #plot each point + it's index as text above
if rater_labels[i] in ('AT','RC','MJ'):
rate_label = f"Novice 0{nov_cnt}"
nov_cnt += 1
else:
rate_label = f"Expert 0{exp_cnt}"
exp_cnt += 1
rater_labels_final_tmp[rate_label] = rater_labels[i]
rater_labels_final = {}
for irate in sorted(list(rater_labels_final_tmp)):
rater_labels_final[irate]=rater_labels_final_tmp[irate]
print(rater_labels_final)
for irate in list(rater_labels_final):
rater_idx = [i for i,x in enumerate(rater_labels) if x == rater_labels_final[irate]][0]
l1 = ax.scatter(tempData[rater_idx,0], tempData[rater_idx,1], tempData[rater_idx,2], marker='o', c=color[rater_idx],edgecolors='black', s=50, label=irate)
handles[irate] = l1
ax.plot((min_val,min_val), (min_val,min_val), (min_val-0.1,max_val+0.1), 'black', linewidth=1.0)
ax.set_xlim([min_val,max_val])
ax.set_ylim([min_val,max_val])
ax.set_zlim([min_val,max_val])
ax.set_xlabel('x',labelpad=-15, fontweight='bold', fontsize=14)
ax.set_ylabel('y',labelpad=-15, fontweight='bold', fontsize=14)
ax.set_zlabel('z',labelpad=-15, fontweight='bold', fontsize=14)
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.zaxis.set_ticklabels([])
ax.zaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.set_xticks(major_ticks)
ax.set_yticks(major_ticks)
ax.set_zticks(major_ticks)
ax.grid(which='major', alpha=0.5)
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.zaxis.pane.set_edgecolor('black')
ax.xaxis.pane.set_alpha(1)
ax.yaxis.pane.set_alpha(1)
ax.zaxis.pane.set_alpha(1)
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.view_init(elev=25, azim=44)
ax.set_title(str(data_cnt) + ': ' + fid_dic[data_cnt], pad=2, fontweight='bold', fontsize=16)
data_cnt += 1
fig.subplots_adjust(hspace=0.15, wspace=0.05, top=0.90, bottom=0.06, left=0.02,right=0.9)
plt.legend(handles=handles.values(), fontsize=12, bbox_to_anchor=[2.1, 2.7], handletextpad=0.05)
fig.suptitle(plot_title, y = 0.98, fontsize=22, fontweight='bold')
if not showOnly:
output_dir = os.path.join(data_dir,'plots')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir, f"{file_name}.svg"),transparent=True)
plt.savefig(os.path.join(output_dir, f"{file_name}.png"),transparent=True,dpi=450)
plt.savefig(os.path.join(output_dir, f"{file_name}_white.png"),transparent=False,dpi=450)
plt.close()
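# Example call (sketch; the argument names follow variables constructed further below):
# plot_fiducials(data_from_mcp_avg, GS_mean, data_dir_out, analysis=3, showOnly=show_only)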
#%%
data_dir_out = r'/home/greydon/Documents'
sub_ignore = [146]
raters = [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x)) and 'mean' not in x]
rater_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_dir, irater,isub, os.listdir(os.path.join(data_dir, irater,isub))[0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_final = pd.concat([rater_final, data_table], axis = 0, ignore_index=True)
rater_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
Sub = pd.DataFrame({})
size = []
for r in raters:
sub_temp = np.unique(rater_final[rater_final['rater']==r]['subject'])
if sub_ignore:
sub_temp = [x for x in sub_temp if x not in sub_ignore]
data_table = pd.DataFrame({'rater': np.repeat(r,len(sub_temp)), 'subject':sub_temp})
Sub = pd.concat([Sub, data_table], axis = 0, ignore_index=True)
size.append((r,len(sub_temp)))
full_subs = set(Sub[Sub['rater']==size[0][0]]['subject'].values)
size = sorted(size, key=lambda tup: tup[1], reverse=True)
Sub_Comp = list(set(Sub[Sub['rater']==size[0][0]]['subject'].values) &
set(Sub[Sub['rater']==size[1][0]]['subject'].values))
for irate in range(2,len(raters)):
Sub_Comp = list(set(Sub_Comp) & set(Sub[Sub['rater']==size[irate][0]]['subject'].values))
Sub_Comp = sorted(Sub_Comp)
#set(full_subs).difference(Sub[Sub['rater']==size[4][0]]['subject'].values)
Data_comp = rater_final[rater_final['subject'].isin(Sub_Comp)]
Data_comp = Data_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Tot_Data = np.zeros((32,5,len(Sub_Comp),len(raters)))
for irate in range(len(raters)):
for isub in range(len(Sub_Comp)):
Tot_Data[:,:,isub,irate] = Data_comp[(Data_comp['rater']==raters[irate]) & (Data_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
Tot_mean = np.mean(Tot_Data,3)
N = Tot_mean[:,:,:,np.newaxis]
Tot_diff = Tot_Data - np.tile(N,[1,1,1,len(raters)])
Tot_eudiff = np.sqrt(Tot_diff[:,1,:,:]**2 + Tot_diff[:,2,:,:]**2 + Tot_diff[:,3,:,:]**2)
Rater_AFLE = np.mean(Tot_eudiff,2).T
Rater_AFLE_mean = np.mean(Tot_eudiff,1)
Rater_AFLE_SD = np.std(Tot_eudiff,1)
Total_AFLE_mean = np.mean(Rater_AFLE_mean,1)
Total_AFLE_SD = np.std(Tot_eudiff,1)
mcp_point = pd.DataFrame({})
for r in raters:
for s in Sub_Comp:
ac = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s) & (Data_comp['fid']==1),'x':'z'].values[0]
pc = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s)& (Data_comp['fid']==2),'x':'z'].values[0]
mcp = (ac + pc)/2
data_table = pd.DataFrame({'rater': r, 'subject': s, 'x': mcp[0], 'y': mcp[1],'z': mcp[2]}, index=[0] )
mcp_point = pd.concat([mcp_point, data_table], axis = 0, ignore_index=True)
mcp_point_mean = mcp_point.groupby(['subject'])['x','y','z'].mean()
data_from_mcp = pd.DataFrame({})
for r in raters:
for s in Sub_Comp:
for f in range(1,33):
point = Data_comp.loc[(Data_comp['rater']==r) & (Data_comp['subject']==s) & (Data_comp['fid']==f),'x':'z'].values[0]
di = point - mcp_point_mean.loc[s,:].values
euclidean = np.sqrt(di[0]**2 + di[1]**2 + di[2]**2)
data_table = pd.DataFrame({'rater': r, 'subject': s, 'fid': f, 'distance': euclidean,'x': di[0], 'y': di[1],'z': di[2]}, index=[0] )
data_from_mcp = pd.concat([data_from_mcp, data_table], axis = 0, ignore_index=True)
data_from_mcp_avg = data_from_mcp.groupby(['rater','fid'])['x','y','z'].mean().reset_index()
#%%
sub_ignore = [146]
rater_mni_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_mni_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_mni_dir, irater,isub, [x for x in os.listdir(os.path.join(data_mni_dir, irater,isub)) if x.endswith('_nlin.fcsv')][0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_mni_final = pd.concat([rater_mni_final, data_table], axis = 0, ignore_index=True)
rater_mni_lin_final = pd.DataFrame({})
iter_cnt = 0
for irater in raters:
patient_files = os.listdir(os.path.join(data_mni_dir, irater))
for isub in patient_files:
sub_num = int(''.join([s for s in isub if s.isdigit()]))
fileN = os.path.join(data_mni_dir, irater,isub, [x for x in os.listdir(os.path.join(data_mni_dir, irater,isub)) if x.endswith('_lin.fcsv')][0])
data_table = pd.read_csv(fileN, skiprows=3, header=None)
data_table['rater'] = np.repeat(irater,data_table.shape[0])
data_table['subject'] = np.repeat(sub_num,data_table.shape[0])
rater_mni_lin_final = pd.concat([rater_mni_lin_final, data_table], axis = 0, ignore_index=True)
rater_mni_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
rater_mni_lin_final.rename(columns={0:'node_id', 1:'x', 2:'y', 3:'z', 4:'ow', 5:'ox',
6:'oy', 7:'oz', 8:'vis', 9:'sel', 10:'lock',
11:'fid', 12:'description', 13:'associatedNodeID'}, inplace=True)
Sub = pd.DataFrame({})
size = []
for r in raters:
sub_temp = np.unique(rater_mni_final[rater_mni_final['rater']==r]['subject'])
if sub_ignore:
sub_temp = [x for x in sub_temp if x not in sub_ignore]
data_table = pd.DataFrame({'rater': np.repeat(r,len(sub_temp)), 'subject':sub_temp})
Sub = pd.concat([Sub, data_table], axis = 0, ignore_index=True)
size.append((r,len(sub_temp)))
full_subs = set(Sub[Sub['rater']==size[0][0]]['subject'].values)
size = sorted(size, key=lambda tup: tup[1], reverse=True)
Sub_Comp = list(set(Sub[Sub['rater']==size[0][0]]['subject'].values) &
set(Sub[Sub['rater']==size[1][0]]['subject'].values))
for irate in range(2,len(raters)):
Sub_Comp = list(set(Sub_Comp) & set(Sub[Sub['rater']==size[irate][0]]['subject'].values))
Sub_Comp = sorted(Sub_Comp)
Data_mni_comp = rater_mni_final[rater_mni_final['subject'].isin(Sub_Comp)]
Data_mni_comp = Data_mni_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Data_mni_lin_comp = rater_mni_lin_final[rater_mni_lin_final['subject'].isin(Sub_Comp)]
Data_mni_lin_comp = Data_mni_lin_comp.sort_values(['rater','subject', 'fid'], ascending=[True, True,True])
Tot_Data = np.zeros((32,5,len(Sub_Comp),len(raters)))
Tot_Data_lin = np.zeros((32,5,len(Sub_Comp),len(raters)))
for irate in range(len(raters)):
for isub in range(len(Sub_Comp)):
Tot_Data[:,:,isub,irate] = Data_mni_comp[(Data_mni_comp['rater']==raters[irate]) & (Data_mni_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
Tot_Data_lin[:,:,isub,irate] = Data_mni_lin_comp[(Data_mni_lin_comp['rater']==raters[irate]) & (Data_mni_lin_comp['subject']==Sub_Comp[isub])].sort_values(['fid']).loc[:,['fid','x','y','z','subject']]
MNI152NLin2009cAsym_standard = pd.read_csv('/home/greydon/Documents/GitHub/afids-clinical/data/fid_standards/MNI152NLin2009bAsym_rater_standard/MNI152NLin2009bAsym_desc-raterstandard_afids.fcsv', skiprows=2)[['label','x','y','z']].to_numpy()
N = MNI152NLin2009cAsym_standard[:,:,np.newaxis, np.newaxis]
MNI_Diff = Tot_Data[:,:4,:,:] - np.tile(N,[1,1,len(Sub_Comp),len(raters)])
MNI_AFLE = np.sqrt(MNI_Diff[:,1,:,:]**2 + MNI_Diff[:,2,:,:]**2 + MNI_Diff[:,3,:,:]**2)
MNI_Diff_lin = Tot_Data_lin[:,:4,:,:] - np.tile(N,[1,1,len(Sub_Comp),len(raters)])
MNI_AFLE_lin = np.sqrt(MNI_Diff_lin[:,1,:,:]**2 + MNI_Diff_lin[:,2,:,:]**2 + MNI_Diff_lin[:,3,:,:]**2)
MNI_AFLE_rater = np.mean(MNI_AFLE,2).T
MNI_AFLE_scan = np.mean(MNI_AFLE,1)
MNI_AFLE_total = np.mean(MNI_AFLE_rater,0)
MNI_AFLE_std = np.std(MNI_AFLE_rater,0)
np.mean(MNI_AFLE_total)
np.std(MNI_AFLE_total)
MNI_AFLE_lin_rater = np.mean(MNI_AFLE_lin,2).T
MNI_AFLE_lin_scan = np.mean(MNI_AFLE_lin,1)
MNI_AFLE_lin_total = np.mean(MNI_AFLE_lin_rater,0)
MNI_AFLE_lin_std = np.std(MNI_AFLE_lin_rater,0)
np.mean(MNI_AFLE_lin_total)
np.std(MNI_AFLE_lin_total)
df=pd.DataFrame(np.c_[
[N + P for N,P in zip([f'{x:.2f}' for x in MNI_AFLE_lin_total ],[f' ({x:.2f})' for x in MNI_AFLE_lin_std])]+[f'{np.mean(MNI_AFLE_lin_total):.2f} ({np.std(MNI_AFLE_lin_total):.2f})'],
[N + P for N,P in zip([f'{x:.2f}' for x in MNI_AFLE_total ],[f' ({x:.2f})' for x in MNI_AFLE_std])]+[f'{np.mean(MNI_AFLE_total):.2f} ({np.std(MNI_AFLE_total):.2f})']
])
print(df.to_csv(index=None, header=None))
#%%
goldStandard = "MA"
rater = 1
gold_stand_data = Data_comp[Data_comp['rater'].isin([goldStandard])].reset_index()
single_rater_data = Data_comp[Data_comp['rater'].isin([raters[rater]])].reset_index()
Coor_Diff = gold_stand_data.loc[:,'x':'z'].values - (single_rater_data.loc[:,'x':'z'].values)
rater_error = np.sqrt(Coor_Diff[:,0]**2 + Coor_Diff[:,1]**2 + Coor_Diff[:,2]**2)
single_rater_data['rater_error'] = rater_error
error_idx = single_rater_data['rater_error'] > 5.0
check_data = pd.DataFrame({'subject': single_rater_data.loc[error_idx,'subject'].values,
'fid': single_rater_data.loc[error_idx,'fid'].values,
'x': single_rater_data.loc[error_idx,'x'].values,
'y': single_rater_data.loc[error_idx,'y'].values,
'z': single_rater_data.loc[error_idx,'z'].values,
'x_diff': Coor_Diff[error_idx,0],
'y_diff': Coor_Diff[error_idx,1],
'z_diff': Coor_Diff[error_idx,2]})
check_data = check_data.sort_values(['subject', 'fid'], ascending=[True, True])
#%%
GS_raters = ["GG", "MA"]
NGS_raters = [x for x in raters if x not in GS_raters]
GS_mean = Data_comp[Data_comp['rater'].isin(GS_raters)].groupby(['subject','fid'])['x','y','z'].mean().reset_index()
NGS_mean = Data_comp[Data_comp['rater'].isin(NGS_raters)].groupby(['subject','fid'])['x','y','z'].mean().reset_index()
GS_Diff = GS_mean.loc[:,'x':'z'].values - NGS_mean.loc[:,'x':'z'].values
GS_error_rate = np.sqrt(GS_Diff[:,0]**2 + GS_Diff[:,1]**2 + GS_Diff[:,2]**2)
GS_Diff_mean = pd.DataFrame(np.c_[GS_Diff, GS_mean['subject'].values, GS_mean['fid'].values])
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import logging
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def read_data_to_df(data_path: str, **read_data_options):
"""
read data depending on its extension and convert it to a pandas dataframe
"""
file_ext = data_path.split('.')[-1]
if file_ext == 'csv' or file_ext == 'txt':
        return pd.read_csv(data_path, **read_data_options) if read_data_options else pd.read_csv(data_path)
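    # Example usage (sketch; hypothetical file and pandas read options):
    # df = read_data_to_df('data/train.csv', sep=';', nrows=1000)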
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
from datetime import datetime
from dateutil import parser
import time
from scipy.stats import gaussian_kde
from sklearn.gaussian_process import GaussianProcessClassifier
from pandas_datareader import data
import numexpr as ne
seaborn.set()
def numpy_learner_func():
    # numpy counting
rng = np.random.RandomState(0)
x_data = rng.randint(10, size=(3, 4))
print("rng:{}".format(rng))
print("x_data{}".format(x_data))
    # count the entries smaller than 6: np.count_nonzero, np.sum, np.where
num1 = np.count_nonzero(x_data < 6)
num2 = np.sum(x_data < 6)
num3 = np.any(x_data < 6)
num4 = np.all(x_data < 6)
num5 = np.where(x_data < 6)[0]
print(x_data < 6, num3, num4, num5, num5.shape[0])
print("num1 is {}".format(num1))
print("num2 is {}".format(num2))
print(x_data[x_data < 6])
print(9 and 0)
    # numpy newaxis adds a new dimension to an array
x = np.arange(3)
print(x, x.shape)
x1 = x[:, np.newaxis]
print(x1, x1.shape)
x2 = x[:, np.newaxis, np.newaxis]
print(x2, x2.shape)
x3 = np.zeros(10)
np.add.at(x3, [0, 1, 5], 1)
print(x3)
# print("x4 is {}".format(x4))
i = [2, 3, 3, 4, 4, 4]
x3[i] += 1
print(x3)
# np.random.seed(42)
x_np = np.random.randn(100)
bins = np.linspace(-5, 5, 20)
    # zeros_like returns an array of zeros with the same shape as its argument
counts = np.zeros_like(bins)
print("counts is {}".format(counts))
    # np.searchsorted returns the indices at which the values of x_np would be inserted into the sorted bins
j = np.searchsorted(bins, x_np)
print("j is {}".format(j))
# np.searchsorted()
    # ## numpy sorting: np.sort() returns a new, sorted array
    srt_array = np.array([2, 1, 4, 3, 5])
    print("sorted:{}".format(np.sort(srt_array)))
    # ndarray.sort() sorts the original array in place and returns None
    print("x.sort() is {}".format(srt_array.sort()))
    sorted_arr = np.array([99, 0, 3, 1, 90])
    # np.argsort() returns the indices that would sort the array
    print("np.argsort(srt_array) is {}".format(np.argsort(sorted_arr)))
    # np.sort(..., axis=...) sorts along the given axis
axis_arr = np.random.RandomState(42).randint(0, 10, (4, 6))
print("the array is {}".format(axis_arr))
print("sort each column of axis_arr, returns {}".format(np.sort(axis_arr, axis=0)))
print("sort each row of axis_arr, returns {}".format(np.sort(axis_arr, axis=1)))
    # partial (partition) sorting
np_part = np.array([3, 8, 4, 99, 5, 1, 88]) # 1 3 4 5 88 99 3,4, 1, 5,8, 99, 88
print("np_part partition sorted is {}".format(np.partition(np_part, 3,)))
def K_nearest_neighbors_func():
X = np.random.RandomState(42).rand(10, 2) # 10X2 array
plt.scatter(X[:, 0], X[:, 1], s=100)
x_newaxis = X[:, np.newaxis, :]
print("X[:, np.newaxis, :]:", x_newaxis)
print(x_newaxis.shape)
x_newaxis_1 = X[np.newaxis, :, :]
print("x_newaxis_1:", x_newaxis_1)
print(x_newaxis_1.shape)
diff_newaxis = x_newaxis - x_newaxis_1
print("diff_newaxis:", diff_newaxis, diff_newaxis.shape)
sq_differences = diff_newaxis ** 2
    dist_sq = sq_differences.sum(-1)  # sum over the last (coordinate) axis
    print("dist_sq:", dist_sq, sq_differences.shape, dist_sq.shape)
    eye_dist_sq = dist_sq.diagonal()  # returns the diagonal of the matrix
    print("eye_dist_sq is {}".format(eye_dist_sq))
    nearest = np.argsort(dist_sq, axis=1)  # sort each row in ascending order and return the indices
    K = 2
    nearest_partition = np.argpartition(dist_sq, K+1, axis=1)  # partition each row and return the indices
# print("nearest_partition.shape is {}".format(nearest_partition.shape))
# #
# # dis_sq = np.sum((X[:, np.newaxis, :] - X[np.newaxis, :, :])**2, axis=-1)
for i in range(X.shape[0]):
for j in nearest_partition[i, :K+1]:
plt.plot(*zip(X[j], X[i]), color='black')
# k_nearest_neighbors_loop_func(X, K)
plt.show()
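# Minimal standalone sketch of the broadcasting trick used above (hypothetical 5x2 point set):
# pts = np.random.rand(5, 2)
# pairwise_sq_dists = ((pts[:, np.newaxis, :] - pts[np.newaxis, :, :]) ** 2).sum(-1)  # shape (5, 5)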
def k_nearest_neighbors_loop_func(X, K):
all_dist = {}
index_dict = {}
    # compute the distance from each point to every other point and sort them
for i in range(X.shape[0]):
start_point = X[i, :]
start_point_dis = {}
for j in range(X.shape[0]):
if i != j:
dis = np.sqrt((start_point[0] - X[j, 0])**2 + (start_point[1] - X[j, 1])**2)
# start_point_dis.append(dis)
start_point_dis[j] = dis
        # sort the dictionary by value
sorted_start_point_dis = {}
# for item in dict_a.items():
# print(item)
# out.append((item[1], item[0]))
# print(out, sorted(out))
inter_list = sorted(start_point_dis.items(), key = lambda kv:(kv[1], kv[0]))
for each in inter_list:
sorted_start_point_dis[each[0]] = each[1]
all_dist[i] = list(sorted_start_point_dis.keys())[:K]
    # take the indices of the two nearest points
for a in range(X.shape[0]):
for b in all_dist[a]:
print("a, b", a, b)
plt.plot(*zip(X[a, :], X[b, :]), color='blue')
plt.show()
# print(all_dist)
def pandas_learner():
    # a pandas Index is an immutable array, or an ordered set that may contain duplicate values
indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])
    index1 = indA & indB  # intersection
    index2 = indA | indB  # union
    index3 = indA ^ indB  # symmetric difference
print(index1, index2, index3)
data = pd.Series([0.25, 0.5, 0.75, 1.0],
index=['a', 'b', 'c', 'd'])
print(data['b'])
print('a' in data)
print(data.keys())
print(list(data.items()))
data['e'] = 1.25
    print(data['a': 'c'])  # label-based slicing; the endpoint 'c' is included
print(data[0:2])
print(data[(data > 0.3) & (data < 0.8)])
print(data[['a', 'e']])
    # loc: label-based indexing
print(data[1])
print(data[1:3])
print(data.loc['a'])
    # iloc: integer-position-based indexing
print(data.iloc[1])
print(data.iloc[1:3])
def pandas_null():
valsl = np.array([1, np.nan, 3, 4])
print(valsl.dtype)
print(1+np.nan)
print(0*np.nan)
    print(np.sum(valsl), np.min(valsl), np.max(valsl))  # any aggregation (sum/min/max) over data containing nan returns nan
    print(np.nansum(valsl), np.nanmin(valsl), np.nanmax(valsl))  # the nan-aware versions ignore nan when computing sum/min/max
print(np.nan == None)
data = pd.Series([1, np.nan, 'hello', None])
print(data.isnull())
print(data.notnull())
print(data[data.notnull()])
print("dropnan:", data.dropna())
    data_df = pd.DataFrame([[1, np.nan, 2], [2, 3, 5], [np.nan, 4, 6]])
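    # Likely continuation (sketch): dropna() drops rows containing NaN,
    # dropna(axis='columns') drops columns with NaN, and fillna(0) replaces NaN values.
    # print(data_df.dropna())
    # print(data_df.fillna(0))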
from time import time
from os import path, listdir
from datetime import timedelta
from datetime import date as dt_date
from datetime import datetime as dt
from numpy import cumprod
from pandas import DataFrame, read_sql_query, read_csv, concat
from functions import psqlEngine
class Investments():
def __init__(self, path = '../investments/', name = 'get_investments', **kwargs):
self.kwargs = kwargs
self.path = path
self.hyperparameters()
self.get_engine()
self.get_dollar()
self.get_all_assets()
self.domestic_bond_returns()
self.get_benchmarks()
self.portfolio_domestic_stocks = self.get_quotas('domestic_stocks')
self.portfolio_international_stocks = self.get_quotas('international_stocks')
self.portfolio_crypto = self.get_quotas('crypto')
# self.portfolio_domestic_options = self.get_quotas('domestic_options')
self.portfolio_domestic_funds = self.get_quotas('domestic_funds')
self.get_portfolio()
self.get_aggregate()
self.get_time_series()
self.dispose_engine()
def __call__(self, flag = 'assets'):
if flag == 'dollar':
return self.dollar
if flag == 'bonds':
return self.domestic_bonds, self.interests
if flag == 'stocks':
return self.domestic_tickers, self.international_tickers
if flag == 'crypto':
return self.crypto, self.fractions
if flag == 'portfolio':
return self.portfolio, self.portfolio_aggregate.round(2)
if flag == 'save':
rounded = self.portfolio.round(2)
rounded2 = self.portfolio_aggregate.round(2)
engine = psqlEngine(self.database)
connection = engine.connect()
rounded.to_sql('portfolio', connection, if_exists = 'replace', index = False)
rounded2.to_sql('aggregate', connection, if_exists = 'replace', index = False)
connection.close()
engine.dispose()
if flag == 'time_series':
return self.portfolio_time_series.round(2)
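    # Example usage (sketch; hypothetical paths and database config):
    # inv = Investments(path='../investments/', database='database.ini')
    # portfolio_df, aggregate_df = inv('portfolio')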
def hyperparameters(self):
self.database = self.kwargs.get('database', 'database.ini')
self.benchmark_database = self.kwargs.get('benchmarks_database', 'benchmarks')
self.domestic_stocks_database = self.kwargs.get('domestic_database', 'brazil_stocks')
self.domestic_options_database = self.kwargs.get('domestic_database', 'brazil_options')
self.international_database = self.kwargs.get('international_database', 'usa_stocks')
self.currency_database = self.kwargs.get('currency_database', 'currencies')
self.domestic_bonds_path = '{}bonds/'.format(self.path)
self.crypto_path = '{}crypto/'.format(self.path)
self.domestic_stocks_path = '{}stocks/domestic/'.format(self.path)
self.international_stocks_path = '{}stocks/international/'.format(self.path)
self.domestic_options_path = '{}options/domestic/'.format(self.path)
self.domestic_funds_path = '{}funds/domestic/'.format(self.path)
self.list_paths = [
self.domestic_bonds_path,
self.crypto_path,
self.domestic_stocks_path,
self.international_stocks_path,
self.domestic_options_path,
self.domestic_funds_path,
]
self.dates_min = DataFrame()
def get_engine(self):
self.engine = psqlEngine(self.database)
self.connection = self.engine.connect()
def dispose_engine(self):
self.connection.close()
self.engine.dispose()
def get_dollar(self):
currency = 'BRLUSD'
self.dollar = float(read_sql_query("SELECT * FROM {} WHERE ticker = '{}'".format(self.benchmark_database, currency), self.connection).iloc[0].close)
self.dollar_full = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.benchmark_database, currency), self.connection)
self.dollar_full.drop_duplicates('date', inplace = True)
self.dollar_full = self.insert_weekends(self.dollar_full)
self.dollar_full.rename(columns = {'close': 'dollar_close'}, inplace = True)
self.dollar_full['dollar_close'] = self.dollar_full.dollar_close.astype('float')
def get_benchmarks(self):
self.spy = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'SPY' ORDER BY date".format(self.benchmark_database), self.connection)
self.bova = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'BOVA11' ORDER BY date".format(self.benchmark_database), self.connection)
self.spy.drop_duplicates('date', inplace = True)
self.bova.drop_duplicates('date', inplace = True)
self.spy = self.insert_weekends(self.spy)
self.spy['close'] = self.spy.close.astype('float')
self.bova = self.insert_weekends(self.bova)
self.bova = self.bova.merge(self.dollar_full, on = 'date')
self.bova['close'] = self.bova.close.astype('float')
self.bova['close_dollar'] = (self.bova.close * self.bova.dollar_close).to_list()
def get_all_assets(self):
self.interests, self.fractions = list(), list()
self.domestic_tickers, self.international_tickers = list(), list()
self.domestic_options_tickers = list()
self.domestic_funds_tickers = list()
for directory in self.list_paths:
list_files = list()
for filename in listdir(directory):
if filename.endswith('.csv'):
list_files.append(path.join(directory, filename))
if directory == self.domestic_bonds_path:
self.interests.append(filename.replace('.csv', '').upper())
if directory == self.crypto_path:
self.fractions.append(filename.replace('.csv', '').upper())
if directory == self.domestic_stocks_path:
self.domestic_tickers.append(filename.replace('.csv', '').upper())
if directory == self.international_stocks_path:
self.international_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_options_path:
self.domestic_options_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_funds_path:
self.domestic_funds_tickers.append(filename.replace('.csv', '').upper())
dictionary = dict()
if directory == self.domestic_bonds_path:
for filename, interest in zip(list_files, self.interests):
df = read_csv(filename)
dictionary[interest] = df
if dictionary:
self.domestic_bonds = concat(dictionary)
self.domestic_bonds = self.domestic_bonds.rename(columns = {'pct_cdi': 'share'})
self.domestic_bonds = self.domestic_bonds.merge(self.dollar_full, on = 'date')
self.domestic_bonds['purchase_price_dollar'] = (self.domestic_bonds.purchase_price.astype('float') * self.domestic_bonds.dollar_close.astype('float')).to_list()
else:
if directory == self.crypto_path:
symbols = self.fractions
if directory == self.domestic_stocks_path:
symbols = self.domestic_tickers
if directory == self.international_stocks_path:
symbols = self.international_tickers
if directory == self.domestic_options_path:
symbols = self.domestic_options_tickers
if directory == self.domestic_funds_path:
symbols = self.domestic_funds_tickers
for filename, ticker in zip(list_files, symbols):
df = read_csv(filename)
if ticker in self.domestic_funds_tickers:
df.set_index('date', inplace = True)
df['purchase_price'] = df.purchase_price.diff()
df = df.dropna()
df.reset_index(inplace = True)
if (ticker in self.domestic_tickers) or (ticker in self.domestic_options_tickers) or (ticker in self.domestic_funds_tickers):
df = df.merge(self.dollar_full, on = 'date')
df['purchase_price'] = df.purchase_price.astype('float') * df.dollar_close.astype('float')
dictionary[ticker] = df
df['cum_share'] = df.share.cumsum()
df['price_share'] = (df.purchase_price / df.share)
df['cum_price_share'] = df.price_share.expanding().mean()
dictionary[ticker] = df
if dictionary:
self.stocks = concat(dictionary)
if directory == self.crypto_path:
self.crypto = concat(dictionary)
if directory == self.domestic_stocks_path:
self.domestic_stocks = concat(dictionary)
if directory == self.international_stocks_path:
self.international_stocks = concat(dictionary)
if directory == self.domestic_options_path:
self.domestic_options = concat(dictionary)
if directory == self.domestic_funds_path:
self.domestic_funds = concat(dictionary)
def get_quotas(self, asset):
quotas = dict()
domestic = False
if asset == 'crypto':
list_tickers = self.fractions
db = self.currency_database
if asset == 'domestic_stocks':
list_tickers = self.domestic_tickers
db = self.domestic_stocks_database
domestic = True
if asset == 'international_stocks':
list_tickers = self.international_tickers
db = self.international_database
if asset == 'domestic_options':
list_tickers = self.domestic_options_tickers
db = self.domestic_options_database
domestic = True
if asset == 'domestic_funds':
list_tickers = self.domestic_funds_tickers
domestic = True
for ticker in list_tickers:
key = ticker.upper()
if asset == 'crypto':
quotas[key] = self.crypto.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_stocks':
quotas[key] = self.domestic_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'international_stocks':
quotas[key] = self.international_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_options':
quotas[key] = self.domestic_options.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_funds':
quotas[key] = 1.
portfolio = DataFrame({
'asset': list(quotas.keys()),
'quotas': list(quotas.values())
})
portfolio.sort_values(by = ['asset'], inplace = True)
if asset == 'domestic_funds':
value_usd, value_brl = list(), list()
for asset in list_tickers:
close_price = read_csv(self.domestic_funds_path + '{}.csv'.format(asset.lower())).share.iloc[-1]
value_usd.append(close_price * quotas.get(asset) * self.dollar)
value_brl.append(close_price * quotas.get(asset))
portfolio['value_usd'] = value_usd
portfolio['value_brl'] = value_brl
else:
if domestic == False:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
else:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, adjusted_close as close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
close_price['close'] = close_price.close.astype('float')
close_price = close_price.loc[close_price.ticker.isin(portfolio.asset.to_list())]
self.dates_min = self.dates_min.append(close_price[['date', 'ticker']])
close_price['quota'] = close_price.ticker.apply(lambda x: quotas.get(x))
if domestic == False:
portfolio['value_usd'] = (close_price.close * close_price.quota).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota / self.dollar).to_list()
else:
portfolio['value_usd'] = (close_price.close * close_price.quota * self.dollar).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota).to_list()
portfolio.sort_values(by = ['value_usd'], ascending = False, inplace = True)
return portfolio
def get_portfolio(self):
self.portfolio = dict()
self.portfolio['domestic bonds'] = self.portfolio_bonds
self.portfolio['domestic stocks'] = self.portfolio_domestic_stocks
self.portfolio['international stocks'] = self.portfolio_international_stocks
self.portfolio['crypto'] = self.portfolio_crypto
# self.portfolio['domestic options'] = self.portfolio_domestic_options
self.portfolio['domestic funds'] = self.portfolio_domestic_funds
self.portfolio = concat(self.portfolio)
self.portfolio = self.portfolio.loc[self.portfolio.quotas >= 1e-10]
def get_aggregate(self):
assets = list(self.portfolio.index.unique(level = 0))
value_brl, value_usd = list(), list()
for asset in assets:
value_brl.append(self.portfolio.loc[asset].sum().value_brl)
value_usd.append(self.portfolio.loc[asset].sum().value_usd)
self.portfolio_aggregate = DataFrame({
'asset': assets,
'value_brl': value_brl,
'value_usd': value_usd,
})
def insert_weekends(self, df, asset = 'stock'):
df.set_index('date', inplace = True)
start, end = df.index[0], df.index[-1]
start = dt.strptime(start, '%Y-%m-%d').date()
end = dt.strptime(end, '%Y-%m-%d').date()
dates = [str(start + timedelta(days = x)) for x in range(0, (end - start).days + 1, 1)]
df = df.reindex(dates, fill_value = 0)
df.reset_index(inplace = True)
close = list()
if asset == '6040':
for value in df.interest:
if value != 0:
close.append(value)
if value == 0:
close.append(1.)
df['interest'] = close
if asset == 'bond':
for value in df.portfolio:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['portfolio'] = close
if asset == 'crypto':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
if asset == 'stock':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
return df
def get_concat_dataframe(self, columns, options = True):
columns_bonds = list()
for elem in columns:
if elem == 'share':
columns_bonds.append('purchase_price')
elif elem == 'purchase_price':
columns_bonds.append('purchase_price_dollar')
else:
columns_bonds.append(elem)
domestic_bonds = dict()
domestic_bonds['CDB'] = self.domestic_bonds[columns_bonds].rename(columns = {'purchase_price_dollar': 'purchase_price'})
domestic_bonds = concat(domestic_bonds)
if options == True:
df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns], self.domestic_options[columns]])
else:
df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns]])
return df
def get_portfolio_invested(self, df):
if 'date' in df.columns.to_list():
df.set_index('date', inplace = True)
start, end = df.index[0], df.index[-1]
start = dt.strptime(start, '%Y-%m-%d').date()
end = dt.strptime(end, '%Y-%m-%d').date()
reference = self.get_concat_dataframe(['date', 'purchase_price'], False)
# reference['purchase_price'] = reference.purchase_price.astype('float')
reference = reference.groupby(by = 'date')['purchase_price'].sum()
reference = DataFrame(reference).reset_index()
reference['close'] = reference.purchase_price.cumsum()
reference = reference.loc[(reference.date >= start.strftime('%Y-%m-%d')) & (reference.date <= end.strftime('%Y-%m-%d'))]
reference = self.insert_weekends(reference)
reference = reference.drop(columns = {'purchase_price'}).rename(columns = {'close': 'invested'})
ref_start = dt.strptime(reference.date.iloc[0], '%Y-%m-%d').date()
ref_end = dt.strptime(reference.date.iloc[-1], '%Y-%m-%d').date()
dates_beginning = [str(start + timedelta(days = x)) for x in range(0, (ref_start - start).days, 1)]
dates_end = [str(ref_end + timedelta(days = x)) for x in range(1, (end - ref_end).days + 1, 1)]
aux = [reference.invested.iloc[0] for _ in range(len(dates_beginning))]
aux2 = [reference.invested.iloc[-1] for _ in range(len(dates_end))]
reference = DataFrame({
'date': dates_beginning + reference.date.to_list() + dates_end,
'invested': aux + reference.invested.to_list() + aux2,
})
return reference.invested.to_list()
def get_return_benchmark_portfolio(self):
value_bond, value_bova = 400, 600
value = list()
dates = self.bova.loc[(self.bova.date >= self.start_date) & (self.bova.date <= self.end_date), 'date'].to_list()
bova_dollar = self.bova.loc[(self.bova.date >= self.start_date) & (self.bova.date <= self.end_date), 'close_dollar']
interests = self.insert_weekends(self.cdi[['date', 'interest']], asset = '6040').interest
for interest, return_bova in zip(interests, bova_dollar.pct_change().fillna(0)):
value_bond = value_bond * interest
value_bova = value_bova * (1 + return_bova)
value.append(value_bond + value_bova)
self.benchmark_portfolio = DataFrame({
'date': dates,
'portfolio': value,
})
def domestic_bond_returns(self):
end = dt_date.today().strftime('%Y-%m-%d')
        self.cdi = read_csv('../interests/cdi.csv')
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
"""
Output class for Beerex
"""
def __init__(self):
"""Class representing the outputs for Beerex"""
super(BeerexOutputs, self).__init__()
self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float")
self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float")
self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float")
self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float")
self.out_eec = pd.Series(name="out_eec", dtype="float")
self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float")
self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float")
self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float")
self.out_lw4_total_dose = pd.Series(name="out_lw4_total_dose", dtype="float")
self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float")
self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float")
self.out_lq1_total_dose = pd.Series(name="out_lq1_total_dose", dtype="float")
self.out_lq2_total_dose = pd.Series(name="out_lq2_total_dose", dtype="float")
self.out_lq3_total_dose = pd.Series(name="out_lq3_total_dose", dtype="float")
self.out_lq4_total_dose = pd.Series(name="out_lq4_total_dose", dtype="float")
self.out_aw_cell_total_dose = pd.Series(name="out_aw_cell_total_dose", dtype="float")
self.out_aw_brood_total_dose = pd.Series(name="out_aw_brood_total_dose", dtype="float")
self.out_aw_comb_total_dose = | pd.Series(name="out_aw_comb_total_dose", dtype="float") | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Comparing Pycytominer and Cytominer Processing
#
# We have previously processed all of the Drug Repurposing Hub Cell Painting Data using [cytominer](https://github.com/cytomining/cytominer).
# Cytominer is an R based image-based profiling tool.
# In this repo, we reprocess the data with [pycytominer](https://github.com/cytomining/pycytominer).
# As the name connotes, pycytominer is a python based image-based profiling tool.
#
# We include all processing scripts and present the pycytominer profiles in this open source repository.
# The repository represents a unified bioinformatics pipeline applied to all Cell Painting Drug Repurposing Profiles. In this notebook, we compare the resulting output data between the processing pipelines for the two tools: Cytominer and pycytominer.
# We output several metrics comparing the two approaches
#
# ## Metrics
#
# In all cases, we calculate the element-wise absolute value difference between pycytominer and cytominer profiles.
#
# 1. Mean, median, and sum of element-wise differences
# 2. Per feature mean, median, and sum of element-wise differences
# 3. Feature selection procedure differences per feature (level 4b only)
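#
# Concretely, for a feature *f* and sample *i* the element-wise difference is `|pycytominer_(i, f) - cytominer_(i, f)|`; the per-feature mean, median, and sum are taken across samples, and the plate-level summaries collapse those per-feature values into single numbers (see `get_metrics` below).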
#
# In addition, we confirm alignment of the following metadata columns:
#
# * Well
# * Broad Sample Name
# * Plate
#
# Other metadata columns are not expected to be aligned.
# For example, we have [updated MOA and Target information](https://github.com/broadinstitute/lincs-cell-painting/issues/11) in the pycytominer version.
#
# ## Data Levels
#
# Image-based profiling results in the following output data levels.
# We do not compare all data levels in this notebook.
#
# | Data | Level | Comparison |
# | :---- | :---- | :-------- |
# | Images | Level 1 | NA |
# | SQLite File (single cell profiles) | Level 2 | NA |
# | Aggregated Profiles with Well Information (metadata) | Level 3 | Yes |
# | Normalized Aggregated Profiles with Metadata | Level 4a | Yes |
# | Normalized and Feature Selected Aggregated Profiles with Metadata | Level 4b | Yes |
# | Perturbation Profiles created Summarizing Replicates | Level 5 | No |
# In[1]:
get_ipython().run_line_magic('load_ext', 'nb_black')
# In[2]:
import os
import pathlib
import numpy as np
import pandas as pd
from util import build_file_dictionary, load_data, build_filenames
# In[3]:
def get_metrics(pycyto_df, cyto_df, features):
# Align features
pycyto_df = pycyto_df.reindex(features, axis="columns")
cyto_df = cyto_df.reindex(features, axis="columns")
# Assess difference
abs_diff = pycyto_df.subtract(cyto_df).abs()
mean_diff = abs_diff.mean()
median_diff = abs_diff.median()
sum_diff = abs_diff.sum()
complete_mean_diff = mean_diff.replace([np.inf, -np.inf], np.nan).dropna().mean()
complete_median_diff = (
median_diff.replace([np.inf, -np.inf], np.nan).dropna().mean()
)
complete_sum_diff = sum_diff.replace([np.inf, -np.inf], np.nan).dropna().sum()
return (
mean_diff,
complete_mean_diff,
median_diff,
complete_median_diff,
sum_diff,
complete_sum_diff,
)
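# Illustrative sketch (added for clarity; these feature names are hypothetical, not
# taken from the real profiles): applying get_metrics to two tiny aligned toy frames.
# The first/third/fifth return values are per-feature Series; the "complete_*" values
# collapse them into scalars (mean of per-feature means, mean of per-feature medians,
# and sum of per-feature sums).
_toy_pycyto = pd.DataFrame(
    {"Cells_AreaShape_Area": [1.0, 2.0], "Nuclei_Texture_Contrast": [0.50, 0.70]}
)
_toy_cyto = pd.DataFrame(
    {"Cells_AreaShape_Area": [1.1, 2.0], "Nuclei_Texture_Contrast": [0.50, 0.90]}
)
_toy_metrics = get_metrics(_toy_pycyto, _toy_cyto, features=_toy_pycyto.columns.tolist())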
def find_feature_diff(pycyto_df, cyto_df, plate, all_features):
all_features_df = pd.DataFrame(
["missing"] * len(all_features), index=all_features, columns=[plate]
)
pycyto_features = set(pycyto_df.columns.tolist())
cyto_features = set(cyto_df.columns.tolist())
present_both = pycyto_features.intersection(cyto_features)
all_features_df.loc[
all_features_df.index.isin(pycyto_features), plate
] = "only_pycytominer"
all_features_df.loc[
all_features_df.index.isin(cyto_features), plate
] = "only_cytominer"
all_features_df.loc[
all_features_df.index.isin(present_both), plate
] = "present_both"
return all_features_df
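# Illustrative sketch (added; the toy feature names are hypothetical): a feature present
# in both frames is labeled "present_both", a feature unique to one tool is labeled
# "only_pycytominer" / "only_cytominer", and a feature listed in all_features but absent
# from both stays "missing".
_toy_py = pd.DataFrame(columns=["feat_shared", "feat_py_only"])
_toy_cy = pd.DataFrame(columns=["feat_shared", "feat_cy_only"])
_toy_feature_labels = find_feature_diff(
    _toy_py,
    _toy_cy,
    plate="toy_plate",
    all_features=["feat_shared", "feat_py_only", "feat_cy_only", "feat_missing"],
)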
# In[4]:
# Set batch name
project = "2015_10_05_DrugRepurposing_AravindSubramanian_GolubLab_Broad"
batch = "2016_04_01_a549_48hr_batch1"
# Pycytominer plates are saved with 5 floating point decimals
round_decimals = 5
# Create the output directory
output_dir = pathlib.Path("results", batch)
output_dir.mkdir(parents=True, exist_ok=True)
# In[5]:
# Set input directories
# Note, pycytominer profiles are processed and exist in this repository
pycytominer_dir = pathlib.Path("../profiles/", batch)
# Note, cytominer profiles were processed separately and exist in many different locations.
# This location represents the exact files that were previously profiled using cytominer.
# The files were deposited on the Imaging Platform AWS S3 Bucket and downloaded locally.
# To reproduce the analysis, update the appropriate cytominer path.
home_dir = pycytominer_dir.home()
cytominer_dir = pathlib.Path(
f"{home_dir}/bucket/projects/{project}/workspace/backend/{batch}/"
)
# In[6]:
pycytominer_plate_files = build_file_dictionary(pycytominer_dir, tool="pycytominer")
cytominer_plate_files = build_file_dictionary(cytominer_dir, tool="cytominer")
# In[7]:
pycytominer_plates = set(sorted(pycytominer_plate_files.keys()))
cytominer_plates = set(sorted(cytominer_plate_files.keys()))
assert (
cytominer_plates == pycytominer_plates
), "Stop, not every plate is measured using both tools"
print(len(pycytominer_plates))
# In[8]:
level_3_mean_diff = []
level_3_completemean_diff = {}
level_3_median_diff = []
level_3_completemedian_diff = {}
level_3_sum_diff = []
level_3_completesum_diff = {}
level_4a_mean_diff = []
level_4a_completemean_diff = {}
level_4a_median_diff = []
level_4a_completemedian_diff = {}
level_4a_sum_diff = []
level_4a_completesum_diff = {}
level_4b_mean_diff = []
level_4b_completemean_diff = {}
level_4b_sum_diff = []
level_4b_median_diff = []
level_4b_completemedian_diff = {}
level_4b_completesum_diff = {}
level_4b_feature_select = []
test_pycytominer_select_mean_diff = []
test_pycytominer_select_completemean_diff = {}
test_pycytominer_select_sum_diff = []
test_pycytominer_select_median_diff = []
test_pycytominer_select_completemedian_diff = {}
test_pycytominer_select_completesum_diff = {}
test_pycytominer_select_feature_select = []
# Calculate metrics per plate
for plate in list(cytominer_plates):
# Calculate level 3 metrics
try:
pycyto_df, cyto_df = load_data(
plate,
pycytominer_plate_files,
cytominer_plate_files,
level="level_3",
round_decimals=round_decimals,
)
# Define features (note that the features were checked and aligned in load_data)
features = pycyto_df.columns.tolist()
# Get differences
(
mean_diff,
complete_mean_diff,
median_diff,
complete_median_diff,
sum_diff,
complete_sum_diff,
) = get_metrics(pycyto_df, cyto_df, features)
# Store results
level_3_mean_diff.append(mean_diff)
level_3_completemean_diff[plate] = complete_mean_diff
level_3_median_diff.append(median_diff)
level_3_completemedian_diff[plate] = complete_median_diff
level_3_sum_diff.append(sum_diff)
level_3_completesum_diff[plate] = complete_sum_diff
except KeyError:
continue
try:
# Calculate level 4a metrics
pycyto_df, cyto_df = load_data(
plate,
pycytominer_plate_files,
cytominer_plate_files,
level="level_4a",
round_decimals=round_decimals,
)
# Get differences
(
mean_diff,
complete_mean_diff,
median_diff,
complete_median_diff,
sum_diff,
complete_sum_diff,
) = get_metrics(pycyto_df, cyto_df, features)
# Store results
level_4a_mean_diff.append(mean_diff)
level_4a_completemean_diff[plate] = complete_mean_diff
level_4a_median_diff.append(median_diff)
level_4a_completemedian_diff[plate] = complete_median_diff
level_4a_sum_diff.append(sum_diff)
level_4a_completesum_diff[plate] = complete_sum_diff
except KeyError:
continue
try:
# Calculate level 4b metrics
pycyto_df, cyto_df = load_data(
plate,
pycytominer_plate_files,
cytominer_plate_files,
level="level_4b",
round_decimals=round_decimals,
)
# Determine feature selection differences
feature_select_df = find_feature_diff(pycyto_df, cyto_df, plate, features)
features_present_in_both = feature_select_df.loc[
feature_select_df.loc[:, plate] == "present_both", plate
].index.tolist()
# Get differences
(
mean_diff,
complete_mean_diff,
median_diff,
complete_median_diff,
sum_diff,
complete_sum_diff,
) = get_metrics(pycyto_df, cyto_df, features_present_in_both)
# Store results
level_4b_mean_diff.append(mean_diff)
level_4b_completemean_diff[plate] = complete_mean_diff
level_4b_median_diff.append(median_diff)
level_4b_completemedian_diff[plate] = complete_median_diff
level_4b_sum_diff.append(sum_diff)
level_4b_completesum_diff[plate] = complete_sum_diff
level_4b_feature_select.append(feature_select_df)
except KeyError:
continue
try:
# Test pycytominer feature selection
pycyto_df, cyto_df = load_data(
plate,
pycytominer_plate_files,
cytominer_plate_files,
level="pycytominer_select",
round_decimals=round_decimals,
)
# Define features (note that the features were checked and aligned in load_data)
features = pycyto_df.columns.tolist()
# Get differences
(
mean_diff,
complete_mean_diff,
median_diff,
complete_median_diff,
sum_diff,
complete_sum_diff,
) = get_metrics(pycyto_df, cyto_df, features)
# Store results
test_pycytominer_select_mean_diff.append(mean_diff)
test_pycytominer_select_completemean_diff[plate] = complete_mean_diff
test_pycytominer_select_median_diff.append(median_diff)
test_pycytominer_select_completemedian_diff[plate] = complete_median_diff
test_pycytominer_select_sum_diff.append(sum_diff)
test_pycytominer_select_completesum_diff[plate] = complete_sum_diff
except KeyError:
continue
# ## Compile Results
# In[9]:
missing_plate = list(
set(cytominer_plates).difference(set(list(level_3_completemean_diff.keys())))
)
level_3_plates = list(cytominer_plates)
level_3_plates.remove(missing_plate[0])
level_3_mean_diff_df = pd.concat(level_3_mean_diff, axis="columns", sort=True)
level_3_mean_diff_df.columns = level_3_plates
level_3_completemean_diff_df = pd.DataFrame(
level_3_completemean_diff, index=["complete_mean_diff"]
).transpose()
level_3_median_diff_df = pd.concat(level_3_median_diff, axis="columns", sort=True)
level_3_median_diff_df.columns = level_3_plates
level_3_completemedian_diff_df = pd.DataFrame(
level_3_completemedian_diff, index=["complete_median_diff"]
).transpose()
level_3_sum_diff_df = pd.concat(level_3_sum_diff, axis="columns", sort=True)
level_3_sum_diff_df.columns = level_3_plates
level_3_completesum_diff_df = pd.DataFrame(
level_3_completesum_diff, index=["complete_sum_diff"]
).transpose()
# In[10]:
missing_plate = list(
set(cytominer_plates).difference(set(list(level_4a_completemean_diff.keys())))
)
level_4a_plates = list(cytominer_plates)
level_4a_plates.remove(missing_plate[0])
level_4a_mean_diff_df = pd.concat(level_4a_mean_diff, axis="columns")
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = 'Input must be a list / sequence of tuple-likes.'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='abc')
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
columns=['a', 'b', 'c']).set_index(['a', 'b'])
idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
result = pd.DataFrame([2, 3], columns=['c'], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
([], []),
(['foo', 'bar', 'baz'], []),
([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
names = ['A', 'B']
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
codes=[[], []], names=names)
    tm.assert_index_equal(result, expected)
#io
from pathlib import Path
from io import StringIO
import logging
import fnmatch
# calcs
import warnings
import math
import numpy as np
np.seterr(all='print')
import pandas as pd
from scipy.stats import linregress
# app
import methylcheck
LOGGER = logging.getLogger(__name__)
__all__ = ['ControlsReporter', 'controls_report']
class ControlsReporter():
"""Class used by controls_report() to produce XLSX summary of control probe performance.
    This will load all the methylprep control and raw output data, then perform the calculations recommended by the manufacturer
    and produce a tidy color-coded XLSX file with the results. This function is analogous to methylcheck.plot_controls() except that the output is a color-coded Excel sheet instead of charts.
    The last column, "Result", will contain OK (green), MARGINAL (yellow), or FAIL (red) as a summary of all other tests.
    If there is a meta data pickle with a Sex or Gender column, the predicted sex is compared against it.
    Sex in the samplesheet must be reported as "M" or "F", not 0/1 or "Male"/"Female", etc.
    Otherwise, it simply runs and predicts the sex for non-mouse arrays.
    Note on GCT scores: this uses noob_meth instead of raw, uncorrected meth values for the calculation, but the result should be nearly the same.
"""
input_filenames = {
'control_probes.pkl': 'control',
'poobah_values.pkl': 'poobah',
# FUTURE?? load these separately, and only if there is a reason to run a sex prediction. But sex done by default.
'noob_meth_values.pkl': 'noob_meth',
'noob_unmeth_values.pkl': 'noob_unmeth',
}
samplesheet_patterns = {
'*meta_data*.pkl': 'samplesheet', # meta_data is used first, if both available
'*samplesheet*.csv': 'samplesheet',
'*sample_sheet*.csv': 'samplesheet',
}
# does NOT use m_values or 'beta_values.pkl'
legacy_columns = {
        # None tells the code not to include the column in legacy mode. The Sample column is handled in a special way.
#'Sample': 'Sample Name', # leave off; would need to come from sample sheet meta data
#ADD Sentrix Barcode, Sentrix Position from pkl columns
'Sample': None,
'Restoration Green': 'Restoration',
'Staining Green': 'StainingGreen',
'Staining Red': 'StainingRed',
'Extension Green': 'ExtensionGreen',
'Extension Red': 'ExtensionRed',
'Hybridization Green (High/Medium)': 'HybridizationHighMedium',
'Hybridization Green (Medium/Low)': 'HybridizationMediumLow',
'Target Removal Green 1': 'TargetRemoval1',
'Target Removal Green 2': 'TargetRemoval2',
'Bisulfite Conversion I Green C/U': 'BisulfiteConversion1Green',
'Bisulfite Conversion I Green bkg/U': 'BisulfiteConversion1BackgroundGreen',
'Bisulfite Conversion I Red C/U': 'BisulfiteConversion1Red',
'Bisulfite Conversion I Red bkg/U': 'BisulfiteConversion1BackgroundRed',
'Bisulfite Conversion II Red/Green': 'BisulfiteConversion2',
'Bisulfite Conversion II bkg/Green': 'BisulfiteConversion2Background',
'Specificity I Green': 'Specificity1Green',
'Specificity I Red': 'Specificity1Red',
'Specificity II': 'Specificity2',
'Specificity II Bkg': 'Specificity2Background',
'Non-polymorphic Green': 'NonPolymorphicGreen',
'Non-polymorphic Red': 'NonPolymorphicRed',
# additional columns that WON'T appear in legacy report
'Baseline Green': None,
'Baseline Red': None,
'Negative Baseline G': None,
'Negative Baseline R': None,
'NORM_A': None,
'NORM_T': None,
'NORM_C': None,
'NORM_G': None,
'Result': None,
'Passing Probes': None,
'Regression NORM_GA': None,
'Regression NORM_CT': None,
'Predicted Sex': None,
'Sex Match': None,
'GCT score': None,
}
untestable_columns = ['Baseline Green',
'Baseline Red', 'Negative Baseline G',
'Negative Baseline R', 'NORM_A','NORM_T',
'NORM_C', 'NORM_G',
'Passing Probes',
'Result',
'Predicted Sex']
def __init__(self, filepath, outfilepath=None, bg_offset=3000, cutoff_adjust=1.0, colorblind=False,
roundoff=2, legacy=False, pval=True, pval_sig=0.05, passing=0.7, project_name=None):
self.filepath = filepath # folder with methyprep processed data
self.bg_offset = bg_offset
self.cut = cutoff_adjust # for adjusting minimum passing per test
self.legacy = legacy # how output XLSX should be formatted
self.roundoff = 1 if self.legacy else roundoff
self.pval = pval # whether to include poobah in tests
self.pval_sig = pval_sig # significance level to define a failed probe
self.passing = passing # fraction of tests that all need to pass for sample to pass
self.project_name = project_name # used to name the QC report, if defined.
# if outfilepath is not provided, saves to the same folder where the pickled dataframes are located.
if not outfilepath:
self.outfilepath = filepath
else:
self.outfilepath = outfilepath
for filename in Path(filepath).rglob('*.pkl'):
if filename.name in self.input_filenames.keys():
setattr(self, self.input_filenames[filename.name], pd.read_pickle(filename))
# fuzzy matching samplesheet
for filename in Path(filepath).rglob('*'):
if any([fnmatch.fnmatch(filename.name, samplesheet_pattern) for samplesheet_pattern in self.samplesheet_patterns.keys()]):
#label = next(label for (patt,label) in samplesheet_patterns.items() if fnmatch.fnmatch(filename.name, patt))
if '.pkl' in filename.suffixes:
setattr(self, 'samplesheet', pd.read_pickle(filename))
elif '.csv' in filename.suffixes:
try:
from methylprep.files import SampleSheet
except ImportError:
raise ImportError("parsing a sample sheet CSV requires `methylprep` be installed first.")
#uses methylprep.files.SampleSheet() instead of --- pd.read_csv(filename) --- to support legacy [header] format(s).
sample_sheet = SampleSheet(filename, filepath)
setattr(self, 'samplesheet', sample_sheet._SampleSheet__data_frame)
break
if not hasattr(self,'control'):
raise FileNotFoundError(f"Could not locate control_probes.pkl file in {filepath}")
if not hasattr(self,'poobah') and self.pval is True:
raise FileNotFoundError(f"Could not locate poobah_values.pkl file in {filepath}; re-run and set 'pval=False' to skip calculating probe failures.")
if hasattr(self,'samplesheet'):
if isinstance(self.samplesheet, pd.DataFrame):
# methylprep v1.5.4-6 was creating meta_data files with two Sample_ID columns. Check and fix here:
if any(self.samplesheet.columns.duplicated()):
self.samplesheet = self.samplesheet.loc[:, ~self.samplesheet.columns.duplicated()]
LOGGER.info("Removed a duplicate Sample_ID column in samplesheet")
if 'Sample_ID' in self.samplesheet:
self.samplesheet = self.samplesheet.set_index('Sample_ID')
elif 'Sentrix_ID' in self.samplesheet and 'Sentrix_Position' in self.samplesheet:
self.samplesheet['Sample_ID'] = self.samplesheet['Sentrix_ID'].astype(str) + '_' + self.samplesheet['Sentrix_Position']
self.samplesheet = self.samplesheet.set_index('Sample_ID')
else:
raise TypeError("Meta Data from Samplesheet is not a valid dataframe.")
if (hasattr(self,'samplesheet') and
(any(('Gender' in item.title()) for item in self.samplesheet.columns) or
any(('Sex' in item.title()) for item in self.samplesheet.columns))):
self.predict_sex = True
# make sure case is correct
if ('Gender' in self.samplesheet.columns or 'Sex' in self.samplesheet.columns):
pass
else:
self.samplesheet.columns = [(col.title() if col.lower() in ('sex','gender') else col) for col in self.samplesheet.columns]
else:
self.predict_sex = False
#if hasattr(self,'samplesheet') and self.predict_sex is False:
#pass # I could add user info that explains why there won't be a sex prediction column.
self.norm_regressions = {} # sample : all data from calc
self.sex_extra = {} # sample: all data from get_sex()
self.report = [] # will convert to DF after collecting data; faster; pd.DataFrame(columns=self.report_columns)
self.formulas = {} # col: formula as string/note
self.data = {} # sample: {col: <colname>, val: ___, pass: ...} for coloring boxes
if colorblind:
self.cpass = '#EDA247'
self.cmid = '#FFDD71'
self.cfail = '#57C4AD'
else:
self.cpass = '#F26C64'
self.cmid = '#FFDD71'
self.cfail = '#69B764'
def process_sample(self, sample, con):
""" process() will run this throug all samples, since structure of control data is a dict of DFs
bg_offset = Background correction offset.
Default value: 3000
(applies to all background calculations, indicated with (bkg +x).)
NEGATIVE control probes are used as the baseline for p-val calculations.
see also infinium-hd-methylation-guide-15019519-01.pdf for list of expected intensity per type
MOUSE conversions (or proxy)
baseline_G Extension -- missing -- use NEGATIVE Hairpin probes as proxy
-- maybe take average of NORM_A/NORM_T (Green) as proxy for background?
baseline_R Extension -- missing -- NORM_C + NORM_G Red proxy
BIS I II OK
SPEC OK
RESTORATION OK
non-poly OK
hyb OK HIGH = 3_HIGH_MM_50.1_1, mid/low? 90_YEAST_3MM_50.1_1
within NATIVE
non_specific
GT mismatch
NO staining found
target removal
"""
# to get a list of these probes, use
# con[(~con['Control_Type'].isna()) & (~con['Control_Type'].isin(['NEGATIVE','NORM_A','NORM_T','NORM_C','NORM_G']))][['Control_Type','Extended_Type','Color']]
# baseline = (Extension Green highest A or T intensity) + offset
mouse = False
try:
baseline_G = max([con[con['Extended_Type'] == 'Extension (A)']['Mean_Value_Green'].values[0], con[con['Extended_Type'] == 'Extension (T)']['Mean_Value_Green'].values[0] ]) + self.bg_offset
baseline_R = max([con[con['Extended_Type'] == 'Extension (C)']['Mean_Value_Red'].values[0], con[con['Extended_Type'] == 'Extension (G)']['Mean_Value_Red'].values[0] ]) + self.bg_offset
except: # assume mouse
mouse = True
baseline_G = con[con['Extended_Type'] == 'T_Hairpin2.1_1']['Mean_Value_Green'].values[0] + self.bg_offset
baseline_R = con[con['Extended_Type'] == 'G_Hairpin2.1_1']['Mean_Value_Red'].values[0] + self.bg_offset
# ("Green"/(bkg+x)) > 0* | restoration_green is Green Channel Intensity/Background.
self.restoration_green = round( con[con['Extended_Type'].isin(['Restore','neg_ALDOB_3915-4004_1'])]['Mean_Value_Green'].values[0] / baseline_G, self.roundoff)
if mouse:
self.staining_green = np.nan
self.staining_red = np.nan
else:
# (Biotin High/Biotin Bkg) > 5
self.staining_green = round( con[con['Extended_Type'] == 'Biotin (High)']['Mean_Value_Green'].values[0] / con[con['Extended_Type'] == 'Biotin (Bkg)']['Mean_Value_Green'].values[0], self.roundoff)
# (DNP High > DNP Bkg) > 5
self.staining_red = round( con[con['Extended_Type'] == 'DNP (High)']['Mean_Value_Red'].values[0] / con[con['Extended_Type'] == 'DNP (Bkg)']['Mean_Value_Red'].values[0], self.roundoff)
if mouse:
self.extension_green = round( con[con['Extended_Type'] == 'G_Hairpin2.1_1']['Mean_Value_Green'].values[0] / con[con['Extended_Type'] == 'T_Hairpin2.1_1']['Mean_Value_Green'].values[0], self.roundoff)
self.extension_red = round( con[con['Extended_Type'] == 'T_Hairpin2.1_1']['Mean_Value_Red'].values[0] / con[con['Extended_Type'] == 'G_Hairpin2.1_1']['Mean_Value_Red'].values[0], self.roundoff)
else:
# GREEN min(C or G)/max(A or T) > 5
self.extension_green = round( min( con[con['Extended_Type'] == 'Extension (C)']['Mean_Value_Green'].values[0], con[con['Extended_Type'] == 'Extension (G)']['Mean_Value_Green'].values[0]) / max( con[con['Extended_Type'] == 'Extension (A)']['Mean_Value_Green'].values[0], con[con['Extended_Type'] == 'Extension (T)']['Mean_Value_Green'].values[0]), self.roundoff)
# RED max(C or G)/min(A or T) > 5
self.extension_red = round( min( con[con['Extended_Type'] == 'Extension (A)']['Mean_Value_Red'].values[0], con[con['Extended_Type'] == 'Extension (T)']['Mean_Value_Red'].values[0]) / max( con[con['Extended_Type'] == 'Extension (C)']['Mean_Value_Red'].values[0], con[con['Extended_Type'] == 'Extension (G)']['Mean_Value_Red'].values[0]), self.roundoff)
if mouse:
# Hyb (High/Med)
self.hybridization_green_A = round( con[con['Extended_Type']=='3_HIGH_MM_50.1_1']['Mean_Value_Green'].values[0] / con[con['Extended_Type']=='90_YEAST_3MM_50.1_1']['Mean_Value_Green'].values[0], self.roundoff)
self.hybridization_green_B = np.nan
else:
# Hyb (High/Med) > 1
self.hybridization_green_A = round( con[con['Extended_Type']=='Hyb (High)']['Mean_Value_Green'].values[0] / con[con['Extended_Type']=='Hyb (Medium)']['Mean_Value_Green'].values[0], self.roundoff)
# Hyb (Med/Low) > 1
self.hybridization_green_B = round( con[con['Extended_Type']=='Hyb (Medium)']['Mean_Value_Green'].values[0] / con[con['Extended_Type']=='Hyb (Low)']['Mean_Value_Green'].values[0], self.roundoff)
# Hyb (High > Med > Low)
#self.hybridization_green_C = round( con[con['Extended_Type']=='Hyb (High)']['Mean_Value_Green'].values[0] / con[con['Extended_Type']=='Hyb (Medium)']['Mean_Value_Green'].values[0], self.roundoff)
if mouse:
self.target_removal_green_1 = np.nan
self.target_removal_green_2 = np.nan
else:
# Target ctrl 1 ≤ bkg
self.target_removal_green_1 = round( baseline_G / con[con['Extended_Type'] == 'Target Removal 1']['Mean_Value_Green'].values[0], self.roundoff)
# Target ctrl 2 ≤ bkg
self.target_removal_green_2 = round( baseline_G / con[con['Extended_Type'] == 'Target Removal 2']['Mean_Value_Green'].values[0], self.roundoff)
# con[con['Extended_Type']=='BS_Conversion_I_24_1']['Mean_Value_Green'].values[0],
if mouse:
# BS_Conversion_I_54_1, BS_Conversion_I_55_1, BS_Conversion_I_24_1,
# BS_Conversion_II_5_1, BS_Conversion_II_21_1, BS_Conversion_I_17_1, BS_Conversion_I_72_1
# higher: 54, 17
# lower: 24, 55, 72
self.bisulfite_conversion_I_green_CU = round(
con[con['Extended_Type']=='BS_Conversion_I_54_1']['Mean_Value_Green'].values[0] /
con[con['Extended_Type']=='BS_Conversion_I_55_1']['Mean_Value_Green'].values[0], self.roundoff)
self.bisulfite_conversion_I_green_bkg_U = round( baseline_G / con[con['Extended_Type']=='BS_Conversion_I_55_1']['Mean_Value_Green'].values[0], self.roundoff)
self.bisulfite_conversion_I_red_CU = round(
con[con['Extended_Type']=='BS_Conversion_I_54_1']['Mean_Value_Red'].values[0] /
con[con['Extended_Type']=='BS_Conversion_I_55_1']['Mean_Value_Red'].values[0], self.roundoff)
self.bisulfite_conversion_I_red_bkg_U = round( baseline_R / con[con['Extended_Type']=='BS_Conversion_I_55_1']['Mean_Value_Red'].values[0], self.roundoff)
self.bisulfite_conversion_II_red_ratio = round( min([
con[con['Extended_Type']=='BS_Conversion_II_5_1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS_Conversion_II_21_1']['Mean_Value_Red'].values[0]
]) / max([
con[con['Extended_Type']=='BS_Conversion_II_5_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS_Conversion_II_21_1']['Mean_Value_Green'].values[0]
]), self.roundoff)
self.bisulfite_conversion_II_green_bkg = round( baseline_G / max([
con[con['Extended_Type']=='BS_Conversion_II_5_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS_Conversion_II_21_1']['Mean_Value_Green'].values[0]
]), self.roundoff)
else:
# BS min(C1, 2, or 3) / BS max(U1, 2, 3) > 1
# META NOTE: BS Conversion I-C1 is "I C1" in 450k. U1 also differs.
# META NOTE: I had to ignore I-C3 and I-U3, as these always gave really low intensity outputs. The C1/U2 combo seems to work in practice.
self.bisulfite_conversion_I_green_CU = round( min([
con[con['Extended_Type'].isin(['BS Conversion I-C1','BS Conversion I C1'])]['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-C2']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='BS Conversion I-C3']['Mean_Value_Green'].values[0]
]) / max([
con[con['Extended_Type'].isin(['BS Conversion I-U1','BS Conversion I U1'])]['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-U2']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='BS Conversion I-U3']['Mean_Value_Green'].values[0]
]), self.roundoff)
# Bisulfite Conversion I Green U ≤ bkg | ((bkg + x)/U) > 1
self.bisulfite_conversion_I_green_bkg_U = round( baseline_G / max([
con[con['Extended_Type'].isin(['BS Conversion I-U1','BS Conversion I U1'])]['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-U2']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-U3']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-U4']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion I-U5']['Mean_Value_Green'].values[0]
]), self.roundoff)
# Bisulfite Conversion I Red (C4, 5, 6) / (U4, 5, 6) > 1
self.bisulfite_conversion_I_red_CU = round( min([
con[con['Extended_Type'].isin(['BS Conversion I-C4','BS Conversion I C4'])]['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-C5']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='BS Conversion I-C6']['Mean_Value_Red'].values[0]
]) / max([
con[con['Extended_Type'].isin(['BS Conversion I-U4','BS Conversion I U4'])]['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-U5']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='BS Conversion I-U6']['Mean_Value_Red'].values[0]
]), self.roundoff)
# Bisulfite Conversion I Red U ≤ bkg | ((bkg + x)/U) > 1
self.bisulfite_conversion_I_red_bkg_U = round( baseline_R / max([con[con['Extended_Type'].isin(['BS Conversion I-U1','BS Conversion I U1'])]['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-U2']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-U3']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-U4']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion I-U5']['Mean_Value_Red'].values[0]
]), self.roundoff)
            #### min & max derived by comparing with expected output, because the manufacturer's reference guide was unclear here.
# Bisulfite Conversion II min(Red) > C max(Green)
self.bisulfite_conversion_II_red_ratio = round( min([
con[con['Extended_Type']=='BS Conversion II-1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion II-2']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion II-3']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='BS Conversion II-4']['Mean_Value_Red'].values[0]
]) / max([
con[con['Extended_Type']=='BS Conversion II-1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-2']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-3']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-4']['Mean_Value_Green'].values[0]
]), self.roundoff)
# BiSulfite Conversion II C green ≤ bkg | (bkg + x)/ max(Green) > 1
self.bisulfite_conversion_II_green_bkg = round( baseline_G / max([
con[con['Extended_Type']=='BS Conversion II-1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-2']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-3']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='BS Conversion II-4']['Mean_Value_Green'].values[0]
]), self.roundoff)
if mouse:
# Non_Specific_I_11_1, Non_Specific_I_24_1, Non_Specific_I_3_1,
# Non_Specific_II_1_1, Non_Specific_II_17_1, GT_mismatch_ATG2_12259-12348_1
# Non_Specific_I_30_1, Non_Specific_I_20_1, Non_Specific_I_9_1,
# Non_Specific_I_15_1, Non_Specific_I_14_1, Non_Specific_I_42_1
self.specificity_I_green = round(
con[con['Extended_Type']=='Non_Specific_I_42_1']['Mean_Value_Green'].values[0]
/ max(
con[con['Extended_Type']=='Non_Specific_I_24_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Non_Specific_I_11_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Non_Specific_I_3_1']['Mean_Value_Green'].values[0],
), self.roundoff)
self.specificity_I_red = round(
con[con['Extended_Type']=='Non_Specific_I_42_1']['Mean_Value_Red'].values[0]
/ max([
con[con['Extended_Type']=='Non_Specific_I_24_1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='Non_Specific_I_11_1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='Non_Specific_I_3_1']['Mean_Value_Red'].values[0],
]), self.roundoff)
self.specificity_II = round( min([
con[con['Extended_Type']=='Non_Specific_II_17_1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='Non_Specific_II_1_1']['Mean_Value_Red'].values[0],
]) / max([
con[con['Extended_Type']=='Non_Specific_II_17_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Non_Specific_II_1_1']['Mean_Value_Green'].values[0],
]), self.roundoff)
self.specificity_II_bkg= round( baseline_G / max([
con[con['Extended_Type']=='Non_Specific_II_17_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Non_Specific_II_1_1']['Mean_Value_Green'].values[0],
]), self.roundoff)
else:
# ignoring controls 4,5,6 gave me the expected output
# Specificity I Green (min(PM)/max(MM)) > 1
self.specificity_I_green = round( min([
con[con['Extended_Type']=='GT Mismatch 1 (PM)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='GT Mismatch 2 (PM)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='GT Mismatch 3 (PM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 4 (PM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 5 (PM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 6 (PM)']['Mean_Value_Green'].values[0],
]) / max([
con[con['Extended_Type']=='GT Mismatch 1 (MM)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='GT Mismatch 2 (MM)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='GT Mismatch 3 (MM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 4 (MM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 5 (MM)']['Mean_Value_Green'].values[0],
#con[con['Extended_Type']=='GT Mismatch 6 (MM)']['Mean_Value_Green'].values[0],
]), self.roundoff)
# ignoring controls 1,2,3 here gave me the expected result
# Specificity I Red (min(PM)/max(MM)) > 1
self.specificity_I_red = round( min([
#con[con['Extended_Type']=='GT Mismatch 1 (PM)']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='GT Mismatch 2 (PM)']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='GT Mismatch 3 (PM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 4 (PM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 5 (PM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 6 (PM)']['Mean_Value_Red'].values[0],
]) / max([
#con[con['Extended_Type']=='GT Mismatch 1 (MM)']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='GT Mismatch 2 (MM)']['Mean_Value_Red'].values[0],
#con[con['Extended_Type']=='GT Mismatch 3 (MM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 4 (MM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 5 (MM)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='GT Mismatch 6 (MM)']['Mean_Value_Red'].values[0],
]), self.roundoff)
# Specificity 1, Specificity 2, Specificity 3
# Specificity II (S Red/ S Green) > 1
self.specificity_II = round( min([
con[con['Extended_Type']=='Specificity 1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='Specificity 2']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='Specificity 3']['Mean_Value_Red'].values[0],
]) / max([
con[con['Extended_Type']=='Specificity 1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Specificity 2']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Specificity 3']['Mean_Value_Green'].values[0],
]), self.roundoff)
# Specificity II (background/ Spec Green) > 1
self.specificity_II_bkg = round( baseline_G / max([
con[con['Extended_Type']=='Specificity 1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Specificity 2']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='Specificity 3']['Mean_Value_Green'].values[0],
]), self.roundoff)
if mouse:
self.nonpolymorphic_green_lowCG_highAT = round( min([
con[con['Extended_Type']=='nonPolyG_PPIH_9298-9387_1']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='nonPolyC_PPIE_21091-21180_1']['Mean_Value_Green'].values[0],
]) / max([
con[con['Extended_Type']=='nonPolyT_ALDOB_10349-10438_1']['Mean_Value_Green'].values[0]
]), self.roundoff)
self.nonpolymorphic_red_lowAT_highCG = round( min([
con[con['Extended_Type']=='nonPolyT_ALDOB_10349-10438_1']['Mean_Value_Red'].values[0],
]) / max([
con[con['Extended_Type']=='nonPolyC_PPIE_21091-21180_1']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='nonPolyG_PPIH_9298-9387_1']['Mean_Value_Red'].values[0],
]), self.roundoff)
else:
# Nonpolymorphic Green (min(CG)/ max(AT)) > 5
self.nonpolymorphic_green_lowCG_highAT = round( min([
con[con['Extended_Type']=='NP (C)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='NP (G)']['Mean_Value_Green'].values[0],
]) / max([
con[con['Extended_Type']=='NP (A)']['Mean_Value_Green'].values[0],
con[con['Extended_Type']=='NP (T)']['Mean_Value_Green'].values[0],
]), self.roundoff)
# Nonpolymorphic Red (min(AT)/ max(CG)) > 5
self.nonpolymorphic_red_lowAT_highCG = round( min([
con[con['Extended_Type']=='NP (A)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='NP (T)']['Mean_Value_Red'].values[0],
]) / max([
con[con['Extended_Type']=='NP (C)']['Mean_Value_Red'].values[0],
con[con['Extended_Type']=='NP (G)']['Mean_Value_Red'].values[0],
]), self.roundoff)
# ADDITIONAL tests
self.negative_control_mean_green = round( np.mean(
con[con['Control_Type'] == 'NEGATIVE']['Mean_Value_Green'].values,
))
self.negative_control_mean_red = round( np.mean(
con[con['Control_Type'] == 'NEGATIVE']['Mean_Value_Red'].values,
))
# The Illumina MethylationEPIC BeadChip contains 85 pairs of internal normalization control
# probes (name with prefix NORM_A, NORM_T, NORM_G or NORM_C), while its predecessor, Illumina
# HumanMethyl-ation450 BeadChip contains 93 pairs. RELIC first performs a regression on the
# logarithms of the intensity values of the normalization control probes to derive a quantitative
# relationship between red and green channels, and then uses the relationship to correct for
# dye-bias on intensity values for whole array.
# https://rdrr.io/bioc/ENmix/man/relic.html
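        # Clarifying note (added): the regression below fits Red ~ slope * Green + intercept
        # across paired normalization control probes using scipy's linregress on the raw
        # (not log-transformed) Mean_Value intensities; the reported number is the Pearson
        # r, and a value near 1 indicates the two dye channels track each other linearly.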
if mouse:
GA = ['Norm_G38_1', 'Norm_G72_1', 'Norm_G77_1', 'Norm_A38_1', 'Norm_A72_1', 'Norm_A77_1']
LinregressResult_GA = linregress(
con[(con['Control_Type'] == 'NORM_G') & (con['Extended_Type'].isin(GA))].sort_values(by='Extended_Type')['Mean_Value_Green'].values,
con[(con['Control_Type'] == 'NORM_A') & (con['Extended_Type'].isin(GA))].sort_values(by='Extended_Type')['Mean_Value_Red'].values,
)
self.regression_NORM_GA = round(LinregressResult_GA.rvalue,2)
CT = ['Norm_C12_1', 'Norm_C82_1', 'Norm_C84_1', 'Norm_C86_1', 'Norm_C93_1', 'Norm_C99_1',
'Norm_T12_1', 'Norm_T82_1', 'Norm_T84_1', 'Norm_T86_1', 'Norm_T93_1', 'Norm_T99_1']
LinregressResult_CT = linregress(
con[(con['Control_Type'] == 'NORM_C') & (con['Extended_Type'].isin(CT))].sort_values(by='Extended_Type')['Mean_Value_Green'].values,
con[(con['Control_Type'] == 'NORM_T') & (con['Extended_Type'].isin(CT))].sort_values(by='Extended_Type')['Mean_Value_Red'].values,
)
self.regression_NORM_CT = round(LinregressResult_CT.rvalue,2)
print(f"{sample} GA r={round(LinregressResult_GA.rvalue,2)} ±{round(LinregressResult_GA.stderr,2)} p<{round(LinregressResult_GA.pvalue,5)} |CT r={round(LinregressResult_CT.rvalue,2)} ±{round(LinregressResult_CT.stderr,2)} p<{round(LinregressResult_CT.pvalue,5)}")
self.norm_regressions[sample] = {
'GA': {'rvalue': round(LinregressResult_GA.rvalue,2),
'pvalue': round(LinregressResult_GA.pvalue,2),
'stderr': round(LinregressResult_GA.stderr,2),
'slope': round(LinregressResult_GA.slope,2),
'intercept': round(LinregressResult_GA.intercept,2),
},
'CT': {'rvalue': round(LinregressResult_CT.rvalue,2),
'pvalue': round(LinregressResult_CT.pvalue,2),
'stderr': round(LinregressResult_CT.stderr,2),
'slope': round(LinregressResult_CT.slope,2),
'intercept': round(LinregressResult_CT.intercept,2),
},
}
"""
this gave terrible results, because probes are apples and oranges:
drop 2 from T
drop 1 from A
CT = [12, 82, 84, 86, 93, 99]
['Norm_T11_1', 'Norm_T12_1', 'Norm_T15_1', 'Norm_T18_1',
'Norm_T1_1', 'Norm_T23_1', 'Norm_T24_1', 'Norm_T26_1', 'Norm_T3_1',
'Norm_T45_1', 'Norm_T48_1', 'Norm_T4_1', 'Norm_T60_1',
'Norm_T62_1', 'Norm_T6_1', 'Norm_T73_1', 'Norm_T81_1',
'Norm_T82_1', 'Norm_T83_1', 'Norm_T84_1', 'Norm_T86_1',
'Norm_T93_1', 'Norm_T96_1', 'Norm_T99_1']
['Norm_C12_1', 'Norm_C13_1', 'Norm_C19_1', 'Norm_C34_1',
'Norm_C36_1', 'Norm_C43_1', 'Norm_C44_1', 'Norm_C45_1',
'Norm_C48_1', 'Norm_C49_1', 'Norm_C57_1', 'Norm_C61_1',
'Norm_C65_1', 'Norm_C73_1', 'Norm_C74_1', 'Norm_C80_1',
'Norm_C82_1', 'Norm_C84_1', 'Norm_C86_1', 'Norm_C90_1',
'Norm_C93_1', 'Norm_C99_1']
GA = [38, 72, 77]
['Norm_G28_1', 'Norm_G31_1', 'Norm_G35_1', 'Norm_G38_1',
'Norm_G50_1', 'Norm_G61_1', 'Norm_G72_1', 'Norm_G77_1',
'Norm_G91_1']
['Norm_A14_1', 'Norm_A15_1', 'Norm_A38_1', 'Norm_A49_1',
'Norm_A65_1', 'Norm_A72_1', 'Norm_A77_1', 'Norm_A79_1',
'Norm_A81_1', 'Norm_A95_1']
"""
else:
LinregressResult_GA = linregress(
con[con['Control_Type'] == 'NORM_G'].sort_values(by='Extended_Type')['Mean_Value_Green'].values,
con[con['Control_Type'] == 'NORM_A'].sort_values(by='Extended_Type')['Mean_Value_Red'].values,
)
self.regression_NORM_GA = round(LinregressResult_GA.rvalue,2)
LinregressResult_CT = linregress(
con[con['Control_Type'] == 'NORM_C'].sort_values(by='Extended_Type')['Mean_Value_Green'].values,
con[con['Control_Type'] == 'NORM_T'].sort_values(by='Extended_Type')['Mean_Value_Red'].values,
)
self.regression_NORM_CT = round(LinregressResult_CT.rvalue,2)
print(f"{sample} GA r={round(LinregressResult_GA.rvalue,2)} ±{round(LinregressResult_GA.stderr,2)} p<{round(LinregressResult_GA.pvalue,5)} |CT r={round(LinregressResult_CT.rvalue,2)} ±{round(LinregressResult_CT.stderr,2)} p<{round(LinregressResult_CT.pvalue,5)}")
# BELOW: including mean values is less useful than providing the regression coefficient for cross-channel linearity.
#self.norm_A_mean_green = round( np.mean(
#con[con['Control_Type'] == 'NORM_A']['Mean_Value_Green'].values,
#))
#self.norm_T_mean_green = round( np.mean(
#con[con['Control_Type'] == 'NORM_T']['Mean_Value_Green'].values,
#))
#self.norm_C_mean_red = round( np.mean(
#con[con['Control_Type'] == 'NORM_C']['Mean_Value_Red'].values,
#))
#self.norm_G_mean_red = round( np.mean(
#con[con['Control_Type'] == 'NORM_G']['Mean_Value_Red'].values,
#))
if hasattr(self,'noob_meth') and hasattr(self,'gct_scores') and self.gct_scores.get(sample) is not None:
# sample var should match a column in noob_meth for this to work. And it only runs this function once per batch.
self.gct_score = self.gct_scores[sample]
else:
self.gct_score = None
# had to flip this to >80% passing, because all tests are ABOVE thresholds
if hasattr(self,'poobah') and isinstance(self.poobah, pd.DataFrame) and sample in self.poobah.columns:
self.failed_probes = round( 100*len(self.poobah[sample][ self.poobah[sample] > self.pval_sig ]) / len(self.poobah[sample]), 1)
else:
self.failed_probes = 0 # will be removed later
self.data[sample] = [
{'col': 'Restoration Green', 'val': self.restoration_green, 'min': -1, 'mid': -0.1, 'max':0, 'formula': "(Restore(Green)/ background) > 0"},
{'col': 'Staining Green', 'val': self.staining_green, 'max':(5*self.cut), 'formula': f"(Biotin High/ Biotin background) > {5*self.cut}"},
{'col': 'Staining Red', 'val': self.staining_red, 'max':(5*self.cut), 'formula': f"(DNP High/ DNP background) > {5*self.cut}"},
{'col': 'Extension Green', 'val':self.extension_green, 'max':(5*self.cut), 'formula': f"min(C or G)/ max(A or T) > {5*self.cut}"},
{'col': 'Extension Red', 'val':self.extension_red, 'max':(5*self.cut), 'formula': f"max(C or G)/ min(A or T) > {5*self.cut}"},
{'col': 'Hybridization Green (High/Medium)', 'val':self.hybridization_green_A, 'max':self.cut, 'formula': f"Hyb (High/Med) > {self.cut}"},
{'col': 'Hybridization Green (Medium/Low)', 'val':self.hybridization_green_B, 'max':self.cut, 'formula': f"Hyb (Med/Low) > {self.cut}"},
{'col': 'Target Removal Green 1', 'val':self.target_removal_green_1, 'max':self.cut, 'formula': f"(Target ctrl 1 ≤ background) > {self.cut}"},
{'col': 'Target Removal Green 2', 'val':self.target_removal_green_2, 'max':self.cut, 'formula': f"(Target ctrl 2 ≤ background) > {self.cut}"},
{'col': 'Bisulfite Conversion I Green C/U', 'val':self.bisulfite_conversion_I_green_CU, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS min(C1,2,_or_3) / BS max(U1, 2, 3) > {self.cut}"},
{'col': 'Bisulfite Conversion I Green bkg/U', 'val':self.bisulfite_conversion_I_green_bkg_U, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS U ≤ background"},
{'col': 'Bisulfite Conversion I Red C/U', 'val':self.bisulfite_conversion_I_red_CU, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS min(C1,2,_or_3) / BS max(U1, 2, 3) > {self.cut}"},
{'col': 'Bisulfite Conversion I Red bkg/U', 'val':self.bisulfite_conversion_I_red_bkg_U, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS U ≤ background"},
{'col': 'Bisulfite Conversion II Red/Green', 'val':self.bisulfite_conversion_II_red_ratio, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS II min(Red)/max(Grn) > {self.cut}"},
{'col': 'Bisulfite Conversion II bkg/Green', 'val':self.bisulfite_conversion_II_green_bkg, 'min':0, 'mid':0.7*self.cut, 'max':self.cut, 'formula': f"BS II bkg/Grn > {self.cut}"},
{'col': 'Specificity I Green', 'val':self.specificity_I_green, 'min':0, 'mid':0.7, 'max':1.0, 'formula': f"GT Mismatch (min(PM)/max(MM)) > 1"}, # guide says DON'T change the threshold
{'col': 'Specificity I Red', 'val':self.specificity_I_red, 'min':0, 'mid':0.7, 'max':1.0, 'formula': f"GT Mismatch (min(PM)/max(MM)) > 1"}, # guide says DON'T change the threshold
{'col': 'Specificity II', 'val':self.specificity_II, 'min':0, 'mid':0.7, 'max':self.cut, 'formula': f"(S_Red/ S_Green) > {self.cut}"},
{'col': 'Specificity II Bkg', 'val':self.specificity_II_bkg, 'min':0, 'mid':0.7, 'max':self.cut, 'formula': f"(background/ S_Green) > {self.cut}"},
{'col': 'Non-polymorphic Green', 'val':self.nonpolymorphic_green_lowCG_highAT, 'min':0, 'mid':0.7*self.cut, 'max':5*self.cut, 'formula': f"(min(CG)/ max(AT)) > {5*self.cut}"},
{'col': 'Non-polymorphic Red', 'val':self.nonpolymorphic_red_lowAT_highCG, 'min':0, 'mid':0.7*self.cut, 'max':5*self.cut, 'formula': f"(min(AT)/ max(CG)) > {5*self.cut}"},
{'col': 'Baseline Green', 'val':baseline_G - self.bg_offset, 'formula': "max(Extension (A), Extension (T)) no offset", 'max':400, 'med':200, 'min':100},
{'col': 'Baseline Red', 'val':baseline_R - self.bg_offset, 'formula': "max(Extension (C), Extension (G)) no offset", 'max':800, 'med':400, 'min':100},
{'col': 'Negative Baseline G', 'val':self.negative_control_mean_green, 'formula': "mean NEGATIVE Green control probes"},
{'col': 'Negative Baseline R', 'val':self.negative_control_mean_red, 'formula': "mean NEGATIVE Red control probes"},
{'col': 'Regression NORM_GA', 'val':self.regression_NORM_GA, 'formula': "NORM_G (grn) vs NORM_A (red)", 'max':0.8, 'med':0.8, 'min':0.8},
{'col': 'Regression NORM_CT', 'val':self.regression_NORM_CT, 'formula': "NORM_C (grn) vs NORM_T (red)", 'max':0.8, 'med':0.8, 'min':0.8},
{'col': 'GCT score', 'val':self.gct_score, 'formula': "mean(oobG extC)/mean(oobG extT)", 'max':1.0, 'med':0.99, 'min':0.93},
#{'col': 'NORM_A', 'val':self.norm_A_mean_green, 'formula': "mean NORM_A control probes Green)", 'max':600, 'med':300, 'min':100},
#{'col': 'NORM_T', 'val':self.norm_T_mean_green, 'formula': "mean NORM_T control probes Green)", 'max':400, 'med':200, 'min':100},
#{'col': 'NORM_C', 'val':self.norm_C_mean_red, 'formula': "mean NORM_C control probes Red)", 'max':1000, 'med':900, 'min':100},
#{'col': 'NORM_G', 'val':self.norm_G_mean_red, 'formula': "mean NORM_G control probes Red)", 'max':1000, 'med':900, 'min':100},
{'col': 'Passing Probes', 'val':(100 - self.failed_probes), 'formula': f"(p ≤ {self.pval_sig}) > 80% probes", 'max':80, 'med':80, 'min':80},
]
row = {'Sample': sample}
if self.pval is not True:
try:
self.data[sample].remove({'col': 'Passing Probes', 'val':(100 - self.failed_probes), 'formula': f"(p ≤ {self.pval_sig}) > 80% probes", 'max':80, 'med':80, 'min':80})
except ValueError:
pass # poobah file could be missing, in which case it never gets calculated.
if self.gct_score is None or np.isnan(self.gct_score):
try:
self.data[sample].remove({'col': 'GCT score', 'val':self.gct_score, 'formula': "mean(oobG extC)/mean(oobG extT)", 'max':1.0, 'med':0.99, 'min':0.93})
except ValueError as e:
print(f'ERROR {e}')
pass # noob_meth file could be missing, in which case it never gets calculated.
# and mouse is not supported...
row.update({k['col']:k['val'] for k in self.data[sample]})
# DEBUG: everything is rounding OKAY at this point
#if any([ (len(str(v).split(".")[1]) if '.' in str(v) else 0) > self.roundoff for k,v in row.items()]):
# print('ERROR', [len(str(v).split(".")[1]) if '.' in str(v) else 0 for k,v in row.items()] )
self.report.append(row) # this is converted to excel sheet, but self.data is used to format the data in save()
self.formulas.update( {k['col']:k['formula'] for k in list(self.data.values())[0]} )
# process() adds predicted sex and result column
def process(self):
if hasattr(self, 'noob_meth'):
self.gct_scores = methylcheck.bis_conversion_control(self.noob_meth) # a dict of {sentrix_id:score} pairs
if [v for v in self.gct_scores.values() if v is not None and not np.isnan(v)] == []:
self.gct_scores = {}
else:
self.gct_scores = {}
for sample,con in self.control.items():
self.process_sample(sample, con) # saves everything on top of last sample, for now. testing.
# predicted_sex, x_median, y_median, x_fail_percent, y_fail_percent,
if hasattr(self, 'samplesheet') and isinstance(self.samplesheet, pd.DataFrame) and hasattr(self, 'noob_meth') and hasattr(self, 'noob_unmeth'):
if self.predict_sex:
LOGGER.info("Predicting Sex and comparing with sample meta data...")
else:
LOGGER.info("Predicting Sex...")
try:
#print(self.noob_meth.shape, self.noob_unmeth.shape)
sex_df = methylcheck.get_sex((self.noob_meth, self.noob_unmeth), array_type=None, verbose=False, plot=False, on_lambda=False, median_cutoff= -2, include_probe_failure_percent=False)
except ValueError as e:
if str(e).startswith('Unsupported Illumina array type'):
# happens with some mouse array versions, but not fatal to the report
LOGGER.warning(f"Skipping get prediction: {e}")
sex_df = pd.DataFrame()
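# --- Illustrative sketch (not methylcheck's own save() implementation) ---
# The per-sample report rows built above are plain dicts of {column: value}; turning
# them into a spreadsheet only needs pandas. The function name, sheet name, and file
# name below are assumptions for illustration.
import pandas as pd

def report_rows_to_excel(rows, path="qc_report.xlsx"):
    """Write a list of per-sample QC dicts (like self.report) to one Excel sheet."""
    df = pd.DataFrame(rows).set_index("Sample")
    df.to_excel(path, sheet_name="QC_REPORT")
    return df

# Example usage with two fake samples:
# report_rows_to_excel([{"Sample": "A", "Staining Green": 12.3},
#                       {"Sample": "B", "Staining Green": 11.8}])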
from collections import ChainMap
from datetime import datetime
import pandas as pd
from dbnd._core.tracking.schemas.column_stats import ColumnStatsArgs
from targets.value_meta import ValueMetaConf
from targets.values.pandas_histograms import PandasHistograms
# fmt: off
diverse_df = pd.DataFrame({
'int_column': [6, 7, None, 1, 9, None, 3, 7, 5, 1, 1, 6, 7, 3, 7, 4, 5, 4, 3, 7, 3,
2, None, 6, 6, 2, 4, None, 7, 2, 2, 6, 9, 6, 1, 9, 2, 4, 0, 5, 3, 8,
9, 6, 7, 5, None, 1, 1, 2, None, 5, 6, 8, 6, 9, 1, 9, 5, 9, 6, 5, 6,
8, 9, 1, 9, 4, None, 3, 1, 6, 1, 4, 9, 3, 1, 2, None, 7, 3, 1, 9, 2,
4, 5, 2, 8, 7, 8, 1, 7, 7, 6, 3, 0, 6, 8, 6, 9],
'float_column': [9.0, 4.0, 6.0, 6.0, 7.0, 2.0, 5.0, 1.0, 8.0, 4.0, 3.0, 4.0, 2.0,
7.0, 3.0, 9.0, 7.0, 5.0, 3.0, 9.0, 4.0, 9.0, None, 5.0, 5.0, 2.0,
4.0, 4.0, 7.0, 5.0, 1.0, 8.0, 7.0, 4.0, 1.0, 0.0, 6.0, 2.0, 1.0,
2.0, 7.0, 3.0, 0.0, 8.0, 3.0, 2.0, None, 0.0, 8.0, None, 9.0, 2.0,
2.0, 9.0, 1.0, 6.0, 6.0, 1.0, 0.0, 8.0, 7.0, 9.0, 2.0, 9.0, 9.0,
2.0, 0.0, 7.0, 5.0, 7.0, 3.0, 5.0, 1.0, 2.0, 4.0, 3.0, 1.0, 0.0,
3.0, 1.0, 4.0, 8.0, 2.0, None, 2.0, 9.0, 7.0, 7.0, 8.0, 5.0, 7.0,
None, 7.0, 4.0, 8.0, 7.0, 9.0, 7.0, 6.0, None],
'bool_column': [None, True, None, True, None, None, None, True, True, None, None,
True, None, True, None, None, False, False, None, False, None,
True, False, False, True, None, True, None, False, False, None,
True, False, True, None, None, None, None, None, True, True, None,
None, None, False, None, True, None, True, False, True, True,
False, False, None, False, False, True, True, None, None, True,
True, True, False, None, False, True, False, False, False, None,
False, False, None, True, True, False, None, True, False, False,
True, True, False, None, None, True, False, False, False, False,
False, True, False, False, None, False, True, True],
'str_column': ['baz', 'baz', 'bar', None, '', '', 'baz', 'foo', None, '', 'bar',
None, 'bar', 'baz', '', None, 'foo', None, 'bar', None, 'bar',
'bar', '', None, 'foo', '', 'bar', 'foo', 'baz', None, '', 'bar',
'foo', 'foo', 'foo', 'foo', 'bar', None, None, 'foo', '', '', '',
'bar', 'foo', '', 'bar', '', '', 'baz', 'baz', 'bar', 'baz', 'baz',
None, '', 'foo', '', None, 'baz', 'baz', 'baz', 'foo', 'foo', 'baz',
None, 'foo', None, 'foo', None, 'bar', None, 'bar', 'baz', 'foo',
'foo', None, 'foo', '', 'baz', 'baz', 'baz', None, 'bar', None,
None, 'bar', '', 'foo', 'baz', 'baz', '', 'foo', 'baz', 'foo', '',
'bar', None, 'foo', ''],
"multi_data_types": [
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"foo","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None, | pd.DataFrame({"...": [1]}) | pandas.DataFrame |
import pandas as pd
import os
os.chdir('/home/sameen/maltrail/new')
file_chdir = os.getcwd()
filecsv_list = []
for root, dirs, files in os.walk(file_chdir):
for file in files:
if os.path.splitext(file)[0] != 'all':
#alldata=pd.read_csv(file)
filecsv_list.append(file)
data = pd.DataFrame()
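# --- Hedged continuation sketch: how the collected CSVs might be merged ---
# The loop above only gathers file names; a typical follow-up is to concatenate them
# into `data` and write a combined file. The 'all.csv' output name mirrors the
# `!= 'all'` filter above but is an assumption, not taken from the original script.
frames = []
for name in filecsv_list:
    try:
        frames.append(pd.read_csv(name))
    except pd.errors.EmptyDataError:
        continue  # skip empty exports
if frames:
    data = pd.concat(frames, ignore_index=True)
    data.to_csv('all.csv', index=False)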
"""
Get data for past matches
"""
import requests
import pandas as pd
import json
import os
from mappings import regions_map, game_mode_map, match_cols, player_cols
# get the starting gameID for the API calls
try:
final_gameID_df = pd.read_csv(os.path.join('output', 'matchData.csv'), usecols=['match_id'])
if len(final_gameID_df) == 1:
final_gameID = 5992892504
else:
final_gameID = final_gameID_df.min()[0] - 1
except pd.errors.EmptyDataError:
final_gameID = 5992892504
# instantiate dataframe that will hold API call processed data
total_match_df = pd.DataFrame()
try:
for match_id in range(final_gameID, final_gameID - 300, -1):
match = requests.get('https://api.opendota.com/api/matches/{}'.format(match_id))
match = json.loads(match.text)
if len(match) == 1:
continue
        match_df = pd.json_normalize(match)
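        # --- Hedged sketch of the loop's likely continuation (not from the source) ---
        # Keep only the match-level columns listed in mappings.match_cols, map the
        # region / game-mode codes, and accumulate rows; the column names and the
        # output file name below are assumptions.
        match_df = match_df.reindex(columns=match_cols)
        if 'region' in match_df.columns:
            match_df['region'] = match_df['region'].map(regions_map)
        if 'game_mode' in match_df.columns:
            match_df['game_mode'] = match_df['game_mode'].map(game_mode_map)
        total_match_df = pd.concat([total_match_df, match_df], ignore_index=True)
except Exception as err:
    # the API loop is long-running; report the failure but still persist progress
    print(f"stopped early: {err}")
finally:
    total_match_df.to_csv(os.path.join('output', 'matchData_new.csv'), index=False)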
import os
import glob
import numpy as np
import pylab as pl
import scipy.io as sio
# for_Jyotika.m
from copy import copy, deepcopy
import pickle
import matplotlib.cm as cm
import pdb
import h5py
import pandas as pd
import scipy.stats as sp_st
import sys
import seaborn as sns
# Raw data
data_dir = "../DataSource/"
data_target_dir = "./data/"
electrophys = "ELECTROPHY"
behavior = "BEHAVIOR"
sub_ipsi_contra = sys.argv[1]
behavior_enrichment = pd.read_excel(data_dir+"/"+"Enrichment.xlsx")
gammas = np.round(np.arange(0.0,1.5,0.17),2)
day_label_order = list(behavior_enrichment.keys())[1:]
enrichment_df = pd.DataFrame(columns=["mouse","time",'Distance','intercept','slope','maximum_distance','total_distance','average_distance',"total_days","short-names"])
temp_df = dict()
for k in list(enrichment_df.keys()):
temp_df[k] = []
days = behavior_enrichment.keys()[1:]
for i in np.arange(len(behavior_enrichment)):
x = behavior_enrichment.iloc[i]
for d in days:
temp_df["Distance"].append(float(x[d]))
temp_df["time"].append(d)
y_dist = np.array(np.array(x)[1:]).astype('float')
ind_nonan = np.where(np.isnan(y_dist)==False)[0]
y_dist1 = y_dist[ind_nonan]
x_days = np.arange(0,len(y_dist1))
coef = np.polyfit(x_days,y_dist1,1)
max_dist = np.max(y_dist1)
tot_dist = np.sum(y_dist1)
temp_df["mouse"].append([np.array(x["Mouse"]) for i in np.arange(len(days)) ])
temp_df["short-names"].append([np.array(x["Mouse"].split('_')[1]) for i in np.arange(len(days)) ])
temp_df["intercept"].append([ coef[1] for i in np.arange(len(days)) ])
temp_df["slope"].append([ coef[0] for i in np.arange(len(days)) ])
temp_df["maximum_distance"].append([ max_dist for i in np.arange(len(days)) ])
temp_df["total_distance"].append([ tot_dist for i in np.arange(len(days)) ])
temp_df["average_distance"].append([ tot_dist/len(y_dist1) for i in np.arange(len(days)) ])
temp_df["total_days"].append([len(y_dist1) for i in np.arange(len(days)) ])
for k in list(enrichment_df):
enrichment_df[k] = np.hstack(temp_df[k])
enrichment_df.to_csv(data_target_dir+"Enrichment_df.csv")
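# --- Worked example of the per-mouse fit above (illustration only, toy numbers) ---
# np.polyfit(x, y, 1) returns [slope, intercept]: for distances rising by roughly
# 2 units per day from a starting value of ~1, the coefficients come out near 2 and 1.
_example_days = np.arange(4)
_example_dist = np.array([1.0, 3.1, 4.9, 7.2])
_example_slope, _example_intercept = np.polyfit(_example_days, _example_dist, 1)
# _example_slope ~ 2.04, _example_intercept ~ 0.99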
'''
fig = pl.figure(figsize=(16,16))
t1 = fig.add_subplot(111)
g1 = sns.lineplot(x='time',y='Distance',hue='mouse',data=enrichment_df,linewidth=2.5,palette='nipy_spectral',marker='o',ax=t1,sort=False)
fig.savefig(fig_target_dir+"Enrichment_distances.png")
'''
if sub_ipsi_contra == "n":
graph_prop_df = pd.read_csv(data_target_dir+"graph_properties_pandas_for_behav_all.csv")
else:
graph_prop_df = pd.read_csv(data_target_dir+"graph_properties_pandas_for_behav_sub_contra_ipsi_all.csv")
#!/usr/bin/env python
import os,sys
import pandas as pd
import argparse
daismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,daismdir)
import daism.modules.simulation as simulation
import daism.modules.training as training
import daism.modules.prediction as prediction
#--------------------------------------
#--------------------------------------
# main()
parser = argparse.ArgumentParser(description='DAISM-XMBD deconvolution.')
subparsers = parser.add_subparsers(dest='subcommand', help='Select one of the following sub-commands')
# create the parser for the "one-stop DAISM-DNN" command
parser_a = subparsers.add_parser('DAISM', help='one-stop DAISM-XMBD',description="one-stop DAISM-XMBD")
parser_a.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_a.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_a.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_a.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_a.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_a.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_a.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_a.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "DAISM simulation" command
parser_b = subparsers.add_parser('DAISM_simulation', help='training set simulation using DAISM strategy',description='training set simulation using DAISM strategy.')
parser_b.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_b.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_b.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_b.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_b.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_b.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_b.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "Generic simulation" command
parser_c = subparsers.add_parser('Generic_simulation', help='training set simulation using purified cells only',description='training set simulation using purified cells only.')
parser_c.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_c.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_c.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_c.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_c.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "training" command
parser_d = subparsers.add_parser('training', help='train DNN model',description='train DNN model.')
parser_d.add_argument("-trainexp", type=str, help="Simulated samples expression file", default=None)
parser_d.add_argument("-trainfra", type=str, help="Simulated samples ground truth file", default=None)
parser_d.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_d.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "prediction" command
parser_e = subparsers.add_parser('prediction', help='predict using a trained model',description='predict using a trained model.')
parser_e.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_e.add_argument("-model", type=str, help="Deep-learing model file trained by DAISM", default="../output/DAISM_model.pkl")
parser_e.add_argument("-celltype", type=str, help="Model celltypes", default="../output/DAISM_model_celltypes.txt")
parser_e.add_argument("-feature", type=str, help="Model feature", default="../output/DAISM_model_feature.txt")
parser_e.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_e.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
class Options:
random_seed = 777
min_f = 0.01
max_f = 0.99
lr = 1e-4
batchsize = 64
num_epoches = 500
ncuda = 0
def main():
# parse some argument lists
inputArgs = parser.parse_args()
if os.path.exists(inputArgs.outdir)==False:
os.mkdir(inputArgs.outdir)
#### DAISM modules ####
if (inputArgs.subcommand=='DAISM'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = | date_range("2017-01-01", periods=2, tz=tz) | pandas.date_range |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 7 21:33:48 2021
@author: David
"""
import sys
sys.path.append('.')
# import os
# import inspect
from datetime import date
from pathlib import Path
import locale
import pandas as pd
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import fig_util
from IPython.display import display, Image
INPUT_PATH = r'..\data\RKI\Hospitalisierungen'
OUTPUT_PATH = r'..\output\Hospitalization_Nowcast2'
FILE_PATTERN = '{year:04d}-{month:02d}-{day:02d}_Deutschland_COVID-19-Hospitalisierungen.csv'
START_DATE = '2021-07-29' #'2021-08-01'
END_DATE = date.today().strftime('%Y-%m-%d') # '2021-11-12'
MAX_TRI_LEN = 21
ALL_DATE_RANGE = ['2020-03-03', END_DATE]
#END_DATE = '2021-11-09'
BL_FILTER = 'Thüringen'
BL_FILTER = 'Sachsen' # 200
BL_FILTER = 'Rheinland-Pfalz' # 80
BL_FILTER = 'Berlin' # 160
BL_FILTER = 'Schleswig-Holstein' # 90
BL_FILTER = 'Brandenburg' # 160
BL_FILTER = 'Hessen' # 140
BL_FILTER = 'Niedersachsen' # 70
BL_FILTER = 'Hamburg' # 120
BL_FILTER = 'Baden-Württemberg' # 100
BL_FILTER = 'Nordrhein-Westfalen' # 100
BL_FILTER = 'Bayern' # 140
BL_FILTER = 'Bundesgebiet' # 100
yscale_table = {
'00-04': 4,
'05-14': 2.5,
'15-34': 7,
'35-59': 12,
'60-79': 25,
'80+': 60,
'00+': 15,
'all': 100
}
DO_SEPERATE_TOTAL = True
# SHOW_ONLY_THESE_AG = None
# SHOW_ONLY_THESE_AG = [
# '35-59',
# '60-79',
# '80+',
# '00+'
# ]
SHOW_ONLY_THESE_AG = [
'00+'
]
if DO_SEPERATE_TOTAL:
AG_LIST = [
'00-04',
'05-14',
'15-34',
'35-59',
'60-79',
'80+'
]
else:
AG_LIST = [
'00-04',
'05-14',
'15-34',
'35-59',
'60-79',
'80+',
'00+'
]
SLATE = (0.15, 0.15, 0.15)
# POP_LUT = {
# '00-04': 39.69100,
# '05-14': 75.08700,
# '15-34': 189.21300,
# '35-59': 286.66200,
# '60-79': 181.53300,
# '80+': 59.36400,
# '00+': 831.55000
# }
ytck_table = {
'00-04': 0.1,
'05-14': 0.05,
'15-34': 0.2,
'35-59': 0.25,
'60-79': 0.5,
'80+': 1,
'00+': 0.25,
'all': 2
}
plt_col_table = {
'00-04': (0.8, 0.0, 0.8),
'05-14': (0, 0.5, 0.5),
'15-34': (1, 0.7, 0),
'35-59': (1, 0, 0),
'60-79': (0.6, 0.6, 1),
'80+': (0, 0, 1),
'00+': (0, 0, 0)
}
# %%
plt.rc('axes', axisbelow=True)
locale.setlocale(locale.LC_TIME, 'de-DE')
assert(Path(INPUT_PATH).is_dir())
Path(OUTPUT_PATH).mkdir(parents=True, exist_ok=True)
POP_LUT = pd.read_csv(r'../data/LUT/Bundeslaender2.tsv', sep='\t', comment='#', index_col='Gebiet')
data_input_date_range = pd.date_range(START_DATE, END_DATE, freq='D')
last_year_date_range = pd.date_range(
START_DATE.replace('2021', '2020'),
END_DATE.replace('2021', '2020'), freq='D')
all_date_range = pd.date_range(ALL_DATE_RANGE[0], ALL_DATE_RANGE[1], freq='D')
# pd.DataFrame(index=[], columns={
# 'Datum': pd.Series(dtype='datetime64[ns]'),
# '7T_Hospitalisierung_Faelle': pd.Series(dtype=np.float64),
# 'Datenalter': pd.Series(dtype=np.int64)})
rep_tri_table = { ag: None for ag in AG_LIST }
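# Assumption (the code that fills this table is not shown here): rep_tri_table is presumably
# meant to hold one reporting triangle per age group, i.e. hospitalisation counts indexed by
# reporting date and by data age in days (up to MAX_TRI_LEN), built up as the daily files
# below are read.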
last_working_date = ''
for dt in data_input_date_range:
fname = INPUT_PATH + '\\' + FILE_PATTERN.format(
year=dt.year, month=dt.month, day=dt.day)
try:
data = pd.read_csv(fname, sep=',', decimal='.', parse_dates=['Datum'])
last_working_date = dt.strftime('%Y-%m-%d')
except FileNotFoundError:
END_DATE = last_working_date
data_input_date_range = | pd.date_range(START_DATE, END_DATE, freq='D') | pandas.date_range |
'''''
Authors: <NAME> (@anabab1999) and <NAME> (@felipezara2013)
'''
from calendars import DayCounts
import pandas as pd
from pandas.tseries.offsets import DateOffset
from bloomberg import BBG
import numpy as np
bbg = BBG()
# Pull the tickers for the zero curve
tickers_zero_curve = ['S0023Z 1Y BLC2 Curncy',
'S0023Z 1D BLC2 Curncy',
'S0023Z 3M BLC2 Curncy',
'S0023Z 1W BLC2 Curncy',
'S0023Z 10Y BLC2 Curncy',
'S0023Z 1M BLC2 Curncy',
'S0023Z 2Y BLC2 Curncy',
'S0023Z 6M BLC2 Curncy',
'S0023Z 2M BLC2 Curncy',
'S0023Z 5Y BLC2 Curncy',
'S0023Z 4M BLC2 Curncy',
'S0023Z 2D BLC2 Curncy',
'S0023Z 9M BLC2 Curncy',
'S0023Z 3Y BLC2 Curncy',
'S0023Z 4Y BLC2 Curncy',
'S0023Z 50Y BLC2 Curncy',
'S0023Z 12Y BLC2 Curncy',
'S0023Z 18M BLC2 Curncy',
'S0023Z 7Y BLC2 Curncy',
'S0023Z 5M BLC2 Curncy',
'S0023Z 6Y BLC2 Curncy',
'S0023Z 2W BLC2 Curncy',
'S0023Z 11M BLC2 Curncy',
'S0023Z 15M BLC2 Curncy',
'S0023Z 21M BLC2 Curncy',
'S0023Z 15Y BLC2 Curncy',
'S0023Z 25Y BLC2 Curncy',
'S0023Z 8Y BLC2 Curncy',
'S0023Z 10M BLC2 Curncy',
'S0023Z 20Y BLC2 Curncy',
'S0023Z 33M BLC2 Curncy',
'S0023Z 7M BLC2 Curncy',
'S0023Z 8M BLC2 Curncy',
'S0023Z 11Y BLC2 Curncy',
'S0023Z 14Y BLC2 Curncy',
'S0023Z 18Y BLC2 Curncy',
'S0023Z 19Y BLC2 Curncy',
'S0023Z 23D BLC2 Curncy',
'S0023Z 9Y BLC2 Curncy',
'S0023Z 17M BLC2 Curncy',
'S0023Z 1I BLC2 Curncy',
'S0023Z 22Y BLC2 Curncy',
'S0023Z 28Y BLC2 Curncy',
'S0023Z 2I BLC2 Curncy',
'S0023Z 30Y BLC2 Curncy',
'S0023Z 31Y BLC2 Curncy',
'S0023Z 32Y BLC2 Curncy',
'S0023Z 38Y BLC2 Curncy',
'S0023Z 39Y BLC2 Curncy',
'S0023Z 40Y BLC2 Curncy',
'S0023Z 42D BLC2 Curncy',
'S0023Z 48Y BLC2 Curncy']
df_bbg = bbg.fetch_series(tickers_zero_curve, "PX_LAST",
startdate = pd.to_datetime('today'),
enddate = pd.to_datetime('today'))
df_bbg = df_bbg.transpose()
df_bbg_m = bbg.fetch_contract_parameter(tickers_zero_curve, "MATURITY")
'''
The zero curve is used for interpolation to obtain the rate for a specific term.
'''
# build the zero curve
zero_curve = pd.concat([df_bbg, df_bbg_m], axis=1, sort= True).set_index('MATURITY').sort_index()
zero_curve = zero_curve.astype(float)
zero_curve = zero_curve.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='backward', limit_area=None, downcast=None)
zero_curve.index = pd.to_datetime(zero_curve.index)
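# Minimal usage sketch (illustrative only; the target date below is an assumption, not part
# of the original script): to read an interpolated zero rate for an arbitrary maturity, one
# could extend the index and interpolate in time, e.g.
#   target = pd.Timestamp('2027-06-15')
#   rate = (zero_curve.reindex(zero_curve.index.union([target]))
#                     .interpolate(method='time')
#                     .loc[target])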
# function that computes the fixed leg of the swap contract
'''
The function below calculates the present value of the swap's fixed leg
for a given term, based on interpolation of the zero curve.
'''
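# Intended usage sketch (the argument values below are assumptions for illustration; valid
# day-count / calendar strings depend on the external `calendars.DayCounts` package):
#   schedule = swap_fixed_leg_pv(today='2020-01-02', rate=0.025,
#                                busdays='ACT/360', calendartype='us_trading',
#                                maturity=10, periodcupons=6, notional=1000000)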
def swap_fixed_leg_pv(today, rate, busdays, calendartype, maturity=10, periodcupons=6, notional=1000000):
global zero_curve
dc1 = DayCounts(busdays, calendar=calendartype)
today = pd.to_datetime(today)
date_range = pd.date_range(start=today, end=today + DateOffset(years=maturity), freq=DateOffset(months=periodcupons))
date_range = dc1.modified_following(date_range)
df = pd.DataFrame(data=date_range[:-1], columns=['Accrual Start'])
df['Accrual End'] = date_range[1:]
df['days'] = (df['Accrual End'] - df['Accrual Start']).dt.days
df['Notional'] = notional
df['Principal'] = 0
lastline = df.tail(1)
df.loc[lastline.index, 'Principal'] = notional
df['Payment'] = (df['days']/ 360) * rate * df['Notional']
df['Cash Flow'] = df['Payment'] + df['Principal']
df['Cumulative Days'] = df['days'].cumsum()
days = pd.DataFrame(index = df['Accrual End'])
zero_curve_discount = | pd.concat([zero_curve, days], sort=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[367]:
import numpy as np
from sympy.utilities.iterables import multiset_permutations
import random
import copy
import glob
from IPython import display
import pandas as pd
quotazioni = pd.read_csv ('Quotazioni_Fantacalcio.csv')
from joblib import Parallel, delayed
import multiprocessing
N_cores = multiprocessing.cpu_count()
import progressbar
#pbar = progressbar.progressbar()
# In[368]:
######################################################## START DEFAULT PARAMETERS
struttura_rosa = np.array([3, 8, 8, 6])
#this must contain all the allowed schemes
Formazioni = {
'352': [1, 3, 5, 2],
'343': [1, 3, 4, 3],
'442': [1, 4, 4, 2],
'541': [1, 5, 4, 1],
'532': [1, 5, 3, 2],
'433': [1, 4, 3, 3]
    # add all the other allowed formations here
}
Fasce_goal = np.array([66, 70, 84, 88, 92, 96, 100])
Fasce_modificatore = np.array([6. , 6.5, 7. , 7.5, 8. ])
Valori_modificatore = np.array([1, 3, 5, 6, 8])
rows_to_skip=[0,1,2,3,4]
N_squadre = 8
######################################################## END DEFAULT PARAMETERS
# In[369]:
def names(num_squadre):
team_names = []
teams = {}
for i in range(1,num_squadre+1):
team_names.append("Team " + str(i))
teams[i] = "Team " + str(i)
return [teams, team_names]
# In[370]:
#[teams, team_names] = names(N_squadre)
# In[371]:
def fixture_gen(teams):
temp = copy.deepcopy(teams)
var = []
while len(temp)>1:
idx = list(temp)
j,k = random.sample(idx,2)
var.append((temp.pop(j),temp.pop(k)))
return var
# In[372]:
def genera_rose(struttura_rosa, num_squadre):
giocatori = np.array(struttura_rosa)*num_squadre
tot_giocatori = sum(giocatori)
[p,d,c,a] = giocatori
por = np.array(range(1,p+1))
dif = np.array(range(1,d+1))+p
cen = np.array(range(1,c+1))+p+d
att = np.array(range(1,a+1))+p+d+c
rosa_por=np.random.choice(por,[struttura_rosa[0],num_squadre],replace=False)
rosa_dif=np.random.choice(dif,[struttura_rosa[1],num_squadre],replace=False)
rosa_cen=np.random.choice(cen,[struttura_rosa[2],num_squadre],replace=False)
rosa_att=np.random.choice(att,[struttura_rosa[3],num_squadre],replace=False)
rosa = np.append(rosa_por,rosa_dif,axis=0)
rosa = np.append(rosa,rosa_cen,axis=0)
rosa = np.append(rosa,rosa_att,axis=0)
return rosa
# In[373]:
#gives back a dataframe with the top 200 players
def top_players(struttura_rosa, quotazioni, num_squadre):
players = {}
j = 1
[p,c,d,a]=struttura_rosa*num_squadre
for k, element in quotazioni.iterrows():
if element['R'] == 'P' and j<=p:
players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]
j+=1
elif element['R'] == 'D' and j<=p+d:
players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]
j+=1
elif element['R'] == 'C' and j<=p+d+c:
players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]
j+=1
elif element['R'] == 'A' and j<=p+d+c+a:
players[j] = [j, element['Id'],element['Nome'],element['Qt. A']]
j+=1
players=pd.DataFrame(players).T
players = players.rename(columns = {0:'My Id',1:'FC Id', 2:'Nome', 3:'Quotazione'})
return players
# In[374]:
#assigns the dictionary grade to the specific team players
def assign_grade(rose, grades_dict):
n,m = np.shape(rose)
grades = np.zeros((n,m))
for i in range(n):
for j in range(m):
if rose[i,j] in grades_dict:
grades[i,j] = grades_dict[rose[i,j]]
return grades
# In[375]:
def modificatore(voti_dif, valori, fasce):
temp = 0
media = np.average(voti_dif)
for i in range(len(fasce)):
if media >= fasce[i]:
temp = valori[i]
return temp
# In[376]:
#for the top 200 players, creates a dictionary mapping 'My Id' to 'voto'
def all_grades_dict(struttura_rosa, quotazioni, voti_giornata, num_squadre):
players = top_players(struttura_rosa, quotazioni, num_squadre)
temp_votes = {}
    # rows to skip: repeated header rows ('Cod.') and Serie A team-name rows
    skip_values = {'Cod.', 'ATALANTA', 'BOLOGNA', 'BRESCIA', 'CAGLIARI', 'FIORENTINA',
                   'GENOA', 'INTER', 'JUVENTUS', 'LAZIO', 'LECCE', 'MILAN', 'NAPOLI',
                   'PARMA', 'ROMA', 'SAMPDORIA', 'SASSUOLO', 'SPAL', 'TORINO',
                   'UDINESE', 'VERONA'}
    for k in range(len(voti_giornata['Cod.'])):
        if voti_giornata['Cod.'][k] in skip_values:
continue
for j in range(1,len(players['My Id'])+1):
if players['FC Id'][j] == np.float(voti_giornata['Cod.'][k]):
#print(voti_giornata['Cod.'][k])
temp_votes[j]=voti_giornata['Voto'][k]
if temp_votes[j] == '6*':
temp_votes[j]= '6'
    return temp_votes  # mapping: My Id -> grade (voto)
# In[377]:
#formazioni and dict_voti_giornata (from all_grades_dict) must be dictionaries
def voti_max(rose, struttura_rosa, formazioni, dict_voti_giornata, teams, num_squadre, valori, fasce):
[P,D,C,A]=struttura_rosa
voti_rosa = assign_grade(rose, dict_voti_giornata)
voti ={}
for k in range(num_squadre):
voto = 0
for f in formazioni.items():
            # to add: defense modifier
[n_p,n_d,n_c,n_a] = f[1]
idx_p = (-voti_rosa[0:P,k]).argsort()[:n_p]
idx_d = (-voti_rosa[0+P:P+D,k]).argsort()[:n_d]+P
idx_c = (-voti_rosa[0+P+D:C+P+D,k]).argsort()[:n_c]+P+D
idx_a = (-voti_rosa[0+P+D+C:P+D+C+A,k]).argsort()[:n_a]+P+D+C
idx_all = np.hstack((idx_p,idx_d,idx_c,idx_a))
extra = 0
l_temp = copy.deepcopy(voti_rosa[idx_d,k].tolist())
l_temp = np.sort(l_temp)
if n_d >=4 and (l_temp >= 6).sum()>=4:
voti_mod = np.append(l_temp[-3:],voti_rosa[idx_p,k])
extra = modificatore(voti_mod, valori, fasce)
voto = max(voto,np.sum(voti_rosa[idx_all,k]) + extra)
voti[teams[k+1]] = voto
    return voti  # for each roster combination, the team's maximum score for the matchday
# In[378]:
def goal_scored(voti_squadre, fasce_goal):
team_goals={}
for team, voto in voti_squadre.items():
goals = 0
for i in range(len(fasce_goal)):
if voto >= fasce_goal[i]:
goals = i+1
team_goals[team] = goals
return team_goals
# In[379]:
def points(fixtures, voti_squadre, fasce_goal):
goals = goal_scored(voti_squadre, fasce_goal)
points_temp = {}
matches = len(fixtures)
for m in range(matches):
teams = fixtures[m]
if goals[teams[0]] == goals[teams[1]]:
points_temp[teams[0]]=1
points_temp[teams[1]]=1
elif goals[teams[0]] > goals[teams[1]]:
points_temp[teams[0]]=3
points_temp[teams[1]]=0
elif goals[teams[0]] < goals[teams[1]]:
points_temp[teams[0]]=0
points_temp[teams[1]]=3
return points_temp
# In[380]:
def id_toName(struttura_rosa, quotazioni, rose, num_squadre, team_names):
topPlayers = top_players(struttura_rosa, quotazioni, num_squadre)
rose_nomi=pd.DataFrame(columns=team_names, index=range(np.sum(struttura_rosa)))
for team_name in team_names:
temp_teams = []
for Myid in rose[team_name]:
temp_teams.append(topPlayers['Nome'][Myid])
rose_nomi[team_name] = temp_teams
return rose_nomi
# In[381]:
def all_quot_dict(struttura_rosa, quotazioni, num_squadre):
players = top_players(struttura_rosa, quotazioni, num_squadre)
temp_quot={}
for idx in players['My Id']:
temp_quot[idx] = players['Quotazione'][idx]
    return temp_quot  # mapping: My Id -> quotation (price)
# In[382]:
def assign_quot(rose, quot_dict, team_names):
n,m = np.shape(rose)
quot = np.zeros((n,m))
rose = np.array(rose)
for i in range(n):
for j in range(m):
if rose[i,j] in quot_dict:
quot[i,j] = quot_dict[rose[i,j]]
quot_tot = pd.DataFrame(data=np.sum(quot,axis=0,keepdims=True),columns=team_names).T
return quot_tot
# In[383]:
def simula_campionato(struttura_rosa, team_names, teams, quotazioni, path, num_squadre, valori, fasce, fasce_goal, formazioni):
rose = genera_rose(struttura_rosa, num_squadre)
#voti_giornata is the imported dataframe which will be inserted in the loop
all_points = pd.DataFrame(index = team_names)
all_files = glob.glob(path + "/*.xlsx")
i=1
for filename in all_files:
# this is to be read from file
#print('Giornata attuale:' f'{i}\r', end="")
i+=1
voti_giornata = pd.read_excel(filename,sheet_name=0,skiprows=rows_to_skip)
fixtures = fixture_gen(teams)
dict_voti_giornata = all_grades_dict(struttura_rosa, quotazioni, voti_giornata, num_squadre)
voti_squadre = voti_max(rose, struttura_rosa, formazioni, dict_voti_giornata, teams, num_squadre, valori, fasce)
punti = pd.DataFrame.from_dict(points(fixtures, voti_squadre, fasce_goal),orient='index')
all_points = | pd.concat([all_points,punti],axis=1) | pandas.concat |
import json
import os
import pandas as pd
import pytest
from extra_model._models import ExtraModelBase, ModelBase, extra_factory
ExtraModel = extra_factory()
class MyCustomBase:
pass
def test_extra_factory__no_custom_bases_passed__class_has_ModelBase_as_parent():
Model = extra_factory()
assert issubclass(Model, ModelBase)
def test_extra_factory__no_custom_bases_passed__class_has_ExtraModelBase_as_parent():
Model = extra_factory()
assert issubclass(Model, ExtraModelBase)
def test_extra_factory__custom_bases_passed__class_has_custom_base_as_parent():
Model = extra_factory(MyCustomBase)
assert issubclass(Model, MyCustomBase)
def test_extra_factory__multiple_custom_bases_passed__class_has_all_custom_bases_as_parents():
class MyOtherCustomBase:
pass
Model = extra_factory((MyCustomBase, MyOtherCustomBase))
assert issubclass(Model, MyCustomBase)
assert issubclass(Model, MyOtherCustomBase)
def test_extra_factory__custom_bases_passed__class_does_not_have_ModelBase_as_parent():
Model = extra_factory(MyCustomBase)
assert not issubclass(Model, ModelBase)
def test_extra_factory__custom_bases_passed__class_has_extraModelBase_as_parent():
Model = extra_factory(MyCustomBase)
assert issubclass(Model, ExtraModelBase)
@pytest.fixture
def tmp_untrained_res_models_folder_ExtraModel():
resources_folder = os.path.dirname(os.path.realpath(__file__)) + "/resources/"
untrained = ExtraModel(
models_folder=resources_folder, embedding_type="small_embeddings"
)
return untrained
@pytest.fixture
def tmp_untrained_tmp_models_folder_ExtraModel(tmpdir_factory):
tmp_folder = tmpdir_factory.mktemp("tmp_models")
untrained = ExtraModel(models_folder=tmp_folder, embedding_type="small_embeddings")
yield untrained
@pytest.fixture
def tmp_trained_ExtraModel():
resources_folder = os.path.dirname(os.path.realpath(__file__)) + "/resources/"
trained = ExtraModel(
models_folder=resources_folder, embedding_type="small_embeddings"
)
trained.load_from_files()
return trained
@pytest.fixture
def test_comments():
input_ = pd.read_csv("./tests/resources/100_comments.csv")
return input_.to_dict("records")
def test_create(tmp_untrained_tmp_models_folder_ExtraModel, test_comments):
# test for initialization
assert not tmp_untrained_tmp_models_folder_ExtraModel.is_trained
# test that prediction fails without loading
with pytest.raises(RuntimeError):
tmp_untrained_tmp_models_folder_ExtraModel.predict(comments=test_comments)
# for filekey in tmp_untrained_tmp_models_folder_ExtraModel.filenames:
# assert filekey in tmp_untrained_tmp_models_folder_ExtraModel.storage_metadata()
def test_load_from_files(tmp_untrained_res_models_folder_ExtraModel):
assert not tmp_untrained_res_models_folder_ExtraModel.is_trained
tmp_untrained_res_models_folder_ExtraModel.load_from_files()
assert tmp_untrained_res_models_folder_ExtraModel.is_trained
def test_predict(tmp_trained_ExtraModel, test_comments):
# Extra is an unsupervised algorithm, so not possible to guarantee certain output
res = tmp_trained_ExtraModel.predict(comments=test_comments)
res_names = set( | pd.DataFrame(res) | pandas.DataFrame |
import pandas as pd
import numpy as np
import itertools
from scipy.spatial.distance import pdist
import xgboost as xgb
#Center-scale function
def normalize(x):
return (x - x.mean())/(x - x.mean()).std()
#Center-scale function for numpy
def np_normalize(input_vector):
return((input_vector - input_vector.mean())/((input_vector - input_vector.mean()).std(ddof=1)))
#Calculate distances between features
def feature_distances(input_vector):
modified_vector = np.array(input_vector).reshape(-1,1)
vector_distances = pdist(modified_vector, 'euclidean')
return vector_distances
#Center-scale function for shuffled data
def scale_shuffled(input_vector):
if input_vector.var() == 0:
input_vector.values[:] = 0
else:
input_vector = input_vector.transform(normalize)
return input_vector
#Load data
tested_odor_desc = pd.read_csv("./../compiled_desc_resp/filtered_tested_desc.csv", index_col = 0)
tested_odor_resp = pd.read_csv("./../compiled_desc_resp/compiled_odor_sigResp_wide.csv", index_col = 0)
#Normalize data
norm_tested_odor_desc = tested_odor_desc.transform(normalize)
norm_tested_odor_resp = tested_odor_resp.transform(normalize)
#Load shuffled data
shuffled_odor_desc = pd.read_csv("./../compiled_desc_resp/shuffled_filtered_tested_desc.csv", index_col = 0)
shuffled_odor_desc.index = tested_odor_desc.index
shuffled_odor_desc.columns = tested_odor_desc.columns
#Normalize shuffled data
norm_shuffled_odor_desc = shuffled_odor_desc.transform(scale_shuffled)
norm_shuffled_odor_desc.columns = norm_tested_odor_desc.columns
#Convert base data to numpy array for faster processing
tested_odor_desc = np.array(tested_odor_desc)
tested_odor_resp = np.array(tested_odor_resp)
shuffled_odor_desc = np.array(shuffled_odor_desc)
#Convert normalized data to numpy array for faster processing
norm_tested_odor_desc = np.array(norm_tested_odor_desc)
norm_tested_odor_resp = np.array(norm_tested_odor_resp)
norm_shuffled_odor_desc = np.array(norm_shuffled_odor_desc)
xgb_predictions = | pd.DataFrame() | pandas.DataFrame |
from aniachi.systemUtils import Welcome as W
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from pyramid.response import FileResponse
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from fbprophet import Prophet
from termcolor import colored
import os
import numpy as np
import pandas as pd
import pkg_resources
import matplotlib.pyplot as plt
import matplotlib
from io import StringIO
from io import BytesIO
import xml.etree.ElementTree as et
import pickle as pkl
import base64
import traceback
import datetime
import openpyxl
import setuptools
import aniachi
import socket
import pyqrcode
import argparse
import textwrap
port = 8080
file ='mx_us.csv'
@view_config(route_name='hello', renderer='home.jinja2')
def hello_world(request):
return {'name': 'Running Server','port':port,'pyramid':pkg_resources.get_distribution('pyramid').version
,'numpy':np.__version__,'pandas':pd.__version__ ,'favicon':'aniachi_logo.png','matplotlib':matplotlib.__version__,
'fbprophet':pkg_resources.get_distribution('fbprophet').version,'openpyxl ':openpyxl.__version__,'setuptools':setuptools.__version__,
'py_common_fetch':pkg_resources.get_distribution('py-common-fetch').version,'host':socket.gethostbyname(socket.gethostname()),
'pyqrcode':pkg_resources.get_distribution('pyqrcode').version,'argparse':argparse.__version__,'pypng':pkg_resources.get_distribution('pypng').version
}
#
#
@view_config(route_name='entry')
def entry_point(request):
return HTTPFound(location='app/welcome')
#
#
def getParamterOrdefault(d,k,v,valid):
aux = v
try:
if (d[k] in valid): aux = d[k]
except Exception as e:
pass
return aux
#
#
def getIntParameter(d,k,v,r):
aux=int(v)
try:
if isinstance(int(d[k]), int):
if int(d[k]) in r:
aux= int(d[k])
except Exception as e:
pass
return aux
def getDataframe():
return pd.read_csv(os.path.join(os.getcwd(),file))
#
#
def getFilteredDataframe():
mx_peso = getDataframe()
mx_peso.columns = ['date', 'mx_usd']
mx_peso.date = pd.to_datetime(mx_peso['date'], format='%Y-%m-%d')
# remove dots
mx_peso = mx_peso[mx_peso['mx_usd'] != '.']
mx_peso.mx_usd = mx_peso.mx_usd.astype(float)
return mx_peso
#
#
def getForecastData(days=120):
mx_peso = getDataframe()
mx_peso.columns = ['date', 'mx_usd']
mx_peso.date = pd.to_datetime(mx_peso['date'], format='%Y-%m-%d')
# remove dots
mx_peso = mx_peso[mx_peso['mx_usd'] != '.']
mx_peso.mx_usd = mx_peso.mx_usd.astype(float)
df = | pd.DataFrame.copy(mx_peso, deep=True) | pandas.DataFrame.copy |
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 08:01:51 2021
@author: 86159
"""
import math
import os
import numpy as np
# import matplotlib.pyplot as plt
import json
import pandas as pd
# import random
import time
import copy
# import datetime
# from threading import Timer
import time
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def find_station(stops_route):
return stops_route[stops_route["type"]=="Station"].index.to_list()[0]
def fullfillzone(stops_route):
    # Except for the station, every stop without a zone is assigned the zone of its nearest zoned stop
for s in stops_route.index.to_list():
if s != find_station(stops_route):
if pd.isnull(stops_route.loc[s,'zone_id'])==True:
min_=100000
for s2 in stops_route.index.to_list():
lat1=stops_route.loc[s,'lat']
lng1=stops_route.loc[s,'lng']
lat2=stops_route.loc[s2,'lat']
lng2=stops_route.loc[s2,'lng']
# print(lat1, lng1, lat2, lng2)
dis=getDistance(lat1, lng1, lat2, lng2)
# print(dis)
if dis<min_ and s != s2 and pd.isnull(stops_route.loc[s2,'zone_id'])==False:
min_=dis
min_stop=s2
stops_route.loc[s,'zone_id']=stops_route.loc[min_stop,'zone_id']
return stops_route
# Return all stops of the route that share the same zone as `stop`
def find_zone_stop(stop):
zone=stops_route.loc[stop,'zone_id']
if pd.isnull(zone)==False:
stopls=stops_route.loc[stops_route['zone_id']==zone].index.to_list()
    else: stopls=[stop]  # if the stop's zone is NaN, return the stop itself
return stopls
def find_zone_center():
zonels={}
stopls=[]
for s in data_travel_route.index.to_list():
if stops_route.loc[s,'zone_id'] in zonels.keys(): continue
stopls=find_zone_stop(s)
sum_x=0
sum_y=0
for samezone_s in stopls:
sum_x+=stops_route.loc[samezone_s,'lat']
sum_y+=stops_route.loc[samezone_s,'lng']
zonels[stops_route.loc[samezone_s,'zone_id']]=[]
zonels[stops_route.loc[samezone_s,'zone_id']].append(sum_x/len(stopls))
zonels[stops_route.loc[samezone_s,'zone_id']].append(sum_y/len(stopls))
return zonels
def greedy_zone_seq():
station=find_station(stops_route)
lat1=stops_route.loc[station,'lat']
lng1=stops_route.loc[station,'lng']
zonels=find_zone_center()
    # Pick the zone whose centre is closest to the station as the first zone
min_=10000000
for i in zonels.keys():
lat2=zonels[i][0]
lng2=zonels[i][1]
dis=getDistance(lat1, lng1, lat2, lng2)
if dis<min_:
min_=dis
zone1=i
    # Build disX, the zone-to-zone distance DataFrame
disX={}
for z1 in zonels.keys():
disX[z1]={}
lat1=zonels[z1][0]
lng1=zonels[z1][1]
for z2 in zonels.keys():
lat2=zonels[z2][0]
lng2=zonels[z2][1]
disX[z1][z2]=getDistance(lat1, lng1, lat2, lng2)
disX=pd.DataFrame(disX)
return greedy(disX,zone1)
def rad(d):
return math.pi/180.0*d
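# getDistance below computes the great-circle distance between two lat/lng points
# (haversine formula with Earth radius 6378.137 km); the final scaling returns metres.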
def getDistance(lat1,lng1,lat2,lng2):
radLat1 = rad(lat1)
radLat2 = rad(lat2)
a = radLat1 - radLat2
b = rad(lng1) - rad(lng2)
s = 2 * math.asin(math.sqrt(
math.pow(math.sin(a / 2), 2) + math.cos(radLat1) * math.cos(radLat2) * math.pow(math.sin(b / 2), 2)))
EARTH_RADIUS = 6378.137
s = s * EARTH_RADIUS
s = (s * 10000) / 10
return s
class Instance:
def __init__(self, file_name):
self.file_name=file_name
self.data_route, self.route_num, self.stops = self.get_route_data(file_name[0])
self.data_travel = self.get_travel_times(file_name[1])
self.data_package,self.window_stop_info = self.get_package_data(file_name[2])
# self.data_sequences = self.get_actual_sequences(file_name[3])
def get_route_data(self, file_name):
with open(file_name,encoding='utf-8') as f1:
line = f1.readline()
d1 = json.loads(line)
f1.close()
data_route=pd.DataFrame(d1).T
        ## DataFrame with the stops column removed
# columnList=data_route.columns.to_list().remove('stops')
# data_route2=data_route[['station_code','date_YYYY_MM_DD',
# 'departure_time_utc','executor_capacity_cm3','route_score']]
data_route2=data_route[['station_code','date_YYYY_MM_DD',
'departure_time_utc','executor_capacity_cm3']]
# print(data_route.columns.to_list())
route_num=data_route.shape[0]
## stops
stops={}
for routeIdx in data_route.index.to_list():
stops[routeIdx]={}
for stopID,stopinfo in data_route.loc[routeIdx,'stops'].items():
stops[routeIdx][stopID]=stopinfo
stops[routeIdx]=pd.DataFrame(stops[routeIdx]).T
return data_route2,route_num,stops
def get_travel_times(self, file_name):
with open(file_name,encoding='utf-8') as f2:
line = f2.readline()
d2 = json.loads(line)
f2.close()
data_travel={}
for route in d2.keys():
data_travel[route]={}
for stop1 in d2[route].keys():
data_travel[route][stop1]={}
for stop2 in d2[route][stop1].keys():
data_travel[route][stop1][stop2]=d2[route][stop1][stop2]
data_travel[route]=pd.DataFrame(data_travel[route])
return data_travel
def get_package_data(self, file_name):
with open(file_name,encoding='utf-8') as f3:
line = f3.readline()
d3 = json.loads(line)
f3.close()
        # Build window_stop_info
window_stop_info={}
for route in d3.keys():
window_stop_info[route]={}
for stop in d3[route].keys():
window_stop_info[route][stop]=[]
for package in d3[route][stop].keys():
start_time_utc=d3[route][stop][package]["time_window"]["start_time_utc"]
end_time_utc=d3[route][stop][package]["time_window"]["end_time_utc"]
# if start_time_utc!=nan or end_time_utc!=" :
window_stop_info[route][stop].append([start_time_utc,end_time_utc])
return d3,window_stop_info
def get_actual_sequences(self, file_name):
with open(file_name,encoding='utf-8') as f4:
line = f4.readline()
d4 = json.loads(line)
f4.close()
data_sequences={}
for route in d4.keys():
data_sequences[route]={}
            sequence=d4[route]['actual']  # the actual stop sequence for this route
for stopID,orderID in sequence.items():
                if orderID>-1: # ignore index -1, i.e. stops this route does not visit
data_sequences[route][orderID]=stopID
            # sort by visit order
temp=[]
data_sequences[route]=dict(sorted(data_sequences[route].items(),key=lambda d:d[0]))
for i in data_sequences[route].values():
temp.append(i)
data_sequences[route] = temp
return data_sequences
def cost_sum(w1,w2,data_travel_route,stops_route):
data_distance_route={}
for i in range(stops_route.shape[0]):
stop1=stops_route.index.to_list()[i]
data_distance_route[stop1]={}
for j in range(stops_route.shape[0]):
stop2=stops_route.index.to_list()[j]
lat1=stops_route.loc[stop1,'lat']
lng1=stops_route.loc[stop1,'lng']
lat2=stops_route.loc[stop2,'lat']
lng2=stops_route.loc[stop2,'lng']
data_distance_route[stop1][stop2]=getDistance(lat1, lng1, lat2, lng2)
data_distance_route= | pd.DataFrame(data_distance_route) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual( | ct(10,unit='ns') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.cluster import FeatureAgglomeration
#Step 1
def import_csv_file(filepath):
voters_data = pd.read_csv(filepath, low_memory=False)
print("🗃️ Data imported!")
return voters_data
#Todo: clean up "mixed types" error message
#Todo: make sure correct dataset
#Step 2
def choose_subset(voters_data, subset):
print("🔴 Number of observations in starting dataset: {}".format(voters_data.shape[0]))
if subset == "2016":
voters_subset = voters_data[voters_data['weight_2016'].isna() == False]
print("🔻 Selected subset of December 2016 survey respondents")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
elif subset == "2017":
voters_subset = voters_data[voters_data['weight_2017'].isna() == False]
print("🔻 Selected subset of July 2017 survey respondents")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
elif subset == "panel":
voters_subset = voters_data[voters_data['weight_panel'].isna() == False]
print("🔻 Selected subset of May 2018 survey respondents who were part of the original panel")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
elif subset == "overall":
voters_subset = voters_data[voters_data['weight_overall'].isna() == False]
print("🔻 Selected subset of *all* May 2018 survey respondents: original panelists, Latino oversample, and 18-24 year old oversample")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
elif subset == "latino":
voters_subset = voters_data[voters_data['weight_latino'].isna() == False]
print("🔻 Selected subset of May 2018 respondents who are part of the Latino oversample")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
elif subset == "18_24":
voters_subset = voters_data[voters_data['weight_18_24'].isna() == False]
print("🔻 Selected subset of May 2018 respondents who are part of the 18-24 year old oversample")
print("⭕ Number of observations in selected subset: {}".format(voters_subset.shape[0]))
else:
voters_subset = "⚠️ No subset selected. Set the parameter `subset` to be one of the following strings: '2016', '2017', 'panel', 'overall', 'latino', '18_24'"
print(voters_subset)
return voters_subset
#Todo: simplify
#Step 4
def apply_weights(voters_data, subset, magnitude=3):
subset_weights = "weight_{}".format(subset)
if subset_weights not in voters_data.columns:
print("⚠️ Set the parameter `subset` to be one of the following strings: '2016', '2017', 'panel', 'overall', 'latino', '18_24'")
weights = voters_data[subset_weights]
new_index = []
new_units = []
for index_no in tqdm(weights.index, desc="Expanding data according to weight"):
units = int(round(weights[index_no] * magnitude))
for unit in range(0, units):
new_unit = dict(voters_data.loc[index_no])
new_index_no = round(index_no + unit*.001, ndigits=3)
new_index.append(new_index_no)
new_units.append(new_unit)
reconstituted_voters_data = pd.DataFrame(new_units, index=new_index, columns=voters_data.columns)
print("🔼🔼🔺🔼🔺🔺 Data unpacked according to provided weights.")
return reconstituted_voters_data
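# Usage sketch (the subset name is an assumed example): each respondent row is replicated
# round(weight * magnitude) times, e.g.
#   expanded = apply_weights(voters_subset, subset='2016', magnitude=3)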
###############################
# Choose Features of Interest #
###############################
class FeaturesPicker():
def __init__(self, IssueImportance = False,
FeelingThermometer = False,
Favorability = False,
InstitutionalConfidence = False):
self.feature_picker_list = [{'category' : 'Issue Importance',
'import' : IssueImportance,
'prefixes' : ('imiss_'),
'keywords' : [],
'exceptions' : []},
{'category' : 'Feeling Thermometer',
'import' : FeelingThermometer,
'prefixes' : ('ft'),
'keywords' : [],
'exceptions' : []},
{'category' : 'Favorability',
'import' : Favorability,
'prefixes' : ('fav'),
'keywords' : [],
'exceptions' : []},
{'category' : 'Institutional Confidence',
'import' : InstitutionalConfidence,
'prefixes' : ('inst_'),
'keywords' : [],
'exceptions' : []}
]
def clean(self, voters_data, RandomizationFeatures = True,
StartEndTimes = True):
feature_cleaner_list = [{'category' : 'Randomization Features',
'import' : RandomizationFeatures,
'prefixes' : (),
'keywords' : ['rnd'],
'exceptions' : []},
{'category' : 'Start/End Times',
'import' : StartEndTimes,
'prefixes' : ('starttime_', 'endtime_'),
'keywords' : [],
'exceptions' : []},
]
for feature_cleaner in feature_cleaner_list:
if feature_cleaner['import'] == True:
for feature in list(voters_data.columns):
if feature.startswith((feature_cleaner['prefixes'])) == True:
voters_data.pop(feature)
for keyword in feature_cleaner['keywords']:
if keyword in feature:
voters_data.pop(feature)
return voters_data
def process(self, voters_data):
features_of_interest = []
for feature_picker in self.feature_picker_list:
if feature_picker['import'] == True:
for feature in list(voters_data.columns):
if feature.startswith((feature_picker['prefixes'])) == True:
features_of_interest.append(feature)
cleaned_features_of_interest = self.clean(voters_data[features_of_interest])
selected_features_readable = ", ".join([x['category'] for x in self.feature_picker_list if x['import'] == True])
print("🔵 {} features out of {} selected, relevant to: {}".format(len(list(cleaned_features_of_interest.columns)),len(list(voters_data.columns)),selected_features_readable))
return cleaned_features_of_interest
########################
# NARROW DOWN FEATURES #
########################
important_features = ["case_identifier", "caseid", "weight_panel", "weight_latino", "weight_18_24", "weight_overall", "cassfullcd", "starttime", "endtime"]
def check_feature_exists(reconstructed_feature, feature_list):
"""Subfunction for condense_by_most_recent_feature(), perhaps turn into lambda"""
if reconstructed_feature in feature_list:
match = True
else:
match = False
return match
def reconstruct_feature(x):
"""Subfunction for condense_by_most_recent_feature(), perhaps turn into lambda"""
return x.index + x.name # Source: https://stackoverflow.com/a/43654808
def condense_by_most_recent_feature(voters_data):
condensed_data = voters_data.copy()
wave_suffix_list = ["_baseline", "_2012", "_2016", "_2017", "_2018"]
feature_list = list(voters_data.columns)
feature_root_set = set()
for wave_suffix in wave_suffix_list:
for feature in feature_list:
if feature.endswith(wave_suffix):
feature_root = feature.replace(wave_suffix, "")
feature_root_set.add(feature_root)
feature_grid = | pd.DataFrame(index=feature_root_set, columns=wave_suffix_list) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
def tangency_weights(returns,dropna=True,scale_cov=1):
if dropna:
returns = returns.dropna()
covmat_full = returns.cov()
covmat_diag = np.diag(np.diag(covmat_full))
covmat = scale_cov * covmat_full + (1-scale_cov) * covmat_diag
weights = np.linalg.solve(covmat,returns.mean())
weights = weights / weights.sum()
return pd.DataFrame(weights, index=returns.columns)
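# Note: the weights above are the classical tangency-portfolio solution, w proportional to
# inv(covmat) @ mean (normalised to sum to 1), where the covariance is optionally shrunk
# toward its diagonal via scale_cov.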
def performanceMetrics(returns,annualization=1, quantile=.05):
metrics = pd.DataFrame(index=returns.columns)
metrics['Mean'] = returns.mean() * annualization
metrics['Vol'] = returns.std() * np.sqrt(annualization)
metrics['Sharpe'] = (returns.mean() / returns.std()) * np.sqrt(annualization)
metrics['Min'] = returns.min()
metrics['Max'] = returns.max()
metrics[f'VaR ({quantile})'] = returns.quantile(quantile)
metrics[f'CVaR ({quantile})'] = (returns[returns < returns.quantile(quantile)]).mean()
return metrics
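# Usage sketch (assumed data frequency): for monthly returns pass annualization=12, e.g.
#   summary = performanceMetrics(returns, annualization=12, quantile=0.05)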
def maximumDrawdown(returns):
cum_returns = (1 + returns).cumprod()
rolling_max = cum_returns.cummax()
drawdown = (cum_returns - rolling_max) / rolling_max
max_drawdown = drawdown.min()
end_date = drawdown.idxmin()
summary = pd.DataFrame({'Max Drawdown': max_drawdown, 'Bottom': end_date})
# The rest of this code is to get the peak and Recover dates.
# It is tedious, and I recommend skipping the rest of this code unless you are
# already comfortable with Python and Pandas.
# get the date at which the return recovers to previous high after the drawdown
summary['Recover'] = None
for col in returns.columns:
idx = returns.index[(returns.index >= end_date[col]).argmax()]
check_recover = (cum_returns.loc[idx:, col] > rolling_max.loc[idx, col])
if check_recover.any():
summary.loc[col, 'Recover'] = check_recover.idxmax()
summary['Recover'] = pd.to_datetime(summary['Recover'])
# get the date at which the return peaks before entering the max drawdown
summary.insert(loc=1, column='Peak', value=0)
for col in returns.columns:
df = rolling_max.copy()[[col]]
df.columns = ['max']
df['max date'] = df.index
df = df.merge(df.groupby('max')[['max date']].first().reset_index(), on='max')
df.rename(columns={'max date_y': 'max date', 'max date_x': 'date'}, inplace=True)
df.set_index('date', inplace=True)
summary.loc[col, 'Peak'] = df.loc[end_date[col], 'max date']
summary['Peak'] = pd.to_datetime(summary['Peak'])
summary['Peak to Recover'] = (summary['Recover'] - summary['Peak'])
return summary
def get_ols_metrics(regressors, targets, annualization=1):
# ensure regressors and targets are pandas dataframes, as expected
if not isinstance(regressors, pd.DataFrame):
regressors = regressors.to_frame()
if not isinstance(targets, pd.DataFrame):
targets = targets.to_frame()
# align the targets and regressors on the same dates
df_aligned = targets.join(regressors, how='inner', lsuffix='y ')
Y = df_aligned[targets.columns]
X = df_aligned[regressors.columns]
reg = | pd.DataFrame(index=targets.columns) | pandas.DataFrame |
import os, functools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import skfuzzy as fuzz
from kneed import KneeLocator
from sklearn.decomposition import PCA
from GEN_Utils import FileHandling
from loguru import logger
logger.info("Import ok")
def multiple_PCAs(test_dict):
"""test_dict: dict mapping str(data_type): (df, sample_cols)"""
pcas = {}
for data_type, (data, sample_cols) in test_dict.items():
for_PCA = data[sample_cols].copy().fillna(0)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(for_PCA.values)
principalDf = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2'])
principalDf.index = data['Sequence']
pcas[data_type] = principalDf
logger.info(f'{data_type}: {len(principalDf)}')
# visualise the PCA
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title(data_type, fontsize = 20)
ax.scatter(principalDf['PC1'] , principalDf['PC2'], s = 50)
# plt.savefig(f'{output_folder}{data_type}_PCA.png')
return pcas
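# Usage sketch (hypothetical names): each dict value is a (DataFrame, sample-column list)
# pair and the DataFrame must contain a 'Sequence' column, e.g.
#   pcas = multiple_PCAs({'raw': (df_raw, ['rep_1', 'rep_2', 'rep_3'])})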
def fuzzy_clustering(data_type, data, sample_cols, max_clusters=10):
alldata = data[sample_cols].fillna(0).T.values
fpcs = []
cluster_membership = {}
cluster_score = {}
cluster_parameters = {}
for ncenters in range(2, max_clusters):
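        # scikit-fuzzy cmeans(data, c, m, error, maxiter, init): the fuzzifier m is set to 4
        # here (quite high, i.e. very soft memberships); 2 is the more common default.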
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(
alldata, ncenters, 4, error=0.005, maxiter=1000, init=None)
cluster_parameters[ncenters] = [cntr, u, u0, d, jm, p, fpc]
cluster_membership[ncenters] = np.argmax(u, axis=0)
cluster_score[ncenters] = np.max(u, axis=0)
# Store fpc values for later
fpcs.append(fpc)
# clean cluster parameters
parameters = pd.DataFrame(cluster_parameters)
parameters.index = ['cntr', 'u', 'u0', 'd', 'jm', 'p', 'fpc']
# clean cluster data
membership = pd.DataFrame(cluster_membership)
membership.index = data.index.tolist()
membership.columns = [f'member_{col}' for col in membership.columns.tolist()]
score = pd.DataFrame(cluster_score)
score.index = data.index.tolist()
score.columns = [f'score_{col}' for col in score.columns.tolist()]
# Generate merged cluster info
cluster_membership = | pd.merge(score, membership, left_index=True, right_index=True) | pandas.merge |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
import os, requests, uuid, json
import pandas as pd
import numpy as np
from io import BytesIO,StringIO
import xlsxwriter
import spacy
nlp = spacy.load('en_core_web_lg')
from spacy_langdetect import LanguageDetector
nlp.add_pipe(LanguageDetector(),name="LanguageDetector",last=True)
from spacy.lang.en import English
from spacy.lang.en.stop_words import STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,TfidfTransformer
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
from spacy.lemmatizer import Lemmatizer
import pickle, joblib
import string
import re
import matplotlib.pyplot as plt
from collections import Counter
from PIL import Image
from sklearn.svm import LinearSVC
import configparser
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
analyzer_vader = SentimentIntensityAnalyzer()
# remove words from stop words list
nlp.vocab["not"].is_stop = False
nlp.vocab["never"].is_stop = False
nlp.vocab['im'].is_stop = True
nlp.vocab["ive"].is_stop = True
nlp.vocab["nothing"].is_stop = False
class classification_class:
def __init__(self):
self.df_agent = pd.read_csv("./BI_Chat_Analysis/agents.csv")
self.df_overall = pd.read_csv("./BI_Chat_Analysis/overall.csv")
self.df_similar = pd.read_csv("C:/Labs/BI_All_Channels/BI_Chat_Analysis/BI_Chat_Analysis/Dataset.csv")
self.df_similar['keyword_NoPunct'] = self.df_similar['Keyword'].apply(lambda x: self.remove_punct(x))
self.df_similar['keyword_tokenized'] = self.df_similar['keyword_NoPunct'].apply(lambda x: self.tokenization(x.lower()))
self.df_similar['keywordRemoveStopWords'] = self.df_similar['keyword_tokenized'].apply(lambda x: self.remove_stopwords(x))
self.df_similar['keyword_nostop'] = self.df_similar['keywordRemoveStopWords'].apply(lambda x: self.convert_string(x))
self.df_similar['keyword_lemmas'] = self.df_similar['keyword_nostop'].apply(lambda x: self.lemmatization(x))
self.df_similar['String_keyword'] = self.df_similar['keyword_lemmas'].apply(lambda x: self.convert_string(x))
self.df_similar['String_nlp'] = self.df_similar['String_keyword'].apply(lambda x: nlp(x))
def remove_punct(self,text):
text = "".join([char for char in text if char not in string.punctuation])
text = re.sub('[0-9]+', ' ', text)
return text
def tokenization(self,text):
text = nlp(text)
text = [token.text for token in text]
return text
def remove_stopwords(self,text):
filtered_sentence = []
for word in text:
lexeme = nlp.vocab[word]
if lexeme.is_stop == False:
filtered_sentence.append(word)
return filtered_sentence
def lemmatization(self,text):
text = nlp(text)
text_lemma = [token.lemma_ for token in text]
return text_lemma
def convert_string(self,list_value):
return ' '.join(list_value)
def get_categories(self,doc):
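        # Classification cascade implemented below: (1) if a known agent name or a PERSON
        # entity appears, use the agent category; (2) if no keyword from the "overall" list
        # appears, fall back to the overall default category; (3) otherwise pick the category
        # of the most similar keyword by spaCy vector similarity.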
pred_similar = ''
ratio_list = []
flag_agent = 0
for i in self.df_agent['agents']:
if i in doc:
pred_similar = self.df_agent['Category'].unique()[0]
flag_agent = 1
break
if flag_agent == False:
nlp_doc = nlp(doc)
for token in nlp_doc:
if token.pos_ == "PROPN" and token.tag_ == "NNP" and token.ent_type_ == "PERSON":
pred_similar = self.df_agent['Category'].unique()[0]
flag_agent = 1
break
if flag_agent == False:
flag_overall = any(element in doc for element in self.df_overall['keyword'].values)
if flag_overall == False:
pred_similar = self.df_overall['Category'].unique()[0]
flag_agent = 1
if flag_agent == False:
nlp_doc = nlp(doc)
ratio_list = list(map(lambda x: nlp_doc.similarity(x),self.df_similar['String_nlp'].values))
pred_index = ratio_list.index(max(ratio_list))
pred_similar = self.df_similar.loc[pred_index,'Category']
return pred_similar
def classify(self,data):
data['Comments_NoPunct'] = data['Translated_Comments'].apply(lambda x: self.remove_punct(x))
data['Comments_tokenized'] = data['Comments_NoPunct'].apply(lambda x: self.tokenization(x.lower()))
data['CommentsRemoveStopWords'] = data['Comments_tokenized'].apply(lambda x: self.remove_stopwords(x))
data['string_nostop'] = data['CommentsRemoveStopWords'].apply(lambda x: self.convert_string(x))
data['lemmas'] = data['string_nostop'].apply(lambda x: self.lemmatization(x))
data['String_Comments'] = data['lemmas'].apply(lambda x: self.convert_string(x))
pred_list = []
sentiment_list = []
pred_list = list(map(lambda x :self.get_categories(x),data['String_Comments'].values))
sentiment_list = list(map(lambda y :self.sentiments_prediction(y),data['Translated_Comments'].values))
return pred_list,sentiment_list
def sentiment_validation(self,comment):
blob = TextBlob(comment,analyzer=NaiveBayesAnalyzer())
val = blob.sentiment[0]
if val == 'pos':
sentiment_value = 'Positive'
else:
sentiment_value = 'Negative'
return sentiment_value
def sentiments_prediction(self,text):
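        # VADER's 'compound' score is normalised to [-1, 1]; the cut-offs below are the
        # author's custom bands, with TextBlob's NaiveBayesAnalyzer used as a fallback
        # for ambiguous cases via sentiment_validation().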
score = analyzer_vader.polarity_scores(text)
sentiment_val = ''
if score['neu'] == 1.0:
sentiment_val = self.sentiment_validation(text)
elif score['compound'] > 0.25:
sentiment_val= 'Positive'
elif 0.25 >= score['compound'] >= 0.01:
if score['neu'] > 0.83:
sentiment_val = self.sentiment_validation(text)
else:
sentiment_val= 'Positive'
elif score['compound'] <= -0.5:
sentiment_val = 'Negative'
elif 0.0 > score['compound'] > -0.5:
if score['pos'] == 0.0:
sentiment_val = 'Negative'
else:
sentiment_val = self.sentiment_validation(text)
else:
sentiment_val = self.sentiment_validation(text)
return sentiment_val
@method_decorator(login_required, name = 'graphs')
class translate_class:
df = pd.DataFrame()
def __init__(self):
self.obj_classify_comm = main_classification()
# Create your views here.
def Home(self,request):
render(request, 'index.html')
def display_wordcloud(self,df_cloud):
df_cloud.dropna(inplace=True)
df_cloud.reset_index(drop=True, inplace=True)
file1 = open(r"./BI_Chat_Analysis/myfile.txt", "w", encoding="utf-8")
for i in range(len(df_cloud)):
file1 = open(r"./BI_Chat_Analysis/myfile.txt", "a", encoding="utf-8")
x = df_cloud.loc[i,'Translated_Comments']
file1.write(x)
file1.close()
doc=nlp(open(r"./BI_Chat_Analysis/myfile.txt",encoding="latin-1").read())
words = [token.text for token in doc if not token.is_stop and not token.is_punct and not token.is_space]
words = [word.lower() for word in words]
unwanted = {'chatbot', 'bot', 'chat', 'phone' , 'hear', 'process', 'wait','time'}
words = [ele for ele in words if ele not in unwanted]
word_freq = Counter(words)
common_words = word_freq.most_common(50)
        ''' Convert to dict to be read by d3 '''
list_word = []
for i in range(len(common_words)):
key = common_words[i][0]
val = common_words[i][1]
list_word.append({'word' : key , 'size' : val})
return list_word
def graphs(self,request):
contact_type = translate_class.df['Contact Type'].unique()
total = []
positive =[]
negative = []
for i in range(len(contact_type)):
temp_df = translate_class.df.loc[translate_class.df['Contact Type'] == contact_type[i]]
total.append(temp_df.shape[0])
positive.append(temp_df.loc[temp_df['Sentiments']=='Positive'].shape[0])
negative.append(temp_df.loc[temp_df['Sentiments']=='Negative'].shape[0])
c_type = contact_type.tolist()
return render(request,"dashboard.html", {'total':total,'positive':positive,'negative' :negative})
def graphs_contact_type(self,request):
if request.method == "POST":
contact_type = request.POST.get('channel_input')
if contact_type:
filtered_df = translate_class.df[(translate_class.df['Contact Type'] == contact_type)]
satisfaction_values = filtered_df['Satisfaction Level'].value_counts().to_dict()
category_values = filtered_df['Category'].value_counts().to_dict()
sentiment_values = filtered_df['Sentiments'].value_counts().to_dict()
satisfaction_list = list(satisfaction_values.values())
category_list = list(category_values.values())
sentiments_list = list(sentiment_values.values())
satisfaction_keys = list(satisfaction_values.keys())
category_keys = list(category_values.keys())
sentiment_keys = list(sentiment_values.keys())
total_category = sum(category_list)
df_wordcloud = filtered_df[['Translated_Comments']]
wordcloud = self.display_wordcloud(df_wordcloud)
return render(request,"channel_wise.html", {'sent_data':sentiments_list,'sent_keys':sentiment_keys,'sat_data' :satisfaction_list,'cat_keys_graph':category_keys,'sat_keys' :satisfaction_keys, 'cat_data':category_list, 'category_sum' : total_category,'wordcloud' : wordcloud})
satisfaction_values = translate_class.df['Satisfaction Level'].value_counts().to_dict()
category_values = translate_class.df['Category'].value_counts().to_dict()
sentiment_values = translate_class.df['Sentiments'].value_counts().to_dict()
satisfaction_list = list(satisfaction_values.values())
category_list = list(category_values.values())
sentiments_list = list(sentiment_values.values())
satisfaction_keys = list(satisfaction_values.keys())
category_keys = list(category_values.keys())
sentiment_keys = list(sentiment_values.keys())
total_category = sum(category_list)
df_wordcloud = translate_class.df[['Translated_Comments']]
wordcloud = self.display_wordcloud(df_wordcloud)
return render(request,"channel_wise.html", {'sent_data':sentiments_list,'sent_keys':sentiment_keys,'sat_data' :satisfaction_list,'cat_keys_graph':category_keys,'sat_keys' :satisfaction_keys, 'cat_data':category_list, 'category_sum' : total_category,'wordcloud' : wordcloud})
def detect_lang(self,doc):
doc = nlp(doc)
lang = doc._.language['language']
return lang
def translate_comments(self,request):
if request.method == "POST":
comment_file = request.FILES.get('myfile')
if comment_file.name.endswith('.csv'):
df1 = pd.read_csv(comment_file,sheet_name='Survey')
df2 = | pd.read_csv(comment_file,sheet_name='Comments') | pandas.read_csv |