import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.utils import check_random_state
from scipy.linalg import block_diag
import matplotlib.pylab as plt
import matplotlib
from Machine_Learning_for_Asset_Managers import ch2_marcenko_pastur_pdf as mp
'''
Optimal Number of Clusters (ONC Algorithm)
Detection of False Investment Strategies using Unsupervised Learning Methods
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3167017
'''
'''codesnippet 4.1
Base clustering: evaluate the correlation matrix as a distance matrix,
then find clusters. In the inner loop we try each k=2..N
and cluster with k-means for one given initialization,
evaluating q = E(silhouette)/std(silhouette) over all clusters.
The outer loop repeats the inner loop with initializations using
_different centroid seeds_.
kmeans.labels_ is the assignment of members to clusters;
[0 1 1 0 0]
and [1 0 0 1 1] are equivalent labelings.
'''
def clusterKMeansBase(corr0, maxNumClusters=10, n_init=10, debug=False):
corr0[corr0 > 1] = 1
dist_matrix = ((1-corr0.fillna(0))/2.)**.5
silh_coef_optimal = pd.Series(dtype='float64')  # silhouette coefficients of the best clustering found so far
kmeans, stat = None, None
maxNumClusters = min(maxNumClusters, int(np.floor(dist_matrix.shape[0]/2)))
print("maxNumClusters"+str(maxNumClusters))
for init in range(0, n_init):
#The [outer] loop repeats the first loop multiple times, thereby obtaining different initializations. Ref: <NAME> and Lewis (2018)
#DETECTION OF FALSE INVESTMENT STRATEGIES USING UNSUPERVISED LEARNING METHODS
for num_clusters in range(2, maxNumClusters+1):
#(maxNumClusters + 2 - num_clusters) # go in reverse order to view more sub-optimal solutions
kmeans_ = KMeans(n_clusters=num_clusters, n_init=10) #, random_state=3425) # n_jobs=None - use all CPUs
kmeans_ = kmeans_.fit(dist_matrix)
silh_coef = silhouette_samples(dist_matrix, kmeans_.labels_)
stat = (silh_coef.mean()/silh_coef.std(), silh_coef_optimal.mean()/silh_coef_optimal.std())
# If this metric better than the previous set as the optimal number of clusters
if np.isnan(stat[1]) or stat[0] > stat[1]:
silh_coef_optimal = silh_coef
kmeans = kmeans_
if debug==True:
print(kmeans)
print(stat)
silhouette_avg = silhouette_score(dist_matrix, kmeans_.labels_)
print("For n_clusters ="+ str(num_clusters)+ "The average silhouette_score is :"+ str(silhouette_avg))
print("********")
newIdx = np.argsort(kmeans.labels_)
#print(newIdx)
corr1 = corr0.iloc[newIdx] #reorder rows
corr1 = corr1.iloc[:, newIdx] #reorder columns
clstrs = {i:corr0.columns[np.where(kmeans.labels_==i)[0]].tolist() for i in np.unique(kmeans.labels_)} #cluster members
silh_coef_optimal = pd.Series(silh_coef_optimal, index=dist_matrix.index)
return corr1, clstrs, silh_coef_optimal
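# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# Assumes a small synthetic correlation matrix; the variable names below are illustrative only.
if __name__ == '__main__':
    _x = np.random.randn(500, 10)                         # 500 observations of 10 series
    _corr_demo = pd.DataFrame(np.corrcoef(_x, rowvar=False))
    _corr_sorted, _clstrs, _silh = clusterKMeansBase(_corr_demo, maxNumClusters=5, n_init=3)
    print(_clstrs)                                         # dict: cluster label -> member columns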
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
tm.assert_frame_equal(result, expected)
def test_fast_apply():
# make sure that fast apply is correctly called
# rather than raising any kind of error
# otherwise the python path will be called
# which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
assert not mutated
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': np.random.randn(6),
'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
result = df.apply(lambda x: x, axis=1)
tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
tm.assert_series_equal(result1, result2)
def test_groupby_as_index_apply(df):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
tm.assert_index_equal(res_as, exp)
tm.assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610 as the as_index=False returns a MI here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
def test_apply_concat_preserve_names(three_group):
grouped = three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
assert result.index.names == ('A', 'B', 'stat')
result2 = grouped.apply(desc2)
assert result2.index.names == ('A', 'B', 'stat')
result3 = grouped.apply(desc3)
assert result3.index.names == ('A', 'B', None)
def test_apply_series_to_frame():
def f(piece):
with np.errstate(invalid='ignore'):
logged = np.log(piece)
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': logged})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, ts.index)
def test_apply_series_yield_constant(df):
result = df.groupby(['A', 'B'])['C'].apply(len)
assert result.index.names[:2] == ('A', 'B')
def test_apply_frame_yield_constant(df):
# GH13568
result = df.groupby(['A', 'B']).apply(len)
assert isinstance(result, Series)
assert result.name is None
result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(df):
grouped = df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
tm.assert_index_equal(result.index, expected.index)
tm.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series():
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
tm.assert_series_equal(result, exp, check_names=False)
assert result.name == 'C'
def test_apply_transform(ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
def test_apply_multikey_corner(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
tm.assert_frame_equal(result.loc[key], f(group))
def test_apply_chunk_view():
# Low level tinkering could be unsafe, make sure not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict():
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': compat.lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_apply_typecast_fail():
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_multiindex_fail():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_corner(tsframe):
result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = tsframe * 2
tm.assert_frame_equal(result, expected)
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import pandas as pd
import numpy as np
from evaluation.experiment import data_root_dir
all_root_dir = data_root_dir#os.path.expanduser('~/data/bayesian_sequence_combination')
data_root_dir = os.path.join(all_root_dir, 'data')
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.join(data_root_dir, "bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int) # O tokens
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['features'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['features'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
# print('breaking for fast debugging')
# break
return all_data, workerids
def load_biomedical_data(regen_data_files, debug_subset_size=None, data_folder='bio'):
savepath = os.path.join(data_root_dir, data_folder)
if not os.path.isdir(savepath):
os.mkdir(savepath)
if regen_data_files or not os.path.isfile(savepath + '/annos.csv'):
print(regen_data_files)
print(os.path.isfile(savepath + '/annos.csv'))
anno_path_root = os.path.join(data_root_dir, 'bio-PICO/annos/')
# There are four folders here:
# acl17-test: the only one containing 'professional' annos. 191 docs
# train: 3549 docs
# dev: 500 docs
# test: 500 docs
folders_to_load = ['acl17-test', 'train', 'test', 'dev']
all_data = None
all_workerids = None
for folder in folders_to_load:
print('Loading folder %s' % folder)
folder_data, workerids = _load_bio_folder(anno_path_root, folder)
if all_data is None:
all_data = folder_data
all_workerids = workerids
else:
all_data = pd.concat([all_data, folder_data])
all_workerids = np.unique(np.append(workerids.flatten(), all_workerids.flatten()))
all_data.to_csv(savepath + '/annos.csv', columns=all_workerids, header=False, index=False)
all_data.to_csv(savepath + '/gt.csv', columns=['gold'], header=False, index=False)
all_data.to_csv(savepath + '/doc_start.csv', columns=['doc_start'], header=False, index=False)
all_data.to_csv(savepath + '/text.csv', columns=['features'], header=False, index=False)
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None, nrows=debug_subset_size)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
print('loading features data...')
text = pd.read_csv(savepath + '/text.csv', skip_blank_lines=False, header=None, nrows=debug_subset_size)
text = text.fillna(' ').values
print('loading doc starts...')
doc_start = pd.read_csv(savepath + '/doc_start.csv', header=None, nrows=debug_subset_size).values #np.genfromtxt(savepath + '/doc_start.csv')
print('Loaded %i documents' % np.sum(doc_start))
print('loading ground truth labels...')
gt = pd.read_csv(savepath + '/gt.csv', header=None, nrows=debug_subset_size).values # np.genfromtxt(savepath + '/gt.csv')
if len(text) == len(annos) - 1:
# sometimes the last line of features is blank and doesn't get loaded into features, but doc_start and gt contain labels
# for the newline token
annos = annos[:-1]
doc_start = doc_start[:-1]
gt = gt[:-1]
print('Creating dev/test split...')
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
#testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
ntestdocs = int(np.floor(ndocs * 0.5))
docidxs = np.cumsum(doc_start & (gt != -1)) # gets us the doc ids
# # testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.argwhere(docidxs == (ntestdocs+1))[0][0]
# The first half of the labelled data is used as dev, second half as test
gt_test = np.copy(gt)
gt_test[ntestidxs:] = -1
gt_dev = np.copy(gt)
gt_dev[:ntestidxs] = -1
doc_start_dev = doc_start[gt_dev != -1]
text_dev = text[gt_dev != -1]
gt_task1_dev = gt_dev
gt_dev = gt_dev[gt_dev != -1]
return gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev
def _map_ner_str_to_labels(arr):
arr = arr.astype(str)
arr[arr == 'O'] = 1
arr[arr == 'B-ORG'] = 2
arr[arr == 'I-ORG'] = 0
arr[arr == 'B-PER'] = 4
arr[arr == 'I-PER'] = 3
arr[arr == 'B-LOC'] = 6
arr[arr == 'I-LOC'] = 5
arr[arr == 'B-MISC'] = 8
arr[arr == 'I-MISC'] = 7
arr[arr == '?'] = -1
try:
arr_ints = arr.astype(int)
except:
print("Could not map all annos to integers. The annos we found were:")
uannos = []
for anno in arr:
if anno not in uannos:
uannos.append(anno)
print(uannos)
return arr_ints
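# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# The tag sequence below is an illustrative assumption; it shows the string -> integer label mapping.
def _demo_map_ner_str_to_labels():
    tags = np.array(['O', 'B-PER', 'I-PER', 'O', '?'])
    print(_map_ner_str_to_labels(tags))  # expected: [ 1  4  3  1 -1]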
def _load_rodrigues_annotations(dir, worker_str, gold_char_idxs=None, gold_tokens=None, skip_imperfect_matches=False):
worker_data = None
for f in os.listdir(dir):
if not f.endswith('.txt'):
continue
doc_str = f.split('.')[0]
f = os.path.join(dir, f)
#print('Processing %s' % f)
new_data = pd.read_csv(f, names=['features', worker_str], skip_blank_lines=False,
dtype={'features':str, worker_str:str}, na_filter=False, delim_whitespace=True)
doc_gaps = (new_data['features'] == '') & (new_data[worker_str] == '')
doc_start = np.zeros(doc_gaps.shape[0], dtype=int)
doc_start[doc_gaps[:-1][doc_gaps[:-1]].index + 1] = 1 # the indexes after the gaps
doc_content = new_data['features'] != ''
new_data['doc_start'] = doc_start
new_data = new_data[doc_content]
new_data['doc_start'].iat[0] = 1
annos_to_keep = np.ones(new_data.shape[0], dtype=bool)
for t, tok in enumerate(new_data['features']):
if len(tok.split('/')) > 1:
tok = tok.split('/')[0]
new_data['features'].iat[t] = tok
if len(tok) == 0:
annos_to_keep[t] = False
# compare the tokens in the worker annos to the gold labels. They are misaligned in the dataset. We will
# skip labels in the worker annos that are assigned to only a part of a token in the gold dataset.
char_counter = 0
gold_tok_idx = 0
skip_sentence = False
sentence_start = 0
if gold_char_idxs is not None:
gold_chars = np.array(gold_char_idxs[doc_str])
last_accepted_tok = ''
last_accepted_idx = -1
for t, tok in enumerate(new_data['features']):
if skip_imperfect_matches and skip_sentence:
new_data[worker_str].iloc[t] = -1
if new_data['doc_start'].iat[t]:
skip_sentence = False
if new_data['doc_start'].iat[t]:
sentence_start = t
gold_char_idx = gold_chars[gold_tok_idx]
gold_tok = gold_tokens[doc_str][gold_tok_idx]
#print('tok = %s, gold_tok = %s' % (tok, gold_tok))
if not annos_to_keep[t]:
continue # already marked as skippable
if char_counter < gold_char_idx and \
(last_accepted_tok + tok) in gold_tokens[doc_str][gold_tok_idx-1]:
print('Correcting misaligned annos (split word in worker data): %i, %s' % (t, tok))
skip_sentence = True
last_accepted_tok += tok
annos_to_keep[last_accepted_idx] = False # skip the previous ones until the end
new_data['features'].iat[t] = last_accepted_tok
new_data['doc_start'].iat[t] = new_data['doc_start'].iat[last_accepted_idx]
last_accepted_idx = t
char_counter += len(tok)
elif tok not in gold_tok or (tok == '' and gold_tok != ''):
print('Correcting misaligned annos (spurious features in worker data): %i, %s vs. %s' % (t, tok, gold_tok))
skip_sentence = True
annos_to_keep[t] = False # skip the previous ones until the end
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
elif tok == gold_tok[:len(tok)]: # needs to match the first characters in the string, not just be there somewhere
gold_tok_idx += 1
if tok != gold_tok:
skip_sentence = True
while char_counter > gold_char_idx:
print('error in features alignment between worker and gold!')
len_to_skip = gold_chars[gold_tok_idx - 1] - gold_chars[gold_tok_idx - 2]
# move the gold counter along to the next token because gold is behind
gold_tok_idx += 1
gold_chars[gold_tok_idx:] -= len_to_skip
gold_char_idx = gold_chars[gold_tok_idx]
gold_char_idxs[doc_str] = gold_chars
last_accepted_tok = tok
last_accepted_idx = t
char_counter += len(tok)
else:
skip_sentence = True
annos_to_keep[t] = False
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
# no more features in this document, but the last sentence must be skipped
if skip_imperfect_matches and skip_sentence:
# annos_to_keep[sentence_start:t+1] = False
new_data[worker_str].iloc[sentence_start:t+1] = -1
new_data = new_data[annos_to_keep]
new_data[worker_str] = _map_ner_str_to_labels(new_data[worker_str])
new_data['doc_id'] = doc_str
new_data['tok_idx'] = np.arange(new_data.shape[0])
# add to data from this worker
if worker_data is None:
worker_data = new_data
else:
worker_data = pd.concat([worker_data, new_data])
return worker_data
def _load_rodrigues_annotations_all_workers(annotation_data_path, gold_data, skip_dirty=False):
worker_dirs = os.listdir(annotation_data_path)
data = None
annotator_cols = np.array([], dtype=str)
char_idx_word_starts = {}
chars = {}
char_counter = 0
for t, tok in enumerate(gold_data['features']):
if gold_data['doc_id'].iloc[t] not in char_idx_word_starts:
char_counter = 0
starts = []
toks = []
char_idx_word_starts[gold_data['doc_id'].iloc[t]] = starts
chars[gold_data['doc_id'].iloc[t]] = toks
starts.append(char_counter)
toks.append(tok)
char_counter += len(tok)
for widx, dir in enumerate(worker_dirs):
if dir.startswith("."):
continue
worker_str = dir
annotator_cols = np.append(annotator_cols, worker_str)
dir = os.path.join(annotation_data_path, dir)
print('Processing dir for worker %s (%i of %i)' % (worker_str, widx, len(worker_dirs)))
worker_data = _load_rodrigues_annotations(dir, worker_str,
char_idx_word_starts, chars, skip_dirty)
print("Loaded a dataset of size %s" % str(worker_data.shape))
# now need to join this to other workers' data
if data is None:
data = worker_data
else:
data = data.merge(worker_data, on=['doc_id', 'tok_idx', 'features', 'doc_start'], how='outer', sort=True, validate='1:1')
return data, annotator_cols
def IOB_to_IOB2(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in I_labels:
typeidx = np.argwhere(I_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != label):
# we have I preceded by O. This needs to be changed to a B.
seq[i] = B_labels[typeidx]
return seq
def IOB2_to_IOB(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in B_labels:
typeidx = np.argwhere(B_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != I_labels[typeidx]):
# B is not preceded by a chunk of the same type, so in IOB(1) it becomes an I.
seq[i] = I_labels[typeidx]
return seq
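# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# The label sequence is an illustrative assumption using the integer scheme from
# _map_ner_str_to_labels (1=O, 2=B-ORG, 0=I-ORG, 4=B-PER, 3=I-PER, ...).
def _demo_iob_conversion():
    seq = np.array([1, 0, 0, 1, 3])                 # IOB1-style: chunks may start with an I tag
    print(IOB_to_IOB2(seq.copy()))                  # expected: [1 2 0 1 4] -- chunk-initial I becomes B
    print(IOB2_to_IOB(np.array([1, 2, 0, 1, 4])))   # expected: [1 0 0 1 3]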
def load_ner_data(regen_data_files, skip_sen_with_dirty_data=False):
# In Nguyen et al 2017, the original data has been separated out for task 1, aggregation of crowd labels. In this
# task, the original training data is further split into val and test -- to make our results comparable with Nguyen
# et al, we need to test on the test split for task 1, but train our model on both.
# To make them comparable with Rodrigues et al. 2014, we need to test on all data (check this in their paper).
# Task 2 is for prediction on a test set given a model trained on the training set and optimised on the validation
# set. It would be ideal to show both these results...
savepath = os.path.join(data_root_dir, 'ner') # location to save our csv files to
if not os.path.isdir(savepath):
os.mkdir(savepath)
# within each of these folders below is an mturk_train_data folder, containing crowd labels, and a ground_truth
# folder. Rodrigues et al. have assigned document IDs that allow us to match up the annos from each worker.
# Nguyen et al. have split the training set into the val/test folders for task 1. Data is otherwise the same as in
# the Rodrigues folder under mturk/extracted_data.
task1_val_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/val/')
task1_test_path = os.path.join(data_root_dir, 'crf-ma-NER-task1/test')
# These are just two files that we use for text features + ground truth labels.
task2_val_path = os.path.join(data_root_dir, 'English NER/eng.testa')
task2_test_path = os.path.join(data_root_dir, 'English NER/eng.testb')
if regen_data_files or not os.path.isfile(savepath + '/task1_val_annos.csv'):
# Steps to load data (all steps need to map annos to consecutive integer labels).
# 1. Create an annos.csv file containing all the annos in task1_val_path and task1_test_path.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(os.path.join(task1_val_path, 'ground_truth/'), 'gold')
# load the validation data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_val_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 2. Create ground truth CSV for task1_val_path (for tuning the LSTM)
# merge gold with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Validation data: we have some misaligned labels.')
print(counts)
if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_val_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_val_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_val_gt.csv', columns=['gold'], header=False, index=False)
# 3. Load worker annos for test set.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(
os.path.join(task1_test_path, 'ground_truth/'), 'gold')
# load the test data
data, annotator_cols = _load_rodrigues_annotations_all_workers(
os.path.join(task1_test_path, 'mturk_train_data/'),
gold_data, skip_sen_with_dirty_data)
# 4. Create ground truth CSV for task1_test_path
# merge with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'features'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annos per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annos
if len(np.unique(counts)) > 1:
print('Test data: we have some misaligned labels.')
print(counts)
if np.any(counts == 0):
print('Removing document %s with no annos.' % doc)
# remove any lines with no annos
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_test_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the features in same order
data.to_csv(savepath + '/task1_test_text.csv', columns=['features'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_test_gt.csv', columns=['gold'], header=False, index=False)
# 5. Create a file containing only the words for the task 2 validation set, i.e. like annos.csv with no annos.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_val_path but blank out the task_1 labels
# (for tuning the LSTM for task 2)
import csv
eng_val = pd.read_csv(task2_val_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_val.shape[0])
docstart_token = eng_val['features'][0]
doc_starts[1:] = (eng_val['features'] == docstart_token)[:-1]
eng_val['doc_start'] = doc_starts
eng_val['tok_idx'] = eng_val.index
eng_val = eng_val[eng_val['features'] != docstart_token] # remove all the docstart labels
eng_val['gold'] = _map_ner_str_to_labels(eng_val['gold'])
eng_val['gold'] = IOB_to_IOB2(eng_val['gold'].values)
eng_val.to_csv(savepath + '/task2_val_gt.csv', columns=['gold'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_text.csv', columns=['features'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 6. Create a file containing only the words for the task 2 test set, i.e. like annos.csv with no annos.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_test_path but blank out the task_1 labels/
eng_test = pd.read_csv(task2_test_path, delimiter=' ', usecols=[0,3], names=['features', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_test.shape[0])
docstart_token = eng_test['features'][0]
doc_starts[1:] = (eng_test['features'] == docstart_token)[:-1]
eng_test['doc_start'] = doc_starts
eng_test['tok_idx'] = eng_test.index
eng_test = eng_test[eng_test['features'] != docstart_token] # remove all the docstart labels
eng_test['gold'] = _map_ner_str_to_labels(eng_test['gold'])
eng_test['gold'] = IOB_to_IOB2(eng_test['gold'].values)
eng_test.to_csv(savepath + '/task2_test_gt.csv', columns=['gold'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_text.csv', columns=['features'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 7. Reload the data for the current run...
print('loading annos for task1 test...')
annos = pd.read_csv(savepath + '/task1_test_annos.csv', skip_blank_lines=False)
print('loading features data for task1 test...')
text = pd.read_csv(savepath + '/task1_test_text.csv', skip_blank_lines=False, header=None)
print('loading doc_starts for task1 test...')
doc_start = pd.read_csv(savepath + '/task1_test_doc_start.csv', skip_blank_lines=False, header=None)
print('loading ground truth for task1 test...')
gt_t = pd.read_csv(savepath + '/task1_test_gt.csv', skip_blank_lines=False, header=None)
print('Unique labels: ')
print(np.unique(gt_t))
print(gt_t.shape)
print('loading annos for task1 val...')
annos_v = pd.read_csv(savepath + '/task1_val_annos.csv', skip_blank_lines=False)
# remove any lines with no annos
# annotated_idxs = np.argwhere(np.any(annos_v != -1, axis=1)).flatten()
# annos_v = annos_v.iloc[annotated_idxs, :]
annos = pd.concat((annos, annos_v), axis=0)
annos = annos.fillna(-1)
annos = annos.values
print('loaded annos for %i tokens' % annos.shape[0])
print('loading features data for task1 val...')
text_v = pd.read_csv(savepath + '/task1_val_text.csv', skip_blank_lines=False, header=None)
# text_v = text_v.iloc[annotated_idxs]
text = pd.concat((text, text_v), axis=0)
text = text.fillna(' ').values
print('loading doc_starts for task1 val...')
doc_start_v = pd.read_csv(savepath + '/task1_val_doc_start.csv', skip_blank_lines=False, header=None)
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Bio import SeqIO
from six import StringIO
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.SeqUtils.ProtParam import ProtParamData
from modlamp.plot import helical_wheel
# Protparam scales:
# kd → Kyte & Doolittle Index of Hydrophobicity
# Flex → Normalized average flexibility parameters (B-values)
# hw → Hopp & Wood Index of Hydrophilicity
# em → Emini Surface fractional probability (Surface Accessibility)
aalist = ['A','C', 'D','E',
'F','G', 'H','I',
'K','L', 'M','N',
'P','Q', 'R','S',
'T','V', 'Y','W']
# Colour scheme in Lesk (Introduction to Bioinformatics)
# Uses 5 groups (note Histidine):
# Small nonpolar G, A, S, T Orange
# Hydrophobic C, V, I, L, P, F, Y, M, W Green
# Polar N, Q, H Magenta
# Negatively charged D, E Red
# Positively charged K, R Blue
colorpallete = {'G': 'orange', 'A': 'orange', 'S': 'orange', 'T': 'orange',
'C': 'g', 'V': 'g', 'I': 'g', 'L': 'g',
'P': 'g', 'F': 'g', 'Y': 'g', 'M': 'g',
'W': 'g', 'N': 'm', 'Q': 'm', 'H': 'm',
'D': 'r', 'E': 'r', 'K': 'b', 'R': 'b'}
# these hydrophobicity scales are minmax organized
# the higher the value, more hydrophobic the aa is
scales = {'Cowan': {'W': 0.879, 'F': 0.965, 'L': 0.992,
'I': 1.0, 'M': 0.817, 'V': 0.872,
'Y': 0.46, 'C': 0.731, 'P': 0.751,
'A': 0.628, 'H': 0.377, 'R': 0.163,
'T': 0.472, 'G': 0.54, 'K': 0.153,
'Q': 0.307, 'S': 0.382, 'N': 0.291,
'E': 0.05, 'D': 0.0},
'Kovacs_a': {'W': 1.0, 'F': 0.916, 'L': 0.76,
'I': 0.707, 'M': 0.551, 'V': 0.486,
'Y': 0.514, 'C': 0.318, 'P': 0.355,
'A': 0.174, 'H': 0.19, 'R': 0.174,
'T': 0.174, 'G': 0.056, 'K': 0.0,
'Q': 0.103, 'S': 0.09, 'N': 0.084,
'E': 0.044, 'D': 0.034},
'Kovacs_b': {'W': 1.0, 'F': 0.931, 'L': 0.792,
'I': 0.74, 'M': 0.59, 'V': 0.538,
'Y': 0.549, 'C': 0.382, 'P': 0.422,
'A': 0.266, 'H': 0.266, 'R': 0.338,
'T': 0.243, 'G': 0.182, 'K': 0.266,
'Q': 0.182, 'S': 0.171, 'N': 0.165,
'E': 0.012, 'D': 0.0},
'Parker': {'W': 1.0, 'F': 0.96, 'L': 0.96,
'M': 0.71, 'V': 0.685, 'Y': 0.595,
'C': 0.43, 'P': 0.395, 'A': 0.395,
'H': 0.395, 'R': 0.29, 'T': 0.24,
'G': 0.215, 'K': 0.215, 'Q': 0.2,
'S': 0.175, 'N': 0.15, 'E': 0.11,
'D': 0.0, 'I': 0.9},
'Monerac': {'W': 0.983, 'F': 1.0, 'L': 0.983,
'I': 0.99, 'M': 0.833, 'V': 0.843,
'Y': 0.76, 'C': 0.67, 'P': 0.173,
'A': 0.62, 'H': 0.403, 'R': 0.263,
'T': 0.437, 'G': 0.357, 'K': 0.207,
'Q': 0.29, 'S': 0.323, 'N': 0.173,
'E': 0.157, 'D': 0.0},
'AVE3': {'W': 1.0, 'F': 0.969, 'L': 0.916, 'I': 0.882,
'M': 0.715, 'V': 0.693, 'Y': 0.639, 'C': 0.497,
'P': 0.333, 'A': 0.43, 'H': 0.357, 'R': 0.298,
'T': 0.309, 'G': 0.248, 'K': 0.225, 'Q': 0.231,
'S': 0.224, 'N': 0.163, 'E': 0.093, 'D': 0.0},
'ez': {'L': -4.92, 'I': -4.92, 'V': -4.04,
'F': -2.98, 'M': -2.35, 'W': -2.33,
'A': -1.81, 'C': -1.28, 'G': -0.94,
'Y': 0.14, 'T': 2.57, 'S': 3.4,
'H': 4.66, 'Q': 5.54, 'K': 5.55,
'N': 6.64, 'E': 6.81, 'D': 8.72,
'R': 14.92, 'P': 0.0}}
data_aa = {'AMP_mean': {'A': 7.520502564098912, 'C': 6.36633345571278,
'D': 2.668597822622926, 'E': 2.7141042077216704,
'F': 4.391951106649475, 'G': 10.675459550168885,
'H': 2.1711336118510483, 'I': 6.347409500480028,
'K': 9.861591967792371, 'L': 9.658690725226656,
'M': 1.2484935261695338, 'N': 3.7691909197648736,
'P': 4.580420590187392, 'Q': 2.4477634910400194,
'R': 5.545623161260059, 'S': 6.026451554176794,
'T': 4.151896217120581, 'V': 5.789255898915741,
'Y': 2.3433663843826027, 'W': 1.7217637446576457},
'AMP_std': {'A': 6.606149264041305, 'C': 7.243769785740786,
'D': 3.653517299310336, 'E': 3.627738452023139,
'F': 4.741961828857768, 'G': 7.367561399376693,
'H': 3.899314485554839, 'I': 5.552037685411282,
'K': 7.372780284577385, 'L': 8.756227525252548,
'M': 2.3385964346632746, 'N': 3.752206751253756,
'P': 6.03595086307002, 'Q': 3.29147871240816,
'R': 6.997662087778225, 'S': 4.907602757156398,
'T': 4.314364835800506, 'V': 4.980394942927765,
'Y': 3.2792515791237014, 'W': 3.1977236581945347},
'NAMP_mean': {'A': 8.5622274065478, 'C': 1.4789156615654058,
'D': 5.155744906305648, 'E': 6.795230159499449,
'F': 3.728852171144205, 'G': 7.14820582792835,
'H': 2.1557129065808085, 'I': 6.187847874241366,
'K': 7.068499469732919, 'L': 9.3359440472875,
'M': 1.9695515485179416, 'N': 3.837576140353241,
'P': 4.307095987596791, 'Q': 3.739296528690092,
'R': 6.329842863422612, 'S': 5.97177857365507,
'T': 5.164575553563543, 'V': 7.339571862896026,
'Y': 2.764380580261445, 'W': 0.9591499302097856},
'NAMP_std': {'A': 4.152347300212898, 'C': 2.41911267211069,
'D': 2.516020373325246, 'E': 3.314402093538308,
'F': 2.330314168513022, 'G': 3.297362816891616,
'H': 1.6512452826231296, 'I': 3.0490889819362645,
'K': 4.154914723867973, 'L': 3.6288667599165914,
'M': 1.5326492787082528, 'N': 2.279318863869867,
'P': 2.668229546280934, 'Q': 2.3496768850990324,
'R': 3.941947102442459, 'S': 2.835126542032928,
'T': 2.3327615292710866, 'V': 2.951465012361856,
'Y': 1.8416000916385065, 'W': 1.2009973185629197}}
data_aa = pd.DataFrame(data_aa)
def profile(header, sequence, out, size=7, wd=5, scale='ez'):
analyzed_seq = ProteinAnalysis(str(sequence))
val = analyzed_seq.protein_scale(window=wd, param_dict=scales[scale])
xval, xcolors = [], []
for i in range(0, len(val)):
s = sequence[i]
c = colorpallete[s]
xcolors.append(c)
s = f'{s}{i}'
xval.append(s)
plt.plot(xval, val)
plt.xticks(fontsize=size, rotation=90)
for i, c in enumerate(xcolors):
plt.gca().get_xticklabels()[i].set_color(c)
plt.xlabel('Window start position')
if scale == 'ez':
plt.axhline(y=0, color='gray', linestyle='--')
plt.ylabel('Transfer energy from water\nto lipid bilayer')
plt.savefig(f'{out}/EZenergy_{header}.png',
dpi=300)
else:
plt.axhline(y=0.5, color='gray', linestyle='--')
plt.ylabel(f'Scaled hydrophobicity - Scale {scale}')
plt.savefig(f'{out}/hydrophobicity_{scale}_{header}.png',
dpi=300)
plt.close()
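# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# The peptide sequence and output directory are illustrative assumptions; profile() saves a PNG
# EZ-energy or hydrophobicity profile for the given sequence into `out`.
def _demo_profile(out='.'):
    demo_seq = 'GIGKFLHSAKKFGKAFVGEIMNS'  # magainin-2-like toy sequence, for illustration only
    profile('demo_peptide', demo_seq, out, wd=5, scale='ez')
    profile('demo_peptide', demo_seq, out, wd=5, scale='Parker')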
def sstruc(seq, wd=5):
analyzed_seq = ProteinAnalysis(str(seq))
ss = analyzed_seq.secondary_structure_fraction() # helix, turn, sheet
profexp = analyzed_seq.protein_scale(window=wd, param_dict=ProtParamData.em)
proflex = analyzed_seq.protein_scale(window=wd, param_dict=ProtParamData.Flex)
L = analyzed_seq.length
extinc = analyzed_seq.molar_extinction_coefficient()
aroma = analyzed_seq.aromaticity()
gravy = analyzed_seq.gravy()
MW = analyzed_seq.molecular_weight()
charge_at_pH7 = analyzed_seq.charge_at_pH(7.0)
II = analyzed_seq.instability_index()
pI = analyzed_seq.isoelectric_point()
return [ss, profexp, proflex,
L, extinc, aroma,
gravy, MW,
charge_at_pH7,
II, pI]
def decompose(header, sequence, out):
l = len(sequence)
k = 100/l
seqfreq = []
for aa in aalist:
seqfreq.append(sequence.count(aa)*k)
df = pd.DataFrame()
from __future__ import print_function
import base64
import csv
import json
import sys
from collections import Counter
from functools import wraps
import numpy as np
import pandas as pd
import zerorpc
from nestor import keyword as kex
def exception_handler(func):
@wraps(func)
def func_or_exception(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
print(f"Function '{func.__name__}' threw an exception:\n")
print(e)
sys.stdout.flush()
return e
return func_or_exception
class Api(object):
df = pd.DataFrame([])
vocab_single_df = pd.DataFrame([])
vocab_multi_df = pd.DataFrame([])
output_df = pd.DataFrame([])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.filters.bk_filter import bkfilter
from statsmodels.tsa.filters import *
from statsmodels.tsa.filters.hp_filter import hpfilter
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.tsa.stattools import kpss
from statsmodels.tsa.filters.cf_filter import cffilter
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.holtwinters import SimpleExpSmoothing
from datetime import date, timedelta
import datetime
# ----------- Utility Functions -----------
def rmse(data1, data2):
#data1 and data2 are numpy arrays
if len(data1) == len(data2):
return math.sqrt(sum((np.array(data1) - np.array(data2))**2)/len(data1))
else:
min_len = min([len(data1), len(data2)])
return math.sqrt(sum((np.array(data1[-min_len:]) - np.array(data2[-min_len:]))**2)/min_len)  # assumes alignment on the last min_len points
def rolling_rmse(data1, data2, window = 3):
rolling_1 = data1.rolling(window)
rolling_2 = data2.rolling(window)
# assumption: compare the rolling means of the two series via rmse()
return rmse(rolling_1.mean().dropna().values, rolling_2.mean().dropna().values)
def generte_dates(start, end, length):
d1 = datetime.datetime.strptime(start, "%Y-%m-%d").date()
d2 = datetime.datetime.strptime(end, "%Y-%m-%d").date()
date_list = [str(d1 + timedelta(days=x)) for x in range(0, (d2-d1).days + 1, 1)]  # materialise as a list so len() works in Python 3
if length > len(date_list):
print("Date generation not possible")
else:
return np.random.choice(date_list, size=length, replace=False)  # stdlib random.choice has no size/replace arguments
def impute_points(dataFrame, required_dates):
# dataFrame : data frame with index = datetime and one column "Close"
# required_dates : required dates in list of string
new_data = pd.DataFrame(np.nan, index=required_dates, columns = ["freq"])
new_data.reset_index(inplace=True)
new_data.columns = ["Date", "freq"]
new_data['Date'] = new_data['Date'].astype('datetime64[ns]')
new_data.set_index("Date", inplace=True)
new_data.update(dataFrame, overwrite = False)
new_data = new_data.interpolate(method = 'time', order = 4)
new_data.fillna(new_data.freq.mean(), inplace = True)
return new_data
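# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# Dates and values are illustrative assumptions; missing days are filled by time interpolation.
def _demo_impute_points():
    observed = pd.DataFrame({'freq': [1.0, 4.0]},
                            index=pd.to_datetime(['2020-01-01', '2020-01-04']))
    wanted = ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04']
    print(impute_points(observed, wanted))  # 2020-01-02 and 2020-01-03 are interpolated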
# ----------- Stationarity Tests -----------
def aDickeyFuller(X):
# Test for Stationarity
result = adfuller(X, regression = 'ct')
output = {}
output["ADF Statistics"] = result[0]
output["p value"] = result[1]
output["Number of Lags Used"] = result[2]
output["Critical values"] = result[4]
return output
def kpss_test(X):
# Test for Stationarity
result = kpss(X, regression = 'ct')
output = {}
output["KPSS Statistics"] = result[0]
output["p value"] = result[1]
output["Number of Lags Used"] = result[2]
output["Critical values"] = result[3]
return output
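# --- Hedged usage sketch (added for illustration; not part of the original source). ---
# A random walk is used as an illustrative non-stationary series; both helpers return dicts
# of test statistics, p-values and critical values.
def _demo_stationarity_tests():
    walk = np.cumsum(np.random.randn(200))
    print(aDickeyFuller(walk))
    print(kpss_test(walk))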
# ----------- Smoothing Techniques -----------
def exp_smoothing(data):
model = ExponentialSmoothing(data, trend = "additive").fit(smoothing_level=0.1,optimized=True)
result = model.fittedvalues
return pd.DataFrame(result)
def simple_exp_smoothing(data):
model = SimpleExpSmoothing(data).fit(smoothing_level = 0.1, optimized = True)
result = model.fittedvalues
return pd.DataFrame(result)
def moving_average(data, window = 8):
#Left tailed
rolling = data.rolling(window = window)
result = rolling.mean()
return pd.DataFrame(result)
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCaterogyEncoder(unittest.TestCase):
def test_inverse_transform_1(self):
"""
Test no preprocessing
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
original = inverse_transform(train)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_2(self):
"""
Test multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]})
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]})
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
result1 = enc_onehot.transform(test)
result2 = enc_binary.transform(result1)
result3 = enc_ordinal.transform(result2)
result4 = enc_basen.transform(result3)
result5 = enc_target.transform(result4)
original = inverse_transform(result5, [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_3(self):
"""
Test target encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago', 'chicago'],
'state': ['US', 'FR', 'FR', 'US', 'US'],
'other': ['A', 'A', np.nan, 'B', 'B']})
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1, 0, 1], columns=['y'])
enc = ce.TargetEncoder(cols=['city', 'state']).fit(train, y)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_4(self):
"""
Test ordinal encoding
"""
train = pd.DataFrame({'city': ['chicago', 'st louis']})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_5(self):
"""
Test inverse_transform having Nan in train and handle missing value expect returned with nan_Ordinal
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_6(self):
"""
test inverse_transform having Nan in train and handle missing return Nan expect returned with nan_Ordinal
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_7(self):
"""
test inverse_transform both fields are return Nan with Nan Expect ValueError Ordinal
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_8(self):
"""
test inverse_transform having missing and no Uknown expect inversed ordinal
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_9(self):
"""
test inverse_transform having handle missing value and handle unknown return Nan expect best inverse ordinal
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_10(self):
"""
test inverse_transform with multiple ordinal
"""
data = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['a', 'b']})
test = pd.DataFrame({'city': [1, 2, 2],
'state': [1, 2, 2],
'other': ['a', 'b', 'a']})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['a', 'b', 'a']})
enc = ce.OrdinalEncoder(cols=['city', 'state'])
enc.fit(data)
original = inverse_transform(test, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_11(self):
"""
Test binary encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'paris', 'monaco'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
expected = pd.DataFrame({'city': ['chicago', 'paris', np.nan],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
enc = ce.BinaryEncoder(cols=['city', 'state']).fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_12(self):
"""
test inverse_transform having data expecting a returned result
"""
train = pd.Series(list('abcd')).to_frame('letter')
enc = ce.BaseNEncoder(base=2)
result = enc.fit_transform(train)
inversed_result = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, inversed_result)
def test_inverse_transform_13(self):
"""
Test basen encoding
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_14(self):
"""
test inverse_transform having Nan in train and handle missing expected a result with Nan
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_15(self):
"""
test inverse_transform having missing and no unknown
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_16(self):
"""
test inverse_transform having handle missing value and Unknown
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_17(self):
"""
test inverse_transform with multiple baseN
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
test = pd.DataFrame({'city_0': [0, 1],
'city_1': [1, 0],
'state_0': [0, 1],
'state_1': [1, 0]})
enc = ce.BaseNEncoder(cols=['city', 'state'], handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
original = inverse_transform(test, enc)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_18(self):
"""
Test Onehot encoding
"""
encoder = ce.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inversed_result = inverse_transform(transformed, encoder)
pd.testing.assert_frame_equal(value, inversed_result)
def test_inverse_transform_19(self):
"""
test inverse_transform having no categories names
"""
encoder = ce.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inversed_result = inverse_transform(transformed, encoder)
pd.testing.assert_frame_equal(value, inversed_result)
def test_inverse_transform_20(self):
"""
test inverse_transform with Nan in training expecting Nan_Onehot returned result
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OneHotEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_21(self):
"""
test inverse_transform with Nan in training expecting Nan_Onehot returned result
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OneHotEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_22(self):
"""
test inverse_transform with Both fields return_nan
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_23(self):
"""
test inverse_transform having missing and No Unknown
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OneHotEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_24(self):
"""
test inverse_transform having handle missing value and Handle Unknown
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.OneHotEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_25(self):
"""
Test dict encoding
"""
data = pd.DataFrame({'city': ['chicago', 'paris-1', 'paris-2'],
'state': ['US', 'FR-1', 'FR-2'],
'other': ['A', 'B', np.nan]})
expected = pd.DataFrame({'city': ['chicago', 'paris-1', 'paris-2'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', np.nan]})
input_dict = dict()
input_dict['col'] = 'state'
input_dict['mapping'] = pd.Series(data=['US', 'FR-1', 'FR-2'], index=['US', 'FR', 'FR'])
input_dict['data_type'] = 'object'
result = inverse_transform(data, input_dict)
pd.testing.assert_frame_equal(result, expected)
def test_inverse_transform_26(self):
"""
Test multiple dict encoding
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]},
index=['index1', 'index2', 'index3'])
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
result1 = enc_onehot.transform(test)
result2 = enc_binary.transform(result1)
result3 = enc_ordinal.transform(result2)
result4 = enc_basen.transform(result3)
result5 = enc_target.transform(result4)
original = inverse_transform(result5, [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
pd.testing.assert_frame_equal(expected, original)
def test_transform_ce_1(self):
"""
Unit test for apply preprocessing on OneHotEncoder
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.one_hot.OneHotEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.feature_names_ for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_2(self):
"""
Unit test for apply preprocessing on OrdinalEncoder
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.ordinal.OrdinalEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.feature_names_ for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_3(self):
"""
Unit test for apply preprocessing on BaseNEncoder
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.basen.BaseNEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.feature_names_ for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_4(self):
"""
Unit test for apply preprocessing on BinaryEncoder
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.binary.BinaryEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.feature_names_ for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_5(self):
"""
Unit test for apply preprocessing with sklearn model
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.ordinal.OrdinalEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = sklearn.ensemble._gb.GradientBoostingClassifier().fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert all(expected.index == result.index)
def test_transform_ce_6(self):
"""
Unit test for apply preprocessing with catboost model
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.ordinal.OrdinalEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.feature_names_ for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_7(self):
"""
Unit test for apply preprocessing with lightgbm model
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.ordinal.OrdinalEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = lightgbm.sklearn.LGBMClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.booster_.feature_name() for column in result.columns]
assert all(expected.index == result.index)
def test_transform_ce_8(self):
"""
Unit test for apply preprocessing with xgboost model
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': [1, 0]})
enc = ce.ordinal.OrdinalEncoder(cols=["num1", "num2"])
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train))
clf = xgboost.sklearn.XGBClassifier(n_estimators=1).fit(train_preprocessed, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 0],
'other': [1, 0, 0]})
expected = pd.DataFrame(enc.transform(test), index=test.index)
result = apply_preprocessing(test, clf, enc)
assert result.shape == expected.shape
assert [column in clf.get_booster().feature_names for column in result.columns]
assert all(expected.index == result.index)
def test_get_col_mapping_ce_1(self):
"""
Test test_get_col_mapping_ce with target encoding
"""
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1], columns=['y'])
enc = ce.TargetEncoder(cols=['city', 'state'])
test_encoded = pd.DataFrame(enc.fit_transform(test, y))
mapping = get_col_mapping_ce(enc)
expected_mapping = {'city': ['city'], 'state': ['state']}
self.assertDictEqual(mapping, expected_mapping)
def test_get_col_mapping_ce_2(self):
"""
Test test_get_col_mapping_ce with target OrdinalEncoder
"""
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1], columns=['y'])
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
test_encoded = pd.DataFrame(enc.fit_transform(test, y))
mapping = get_col_mapping_ce(enc)
expected_mapping = {'city': ['city'], 'state': ['state'], 'other': ['other']}
self.assertDictEqual(mapping, expected_mapping)
def test_get_col_mapping_ce_3(self):
"""
Test test_get_col_mapping_ce with target BinaryEncoder
"""
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1], columns=['y'])
enc = ce.BinaryEncoder(cols=['city', 'state'])
test_encoded = pd.DataFrame(enc.fit_transform(test, y))
mapping = get_col_mapping_ce(enc)
expected_mapping = {'city': ['city_0', 'city_1'], 'state': ['state_0', 'state_1']}
self.assertDictEqual(mapping, expected_mapping)
def test_get_col_mapping_ce_4(self):
"""
Test test_get_col_mapping_ce with target BaseNEncoder
"""
test = pd.DataFrame({'city': ['chicago', 'paris', 'new york'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = | pd.DataFrame(data=[0, 1, 1], columns=['y']) | pandas.DataFrame |
import numpy as np
import rasterio as rio
import geopandas as gpd
import pandas as pd
import random
#from osgeo import gdal, ogr, osr
from rasterio.mask import mask
from shapely.geometry import mapping, Polygon
from skimage.util import img_as_float
import os
os.chdir('E:/SLICUAV_manuscript_code/3_Landscape_mapping/2019_10_23_1_compute_superpixel_features')
# import machinery for this
from trees.clusterfeatures import ClusterFeatures
grid_shps = gpd.read_file('E:/SLICUAV_manuscript_data/3_Clipped_OMs/'+
'2019_08_30_basecamp_grid/'+
'2019_08_30_basecamp_50m_grid.shp')
ftprnt_shp = gpd.read_file('E:/SLICUAV_manuscript_data/7_Harapan_shapefiles/'+
'2019_09_19_basecamp_footprint_both_years_latlong.shp')
flag = True
#for shp_i in range(all_shps.shape[0]):
for i in range(50):
shp_flag = True
random.seed(42)
shp_i = i + 450
# Get unique tag for this block
ths_id = grid_shps['id'][shp_i]
# load images
rgbtif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_RGB.tif')
rgbimg = rgbtif.read()
# Reorder correctly as first dimension is bands
rgbimg = np.swapaxes(rgbimg,0,2)
rgbimg = np.swapaxes(rgbimg,0,1)
rgbtif.close()
mstif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_MS.tif')
msimg = mstif.read()
# Reorder correctly as first dimension is bands
msimg = np.swapaxes(msimg,0,2)
msimg = np.swapaxes(msimg,0,1)
mstif.close()
dsmtif = rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_DSM.tif')
dsmimg = dsmtif.read()
# Reorder correctly as first dimension is bands
dsmimg = np.swapaxes(dsmimg,0,2)
dsmimg = np.swapaxes(dsmimg,0,1)
dsmtif.close()
# Remove redundant third axis
dsmimg = np.squeeze(dsmimg)
    # Deal with missing values, which come in as large negative numbers, by setting them to zero
dsmimg[dsmimg<-1000]=0
### scale both actual images to 0-1
rgbimg = img_as_float(rgbimg)
msimg = msimg/65535
# read in the segmented shapes
cur_segs = gpd.read_file('E:/SLICUAV_manuscript_data/5_Landscape_superpixels/'+\
str(ths_id) +'_SLIC_5000.shp')
seg_flag = True
ticker = 0
for seg_i in range(cur_segs.shape[0]):
ths_shp = []
# check if it's in the area for which we collected data
if not ftprnt_shp.intersects(Polygon(cur_segs['geometry'][seg_i]))[0]:
ticker += 1
continue
tmp_gjson = mapping(cur_segs['geometry'][seg_i])
ths_shp.append(tmp_gjson)
del tmp_gjson
# Get RGB mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_RGB.tif') as gtif:
rgb_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
rgb_clip = np.swapaxes(rgb_clip,0,2)
rgb_clip = np.swapaxes(rgb_clip,0,1)
rgb_mask = np.nonzero(rgb_clip.sum(axis=2))
del rgb_clip, clip_affine
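        # Note on the masking approach used here and for the MS/DSM bands below: with crop=False,
        # rasterio's mask keeps the full image extent and zero-fills pixels outside the superpixel
        # polygon, so np.nonzero on the band sum recovers the (row, col) indices of the pixels
        # that belong to this superpixel.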
# Get MS mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_MS.tif') as gtif:
ms_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
ms_clip = np.swapaxes(ms_clip,0,2)
ms_clip = np.swapaxes(ms_clip,0,1)
ms_clip[ms_clip>65535]=0
ms_mask = np.nonzero(ms_clip.sum(axis=2))
del ms_clip, clip_affine
# Get DSM mask
with rio.open('E:/SLICUAV_manuscript_data/3_Clipped_OMs/2019_09_19_basecamp_grid_with_buffer/all_clips/'+
'id_' + str(ths_id) + '_DSM.tif') as gtif:
dsm_clip, clip_affine = mask(gtif,ths_shp,crop=False,all_touched=True)
dsm_clip = np.swapaxes(dsm_clip,0,2)
dsm_clip = np.swapaxes(dsm_clip,0,1)
dsm_mask = np.nonzero(dsm_clip.sum(axis=2))
del dsm_clip, clip_affine
feat_struct = ClusterFeatures(shp_i,'NA',rgbimg,rgb_mask,msimg,ms_mask,dsmimg,dsm_mask)
feat_struct.runFeaturePipeline(thresh=0.5,glcm_steps=3,acor_steps=3,mode=False,HSV=True)
feat_vec = feat_struct.featStack
del rgb_mask, ms_mask, dsm_mask, ths_shp
if flag:
pd.DataFrame(feat_struct.featList).to_csv('E:/SLICUAV_manuscript_data/5_Landscape_superpixels/features/2019_10_14_variable_names.csv',header=None,index=None)
pd.DataFrame(feat_struct.featClass).to_csv('E:/SLICUAV_manuscript_data/5_Landscape_superpixels/features/2019_10_14_variable_class.csv',header=None,index=None)
| pd.DataFrame(feat_struct.featHeightInvar) | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with | tm.ensure_clean() | pandas.util.testing.ensure_clean |
import os
from glob import glob
from pprint import pprint
import json
import numpy as np
import pandas as pd
import h5py
from scipy.optimize import fsolve
def generate_file_dict(sub_simu_path):
simu_path, sub_simu_name = os.path.split(sub_simu_path)
simu_name = os.path.split(simu_path)[1]
if os.path.isfile(os.path.join(sub_simu_path, "parameters.json")):
return {
"simu_name": simu_name,
"simu_path": simu_path,
"sub_simu_name" : sub_simu_name,
"sub_simu_path" : sub_simu_path,
"path_parameter_file" : os.path.join(sub_simu_path, "parameters.json"),
"path_dataset" : os.path.join(sub_simu_path, "dataset.h5")
}
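# Hypothetical usage sketch (the directory layout is an assumption): build one record per
# sub-simulation folder found under a top-level results directory, e.g.
#   file_dicts = [generate_file_dict(p) for p in glob('simulations/*/*') if os.path.isdir(p)]
#   files_df = pd.DataFrame([d for d in file_dicts if d is not None])
# Note that generate_file_dict implicitly returns None when no parameters.json is present.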
def generate_parameter_dict(param_filepath):
with open(param_filepath, 'r') as file_:
return json.load(file_)
def return_average_conc_from_path(filepath_h5, name):
dataset = | pd.read_hdf(filepath_h5, key='dataset_time_traces') | pandas.read_hdf |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
| tm.assert_numpy_array_equal(result.values, expected_values) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python
# coding: utf-8
# data analysis and wrangling
import pandas as pd
from scipy.stats import linregress
# declare variables: subject IDs to loop over
s = ("01", "02", "03", "04", "05", "06", "07", "09", "10", "11", "12", "13","14", "15", "16", "17","18", "20", "21", "22","23", "24","25", "26")
df = pd.DataFrame()
for i in s:
subj = 'sub-' + i
# save filepath to variable for easier access
corrpath = '/home/cisa/REWOD/DATA/STUDY/CLEAN/' + subj + '/func/'
# read the data and store data in DataFrame
corr_data = | pd.read_table(corrpath + 'corr_task-hedonic.txt',sep='\t', header=None) | pandas.read_table |
import pandas as pd
import pytest
from pandas import Timestamp
from pandas_historical import (
make_value_change_events_df,
update_value_change_events_df,
get_historical_state,
)
def test_parameterized():
currencies_scraping = pd.DataFrame(
[
{
"date": "2022-02-21",
"key": "DOLLAR",
"value": 78,
"scraping_id": 123,
},
{
"date": "2022-02-21",
"key": "EURO",
"value": 87,
"scraping_id": 123,
},
{
"date": "2022-02-28",
"key": "DOLLAR",
"value": 105,
"scraping_id": 124,
},
{
"date": "2022-03-07",
"key": "EURO",
"value": 139,
"scraping_id": 125,
},
{
"date": "2022-03-07",
"key": "EURO",
"value": 148,
"scraping_id": 125,
},
]
)
historical_df = make_value_change_events_df(currencies_scraping)
assert historical_df.to_dict() == {
"date": {
0: "2022-02-21",
1: "2022-02-28",
2: "2022-02-21",
3: "2022-03-07",
4: "2022-03-07",
},
"key": {0: "DOLLAR", 1: "DOLLAR", 2: "EURO", 3: "EURO", 4: "EURO"},
"value": {0: 78, 1: 105, 2: 87, 3: 139, 4: 148},
"scraping_id": {0: 123, 1: 124, 2: 123, 3: 125, 4: 125},
}
new_values = pd.DataFrame(
[
{
"date": "2022-03-11",
"key": "DOLLAR",
"value": 113,
"scraping_id": 127,
},
{
"date": "2022-03-11",
"key": "EURO",
"value": 144,
"scraping_id": 127,
},
]
)
historical_df = update_value_change_events_df(historical_df, new_values)
assert historical_df.to_dict() == {
"date": {
0: "2022-02-21",
1: "2022-02-28",
2: "2022-03-11",
3: "2022-02-21",
4: "2022-03-07",
5: "2022-03-07",
6: "2022-03-11",
},
"key": {
0: "DOLLAR",
1: "DOLLAR",
2: "DOLLAR",
3: "EURO",
4: "EURO",
5: "EURO",
6: "EURO",
},
"value": {0: 78, 1: 105, 2: 113, 3: 87, 4: 139, 5: 148, 6: 144},
"scraping_id": {
0: 123,
1: 124,
2: 127,
3: 123,
4: 125,
5: 125,
6: 127,
},
}
assert get_historical_state(
historical_df, state_date="2022-03-10"
).to_dict() == {
"date": {
1: Timestamp("2022-02-28 00:00:00"),
4: Timestamp("2022-03-07 00:00:00"),
},
"key": {1: "DOLLAR", 4: "EURO"},
"value": {1: 105, 4: 139},
"scraping_id": {1: 124, 4: 125},
}
assert get_historical_state(historical_df).to_dict() == {
"date": {
2: | Timestamp("2022-03-11 00:00:00") | pandas.Timestamp |
import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
_testing as tm,
)
from pandas.tests.io.pytables.common import ensure_clean_store
pytestmark = [pytest.mark.single, td.skip_array_manager_not_yet_implemented]
def test_store_datetime_fractional_secs(setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
| tm.assert_series_equal(result, ser) | pandas._testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import standard library
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from pkg_resources import resource_filename
import datetime
import sys
# Import modules
import backtrader as bt
import backtrader.feeds as btfeed
import backtrader.analyzers as btanalyzers
import pandas as pd
import numpy as np
from collections.abc import Iterable
import time
from fastquant.notification import trigger_bot
from fastquant.config import (
INIT_CASH,
COMMISSION_PER_TRANSACTION,
GLOBAL_PARAMS,
BUY_PROP,
SELL_PROP,
)
class BaseStrategy(bt.Strategy):
"""
Base Strategy template for all strategies to be added to fastquant
"""
# Strategy level arguments
# After initialization, the `params` variable becomes accessible as an attribute of the strategy object
# with the properties of a `named tuple`
params = (
("init_cash", INIT_CASH),
("buy_prop", BUY_PROP),
("sell_prop", SELL_PROP),
("commission", COMMISSION_PER_TRANSACTION),
("stop_loss", None),
("stop_trail", None),
(
"execution_type",
"close",
), # Either open or close, to indicate if a purchase is executed based on the next open or close
("periodic_logging", False),
("transaction_logging", True),
("channel", None),
("symbol", None),
)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print("%s, %s" % (dt.isoformat(), txt))
def update_order_history(self, order):
self.order_history["dt"].append(self.datas[0].datetime.date(0))
self.order_history["type"].append("buy" if order.isbuy() else "sell")
self.order_history["price"].append(order.executed.price)
self.order_history["size"].append(order.executed.size)
self.order_history["value"].append(order.executed.value)
self.order_history["commission"].append(order.executed.comm)
self.order_history["pnl"].append(order.executed.pnl)
def update_periodic_history(self):
self.periodic_history["dt"].append(self.datas[0].datetime.date(0))
self.periodic_history["portfolio_value"].append(self.broker.getvalue())
self.periodic_history["cash"].append(self.broker.getcash())
def __init__(self):
# Global variables
self.init_cash = self.params.init_cash
self.buy_prop = self.params.buy_prop
self.sell_prop = self.params.sell_prop
self.execution_type = self.params.execution_type
self.periodic_logging = self.params.periodic_logging
self.transaction_logging = self.params.transaction_logging
self.commission = self.params.commission
self.channel = self.params.channel
self.stop_loss = self.params.stop_loss
self.stop_trail = self.params.stop_trail
self.broker.set_coc(True)
print("===Global level arguments===")
print("init_cash : {}".format(self.init_cash))
print("buy_prop : {}".format(self.buy_prop))
print("sell_prop : {}".format(self.sell_prop))
print("commission : {}".format(self.commission))
print("stop_loss : {}".format(self.stop_loss))
print("stop_trail : {}".format(self.stop_trail))
self.order_history = {
"dt": [],
"type": [],
"price": [],
"size": [],
"value": [],
"commission": [],
"pnl": [],
}
self.periodic_history = {
"dt": [],
"portfolio_value": [],
"cash": [],
}
self.order_history_df = None
self.periodic_history_df = None
self.dataclose = self.datas[0].close
self.dataopen = self.datas[0].open
self.order = None
self.buyprice = None
self.buycomm = None
# Number of ticks in the input data
self.len_data = len(list(self.datas[0]))
# Sets the latest action as "buy", "sell", or "neutral"
self.action = None
def buy_signal(self):
return True
def sell_signal(self):
return True
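    # A minimal sketch (illustrative assumption, not part of fastquant itself) of how a
    # concrete strategy is expected to override these two hooks:
    #
    #   class BuyAndHoldStrategy(BaseStrategy):
    #       def buy_signal(self):
    #           return not self.position   # buy only while holding nothing
    #       def sell_signal(self):
    #           return False               # never sell during the backtest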
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
return
if order.status in [order.Completed]:
# Update order history whenever an order is completed
self.update_order_history(order)
if order.isbuy():
self.action = "buy"
if self.transaction_logging:
self.log(
"BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm: %.2f, Size: %.2f"
% (
order.executed.price,
order.executed.value,
order.executed.comm,
order.executed.size,
)
)
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.action = "sell"
if self.transaction_logging:
self.log(
"SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm: %.2f, Size: %.2f"
% (
order.executed.price,
order.executed.value,
order.executed.comm,
order.executed.size,
)
)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
if self.transaction_logging:
if not self.periodic_logging:
self.log("Cash %s Value %s" % (self.cash, self.value))
self.log("Order Canceled/Margin/Rejected")
self.log("Canceled: {}".format(order.status == order.Canceled))
self.log("Margin: {}".format(order.status == order.Margin))
self.log("Rejected: {}".format(order.status == order.Rejected))
# Write down: no pending order
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
if self.transaction_logging:
self.log(
"OPERATION PROFIT, GROSS: %.2f, NET: %.2f"
% (trade.pnl, trade.pnlcomm)
)
def notify_cashvalue(self, cash, value):
# Update cash and value every period
if self.periodic_logging:
self.log("Cash %s Value %s" % (cash, value))
self.cash = cash
self.value = value
def stop(self):
# Saving to self so it's accessible later during optimization
self.final_value = self.broker.getvalue()
self.pnl = round(self.final_value - self.init_cash, 2)
print("Final Portfolio Value: {}".format(self.final_value))
print("Final PnL: {}".format(self.pnl))
self.order_history_df = | pd.DataFrame(self.order_history) | pandas.DataFrame |
# This script runs the RDD models for a paper on the impact of COVID-19 on academic publishing
# Importing required modules
import pandas as pd
import datetime
import numpy as np
import statsmodels.api as stats
from matplotlib import pyplot as plt
import gender_guesser.detector as gender
from ToTeX import restab
# Defining a helper function for identifying COVID-19 related papers
def covid(papers, row):
string = str(papers.Title[row]) + str(papers.Abstract[row]) + str(papers.Keywords[row])
if 'covid' in string.lower():
return 1
else:
return 0
# Defining a helper function for isolating the name of the first author
def first_name(auths):
a = auths.index("'")
try:
b = auths[a+1:].index(' ')
except:
b = auths[a+1:].index("'")
return auths[a+1:b+2]
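# Example (assuming df.Authors stores a string representation of a list of names, e.g.
# "['John Smith', 'Jane Doe']"): first_name returns 'John', the first author's given name.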
# Defining a helper function for isolating the national affiliation of the first author
def first_nationality(affils):
if str(affils) == 'nan':
affils = ''
else:
try:
a = affils.index("',")
except:
a = len(affils) - 2
c = affils[:a].count(', ')
for j in range(c):
b = affils[:a].index(', ')
affils = affils[b+2:a]
return affils
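# Example (assuming each affiliation string ends with the country, e.g.
# "['Dept. of Economics, Some University, Italy', ...]"): first_nationality returns 'Italy',
# the last comma-separated token of the first author's affiliation.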
# Reading in the data
print('Reading in the data.......')
papers = pd.read_csv('C:/Users/User/Documents/Data/COVID-19/MDPI_data.csv')
# Control for COVID-19 related papers
# Creating the list
print('Creating a flag for COVID-19 related papers.......')
c19 = [covid(papers, row) for row in range(len(papers))]
# Adding COVID data to data set
print('Adding COVID-19 flag to the data set.......')
c19 = pd.Series(c19, name = 'COVID')
papers = pd.concat([papers, c19], axis = 1)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does not reflect dropping certain papers due to being published in unestablished journals
post_study_papers = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Create a list of journals which will be included in the study - those with pubs prior to 2020
print('Removing papers from journals first published post 2020-01-01.......')
journals = []
for journal in papers.Journal.unique():
j = papers[papers.Journal == journal].reset_index()
if datetime.datetime.strptime(min(j.Accepted), '%Y-%m-%d') < datetime.datetime.strptime('2020-01-01', '%Y-%m-%d') and datetime.datetime.strptime(max(j.Accepted), '%Y-%m-%d') > datetime.datetime.strptime('2019-01-01', '%Y-%m-%d'):
journals.append(j.Journal[0])
# Subset data based on journals
df = papers[papers.Journal.isin(journals)].reset_index(drop = True)
# Subset data based on submission date
print('Removing papers from outside of the study time frame.......')
post1812 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2018-12-31', '%Y-%m-%d')) for i in range(len(df))]
pre2007 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') < datetime.datetime.strptime('2020-07-01', '%Y-%m-%d')) for i in range(len(df))]
study = pd.Series([post1812[i] * pre2007[i] for i in range(len(post1812))], name = 'Study')
df = pd.concat([df, study], axis = 1)
df = df[df.Study == 1].reset_index(drop = True)
# Computing the number of authors
print('Computing the number of authors for each paper.......')
numb_authors = [df.Authors[i].count(',') + 1 for i in range(len(df))]
numb_authors = pd.Series(numb_authors, name = 'Author_Count')
df = pd.concat([df, numb_authors], axis = 1)
# Predict perceived gender of the first author only
print('Predicting the perceived gender of first authors for each paper.......')
gd = gender.Detector()
first_author_gender = [gd.get_gender(first_name(df.Authors[i])) for i in range(len(df))]
first_author_gender = pd.Series(first_author_gender, name = 'Gender')
df = pd.concat([df, first_author_gender], axis = 1)
# Finding the nationality of the first author
print('Finding the nationality of the first author for each paper.......')
first_nat = [first_nationality(df.Affiliations[i]) for i in range(len(df))]
first_nat = pd.Series(first_nat, name = 'Nationality')
df = pd.concat([df, first_nat], axis = 1)
# Estimating the percentage of male / female authors for each paper
# Defining a helper function for the main function below
def inp_trimmer(inp):
a = inp.index("'") # mimic first_name
try:
b = inp[a+1:].index(' ') # mimic first_name
except:
b = inp[a+1:].index("'") # mimic first_name
inp = inp[b+3:] # shorten inp
try:
c = inp.index("',") # find next name or end of inp
inp = inp[c+3:]
except:
inp = ']'
return inp
# Defining a function to parse names and run them through the existing function for first author names
def all_auths(inp,nu):
    if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
gd = gender.Detector()
listicle = []
while inp != ']':
listicle.append(gd.get_gender(first_name(inp)))
inp = inp_trimmer(inp)
return listicle
# Applying this function to predict the perceived genders of all authors
# This is currently commented out because it takes quite a long time to run and too many authors are categorized as 'unknown'
#all_genders = [all_auths(df.Authors[i].replace('"',"'"),i) for i in range(len(df))]
# Below are lists of countries categorized by the World Bank Analytical Classification quartiles
high = ['Andorra', 'Antigua and Barbuda', 'Aruba', 'Australia', 'Austria', 'The Bahamas', 'Bahrain',
'Barbados', 'Belgium', 'Bermuda', 'Brunei', 'Canada', 'The Cayman Islands', 'Channel Islands',
'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Equatorial Guinea', 'Estonia', 'Faeroe Islands',
'Finland', 'France', 'French Polynesia', 'Germany', 'Greece', 'Greenland', 'Hong Kong', 'Hungary',
'Iceland', 'Ireland', 'Isle of Man', 'Israel', 'Italy', 'Japan', 'Korea', 'Kuwait', 'Liechtenstein',
'Luxembourg', 'Macao', 'Malta', 'Monaco', 'The Netherlands', 'New Caledonia', 'New Zealand',
'Northern Mariana Islands', 'Norway', 'Oman', 'Portugal', 'Qatar', 'San Marino', 'Saudi Arabia',
'Singapore', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Taiwan', 'Trinidad and Tobago',
'United Arab Emirates', 'UK', 'USA']
upper_mid = ['Algeria', 'American Samoa', 'Argentina', 'Belarus', 'Bosnia and Herzegovina', 'Botswana', 'Brazil',
'Bulgaria', 'Chile', 'Colombia', 'Costa Rica', 'Cuba', 'Dominica', 'Dominican Republic', 'Fiji',
'Gabon', 'Grenada', 'Jamaica', 'Kazakhstan', 'Latvia', 'Lebanon', 'Libya', 'Lithuania', 'Macedonia',
'Malaysia', 'Mauritius', 'Mexico', 'Montenegro', 'Namibia', 'Palau', 'Panama', 'Peru', 'Poland',
'Romania', 'Russia', 'Serbia', 'Seychelles', 'South Africa', 'Saint Kitts and Nevis', 'Saint Lucia',
'Saint Vincent and the Grenadines', 'Suriname', 'Turkey', 'Uruguay', 'Venezuela']
lower_mid = ['Albania', 'Angola', 'Armenia', 'Azerbaijan', 'Belize', 'Bhutan', 'Bolivia', 'Cabo Verde', 'Cameroon',
'China', 'Republic of the Congo', 'Ivory Coast', 'Djibouti', 'Ecuador', 'Egypt', 'El Salvador', 'Georgia',
'Guatemala', 'Guyana', 'Honduras', 'India', 'Indonesia', 'Iran', 'Iraq', 'Jordan', 'Kiribati',
'Kosovo', 'Lesotho', 'Maldives', 'Marshall Islands', 'Micronesia', 'Moldova', 'Mongolia', 'Morocco',
'Nicaragua', 'Nigeria', 'Pakistan', 'Papua New Guinea', 'Paraguay', 'Philippines', 'Samoa',
'Sao Tome and Principe', 'Solomon Islands', 'Sri Lanka', 'Sudan', 'Eswatini', 'Syria', 'Palestine',
'Thailand', 'Timor-Leste', 'Tonga', 'Tunisia', 'Turkmenistan', 'Ukraine', 'Vanuatu', 'West Bank and Gaza']
low = ['Afghanistan', 'Bangladesh', 'Benin', 'Burkina Faso', 'Burundi', 'Cambodia', 'Central African Republic',
'Chad', 'Comoros', 'Democratic Republic of the Congo', 'Eritrea', 'Ethiopia', 'The Gambia', 'Ghana', 'Guinea',
'Guinea-Bissau', 'Haiti', 'Kenya', 'Korea, Dem. Rep.', 'Kyrgyzstan', 'Laos', 'Liberia', 'Madagascar', 'Malawi',
'Mali', 'Mauritania', 'Mozambique', 'Myanmar', 'Nepal', 'Niger', 'Rwanda', 'Senegal', 'Sierra Leone', 'Somalia',
'Tajikistan', 'Tanzania', 'Togo', 'Uganda', 'Uzbekistan', 'Vietnam', 'Yemen', 'Zambia', 'Zimbabwe']
# Defining a dictionary for determining the WBAC quartile
qh = {h:'q1' for h in high}
qu = {h:'q2' for h in upper_mid}
qm = {h:'q3' for h in lower_mid}
ql = {h:'q4' for h in low}
qd = {**qh, **qu, **qm, **ql}
# Defining a function for determining the quartile of the first author's nationality
def f_quart(inp):
try:
res = qd[inp]
except:
res = ''
return res
# Determining the quartile of the affiliation of the first author
fq = [f_quart(x) for x in df.Nationality]
fq = pd.Series(fq, name = 'First_Quartile')
df = pd.concat([df, fq], axis = 1)
# Defining a function to determine the 'top quartile' for each paper
def quart(inp,nu):
    if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
listicle = []
while inp != ']':
try:
listicle.append(f_quart(first_nationality(inp)))
inp = inp_trimmer(inp)
except:
inp = ']'
if 'q1' in listicle:
res = 'q1'
elif 'q2' in listicle:
res = 'q2'
elif 'q3' in listicle:
res = 'q3'
else:
res = 'q4'
return res
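# Example: a paper with affiliations in Brazil ('q2') and Kenya ('q4') gets Top_Quartile 'q2',
# i.e. the highest World Bank income group present among all listed affiliations (q1 > q2 > q3 > q4).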
# Determining the 'top quartile' present in each paper
print('Determining the top WBAC quartile present in each paper.......')
quarts = [quart(df.Affiliations[i],i) for i in range(len(df.Affiliations))]
# An indicator variable for whether or not a Q1 (high) nation contributed
q1 = [1 if q == 'q1' else 0 for q in quarts]
# Appending these two lists to the main df
quarts = pd.Series(quarts, name = 'Top_Quartile')
q1 = pd.Series(q1, name = 'Q1')
df = pd.concat([df, quarts, q1], axis = 1)
# 5443 of 167,703 papers had no discernible Nationality and are dropped here
df = df[df.First_Quartile != ''].reset_index(drop = True)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does now reflect dropping certain papers due to being publishing in unestablished journals
post_study_papers2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Determining if the journal uses single blind or double blind peer review
print('Determining if the journal uses single blind or double blind peer review.......')
# Lists of journals with a double blind peer review policy
db_journals = ['Adm. Sci.', 'AgriEngineering', 'Arts', 'Buildings',
'Economies', 'Educ. Sci.', 'Games', 'Genealogy', 'Humanities',
'J. Intell.', 'J. Open Innov. Technol. Mark. Complex.',
'Journal. Media.', 'Languages', 'Laws', 'Psych', 'Religions',
'Soc. Sci.', 'Societies', 'Toxins']
db = [1 if j in db_journals else 0 for j in df.Journal]
db = pd.Series(db, name = 'Double_Blind')
df = pd.concat([df, db], axis = 1)
# Computing the distances
print('Calculating distances from thresholds.......')
# Distance from March 16 (middle of March)
XX = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-16', '%Y-%m-%d') for i in range(len(df))]
XX = [x.days for x in XX]
XX = pd.Series(XX, name = 'X-c')
df = pd.concat([df, XX], axis = 1)
# Squared distance from March 16 (middle of March)
XX2 = df['X-c']*df['X-c']
XX2 = pd.Series(XX2, name = '(X-c)^2')
df = pd.concat([df, XX2], axis = 1)
# Cubed distance from March 16 (middle of March)
XX3 = df['X-c']*df['X-c']*df['X-c']
XX3 = pd.Series(XX3, name = '(X-c)^3')
df = pd.concat([df, XX3], axis = 1)
# Distance from surrounding days to serve as robustness checks
# One week prior to March 16
XX01 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-17', '%Y-%m-%d') for i in range(len(df))]
XX02 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-18', '%Y-%m-%d') for i in range(len(df))]
XX03 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-19', '%Y-%m-%d') for i in range(len(df))]
XX04 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-20', '%Y-%m-%d') for i in range(len(df))]
XX05 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-21', '%Y-%m-%d') for i in range(len(df))]
XX06 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-22', '%Y-%m-%d') for i in range(len(df))]
XX07 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-23', '%Y-%m-%d') for i in range(len(df))]
XX01 = [x.days for x in XX01]
XX02 = [x.days for x in XX02]
XX03 = [x.days for x in XX03]
XX04 = [x.days for x in XX04]
XX05 = [x.days for x in XX05]
XX06 = [x.days for x in XX06]
XX07 = [x.days for x in XX07]
XX01 = pd.Series(XX01, name = 'X-1-c')
XX02 = pd.Series(XX02, name = 'X-2-c')
XX03 = pd.Series(XX03, name = 'X-3-c')
XX04 = pd.Series(XX04, name = 'X-4-c')
XX05 = pd.Series(XX05, name = 'X-5-c')
XX06 = pd.Series(XX06, name = 'X-6-c')
XX07 = pd.Series(XX07, name = 'X-7-c')
df = pd.concat([df, XX01, XX02, XX03, XX04, XX05, XX06, XX07], axis = 1)
# One week post March 16
XX11 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-15', '%Y-%m-%d') for i in range(len(df))]
XX12 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-14', '%Y-%m-%d') for i in range(len(df))]
XX13 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-13', '%Y-%m-%d') for i in range(len(df))]
XX14 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-12', '%Y-%m-%d') for i in range(len(df))]
XX15 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-11', '%Y-%m-%d') for i in range(len(df))]
XX16 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-10', '%Y-%m-%d') for i in range(len(df))]
XX17 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-09', '%Y-%m-%d') for i in range(len(df))]
XX11 = [x.days for x in XX11]
XX12 = [x.days for x in XX12]
XX13 = [x.days for x in XX13]
XX14 = [x.days for x in XX14]
XX15 = [x.days for x in XX15]
XX16 = [x.days for x in XX16]
XX17 = [x.days for x in XX17]
XX11 = pd.Series(XX11, name = 'X+1-c')
XX12 = pd.Series(XX12, name = 'X+2-c')
XX13 = pd.Series(XX13, name = 'X+3-c')
XX14 = pd.Series(XX14, name = 'X+4-c')
XX15 = pd.Series(XX15, name = 'X+5-c')
XX16 = pd.Series(XX16, name = 'X+6-c')
XX17 = pd.Series(XX17, name = 'X+7-c')
df = pd.concat([df, XX11, XX12, XX13, XX14, XX15, XX16, XX17], axis = 1)
# Adding the post-effect variables for the main regression
D = [1 if df['X-c'][i] >= 0 else 0 for i in range(len(df))]
D = pd.Series(D, name = 'D')
DXc = D*df['X-c']
DXc2 = D*df['X-c']*df['X-c']
DXc3 = D*df['X-c']*df['X-c']*df['X-c']
DXc = pd.Series(DXc, name = 'D(X-c)')
DXc2 = pd.Series(DXc2, name = 'D(X-c)^2')
DXc3 = pd.Series(DXc3, name = 'D(X-c)^3')
df = pd.concat([df, D, DXc, DXc2, DXc3], axis = 1)
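# For reference, the regressors built above correspond to a cubic RDD specification of the form
# (a sketch inferred from the constructed variables, not quoted from the paper):
#   ln(y) = b0 + b1*(X-c) + b2*(X-c)^2 + b3*(X-c)^3
#           + tau*D + g1*D*(X-c) + g2*D*(X-c)^2 + g3*D*(X-c)^3 + controls + error
# where X is the submission date, c is 2020-03-16, D = 1{X >= c}, and tau is the estimated
# discontinuity in (log) processing time at the cutoff.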
# Adding the post-effect variables for the robustness checks
D01 = [1 if df['X-1-c'][i] >= 0 else 0 for i in range(len(df))]
D02 = [1 if df['X-2-c'][i] >= 0 else 0 for i in range(len(df))]
D03 = [1 if df['X-3-c'][i] >= 0 else 0 for i in range(len(df))]
D04 = [1 if df['X-4-c'][i] >= 0 else 0 for i in range(len(df))]
D05 = [1 if df['X-5-c'][i] >= 0 else 0 for i in range(len(df))]
D06 = [1 if df['X-6-c'][i] >= 0 else 0 for i in range(len(df))]
D07 = [1 if df['X-7-c'][i] >= 0 else 0 for i in range(len(df))]
D01 = pd.Series(D01, name = 'D-1')
D02 = pd.Series(D02, name = 'D-2')
D03 = pd.Series(D03, name = 'D-3')
D04 = pd.Series(D04, name = 'D-4')
D05 = pd.Series(D05, name = 'D-5')
D06 = pd.Series(D06, name = 'D-6')
D07 = pd.Series(D07, name = 'D-7')
D11 = [1 if df['X+1-c'][i] >= 0 else 0 for i in range(len(df))]
D12 = [1 if df['X+2-c'][i] >= 0 else 0 for i in range(len(df))]
D13 = [1 if df['X+3-c'][i] >= 0 else 0 for i in range(len(df))]
D14 = [1 if df['X+4-c'][i] >= 0 else 0 for i in range(len(df))]
D15 = [1 if df['X+5-c'][i] >= 0 else 0 for i in range(len(df))]
D16 = [1 if df['X+6-c'][i] >= 0 else 0 for i in range(len(df))]
D17 = [1 if df['X+7-c'][i] >= 0 else 0 for i in range(len(df))]
D11 = pd.Series(D11, name = 'D+1')
D12 = pd.Series(D12, name = 'D+2')
D13 = pd.Series(D13, name = 'D+3')
D14 = pd.Series(D14, name = 'D+4')
D15 = pd.Series(D15, name = 'D+5')
D16 = pd.Series(D16, name = 'D+6')
D17 = pd.Series(D17, name = 'D+7')
df = pd.concat([df, D01, D02, D03, D04, D05, D06, D07, D11, D12, D13, D14, D15, D16, D17], axis = 1)
DXc01 = D01*df['X-1-c']
DXc02 = D02*df['X-2-c']
DXc03 = D03*df['X-3-c']
DXc04 = D04*df['X-4-c']
DXc05 = D05*df['X-5-c']
DXc06 = D06*df['X-6-c']
DXc07 = D07*df['X-7-c']
DXc11 = D11*df['X+1-c']
DXc12 = D12*df['X+2-c']
DXc13 = D13*df['X+3-c']
DXc14 = D14*df['X+4-c']
DXc15 = D15*df['X+5-c']
DXc16 = D16*df['X+6-c']
DXc17 = D17*df['X+7-c']
DXc01 = pd.Series(DXc01, name = 'D-1(X-c)')
DXc02 = pd.Series(DXc02, name = 'D-2(X-c)')
DXc03 = pd.Series(DXc03, name = 'D-3(X-c)')
DXc04 = pd.Series(DXc04, name = 'D-4(X-c)')
DXc05 = pd.Series(DXc05, name = 'D-5(X-c)')
DXc06 = pd.Series(DXc06, name = 'D-6(X-c)')
DXc07 = pd.Series(DXc07, name = 'D-7(X-c)')
DXc11 = pd.Series(DXc11, name = 'D+1(X-c)')
DXc12 = pd.Series(DXc12, name = 'D+2(X-c)')
DXc13 = pd.Series(DXc13, name = 'D+3(X-c)')
DXc14 = pd.Series(DXc14, name = 'D+4(X-c)')
DXc15 = pd.Series(DXc15, name = 'D+5(X-c)')
DXc16 = pd.Series(DXc16, name = 'D+6(X-c)')
DXc17 = pd.Series(DXc17, name = 'D+7(X-c)')
df = pd.concat([df, DXc01, DXc02, DXc03, DXc04, DXc05, DXc06, DXc07, DXc11, DXc12, DXc13, DXc14, DXc15, DXc16, DXc17], axis = 1)
# Calculating a total author time to add to the data set as a potential dependent variable
A = [df.Total[i] - df.Editor[i] for i in range(len(df))]
A = pd.Series(A, name = 'Author')
df = pd.concat([df, A], axis = 1)
# Adding natural logarithm transformed arXiv data
ln_arXiv7 = pd.Series(np.log(df.arXiv7.values), name = 'ln_arXiv7')
ln_arXiv14 = pd.Series(np.log(df.arXiv14.values), name = 'ln_arXiv14')
ln_arXiv30 = pd.Series(np.log(df.arXiv30.values), name = 'ln_arXiv30')
ln_new7 = pd.Series(np.log(df.new7.values), name = 'ln_new7')
ln_new14 = pd.Series(np.log(df.new14.values), name = 'ln_new14')
ln_new30 = pd.Series(np.log(df.new30.values), name = 'ln_new30')
df = pd.concat([df, ln_arXiv7, ln_arXiv14, ln_arXiv30, ln_new7, ln_new14, ln_new30], axis = 1)
# Two records had a bad date, giving an infeasible (negative) Stage1 value, so they are dropped here
df = df[df.Stage1 >= 0].reset_index(drop = True)
# Defining a function for adding a month dummy
def month(m):
md = {'01':'JAN', '02':'FEB', '03':'MAR', '04':'APR', '05':'MAY', '06':'JUN',
'07':'JUL', '08':'AUG', '09':'SEP', '10':'OCT', '11':'NOV', '12':'DEC', } # a month dictionary
s = m[5:7] # the month as a number stored as a string
mon = md[s]# getting the month from the dictionary
return mon
# Add a month dummy using the function
months = [month(m) for m in df.Submitted]
months = pd.Series(months, name = 'Month')
df = pd.concat([df, months], axis = 1)
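# Hedged alternative (assumes df.Submitted holds 'YYYY-MM-DD' strings and an English
# locale): the same month labels can come from the pandas datetime accessor; this is
# an illustrative sketch only, the dictionary-based month() above is what is used.
def _sketch_month_labels(frame):
    return pd.to_datetime(frame.Submitted).dt.strftime('%b').str.upper().rename('Month')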
# Prepping the data for the regressions
Stage1 = np.log(df.Stage1.values)
Stage2 = np.log(df.Stage2.values)
Stage3 = np.log(df.Stage3.values)
Total = np.log(df.Total.values)
Editor = np.log(df.Editor.values)
XX = stats.add_constant(df[['X-c', '(X-c)^2', '(X-c)^3', 'D', 'D(X-c)', 'D(X-c)^2', 'D(X-c)^3',
'COVID', 'Double_Blind', 'Author_Count', 'ln_arXiv14']])
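# Hedged sketch (assumes `stats` is the statsmodels.api alias implied by
# stats.add_constant above): the log outcomes and the design matrix XX would
# typically be fit with OLS; this helper is illustrative only and never called.
def _sketch_fit_rd(y, design):
    return stats.OLS(y, design).fit()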
# Creating the fixed effects
dG = pd.get_dummies(df['Gender'])
dF = pd.get_dummies(df['Frascati'])
dQ = pd.get_dummies(df['First_Quartile'])
dN = pd.get_dummies(df['Nationality'])
dJ = | pd.get_dummies(df['Journal']) | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 24 16:43:33 2019
@author: jeremy_lehner
"""
import pandas as pd
import datetime
from selenium import webdriver
import time
from bs4 import BeautifulSoup
from os import path
def get_scrape_date():
"""
Gets the date on which data was scraped
Parameters
----------
None
Returns
-------
date : string
Date that data was scraped in the format 'YYYY-MM-DD'
"""
# Get current date and time
now = datetime.datetime.now()
year_scraped = str(now.year)
month_scraped = str(now.month)
day_scraped = str(now.day)
# Add leading zeroes to single-digit months and days
if len(month_scraped) == 1:
month_scraped = '0' + month_scraped
if len(day_scraped) == 1:
day_scraped = '0' + day_scraped
# Construct date string
date_data = year_scraped + '-' + month_scraped + '-' + day_scraped
# Bye! <3
return date_data
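# Hedged note (illustrative only, not used by the scrapers below): the manual
# zero-padding above is equivalent to a single strftime call.
def _sketch_scrape_date():
    return datetime.datetime.now().strftime('%Y-%m-%d')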
def scrape_champ_names(save=True):
"""
Scrapes champion names from League of Legends Wiki and saves them to
    a csv file, but returns nothing
Parameters
----------
save : boolean
Save names to csv file?
Returns
-------
None
"""
# Assign scrape path variables
url = 'https://leagueoflegends.fandom.com/wiki/List_of_champions'
# Get champion names
names = pd.read_html(url)[1]
names = list(names['Champion'])
names = [s.split(',')[0] for s in names]
names = [s.split('\xa0the')[0] for s in names]
names = pd.Series(names).rename('champion')
# Write names to csv file
if save:
names.to_csv('./data/champion_names.csv', index=False)
# Bye! <3
return
def scrape_release_dates(save=True):
"""
Scrapes champion release dates from League of Legends Wiki and saves them
    to a csv file, but returns nothing
Parameters
----------
save : boolean
Save release dates to csv file ('YYYY-MM-DD')?
Returns
-------
None
"""
# Assign scrape path variables
url = 'https://leagueoflegends.fandom.com/wiki/List_of_champions'
# Get release dates
dates = pd.read_html(url)[1]
dates = dates['Release Date'].rename('release_date')
# Write release dates to csv file
if save:
dates.to_csv('./data/champion_release_dates.csv', index=False)
# Bye! <3
return
def scrape_number_of_skins(names, save=True):
"""
Scrapes number of champion skins from League of Legends Wiki and saves
them to a csv file, but returns nothing
Parameters
----------
names : pandas series
Contains the champion names as strings in alphabetical order
save : boolean
Save number of champion skins to csv file?
Returns
-------
None
"""
# Assign scrape path variables
style = 'display:inline-block; margin:5px; width:342px'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
# Get number of skins
num_skins = []
for name in names:
name = name.replace(' ', '_')
skins_url = f'https://leagueoflegends.fandom.com/wiki/{name}/Skins'
driver.get(skins_url)
time.sleep(2)
soup = BeautifulSoup(driver.page_source, 'html.parser')
num_skins.append(len(soup.find_all('div', {'style': style})))
num_skins = pd.Series(num_skins)
# Close selenium web driver
driver.close()
if save:
num_skins.to_csv('./data/num_skins.csv', index=False)
# Bye! <3
return
def scrape_win_rates(save=True):
"""
Scrapes the current day North America champion win rates from op.gg and
saves them to a csv file along with the date, but returns nothing
Parameters
----------
save : boolean
Save win rates as csv file?
Returns
-------
None
"""
# Get date at time of scraping
date = get_scrape_date()
# Assign scraping variables
champstats_url = 'https://na.op.gg/statistics/champion/'
today_xpath = '//*[@id="recent_today"]/span/span'
winrate_xpath = '//*[@id="rate_win"]/span/span'
scroll_down = "window.scrollTo(0, document.body.scrollHeight);"
champs = 'Champion.1'
win = 'Win rate'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
driver.get(champstats_url)
# Select stats for current day
today_button = driver.find_element_by_xpath(today_xpath)
today_button.click()
# Select win rates
winrate_button = driver.find_element_by_xpath(winrate_xpath)
winrate_button.click()
# Scroll to bottom of page and wait to bypass ads
driver.execute_script(scroll_down)
time.sleep(10)
# Scrape win rates
winrates = pd.read_html(driver.page_source)[1]
winrates = winrates[[champs, win]]
# Sort win rates by champion in alphabetical order
winrates.sort_values(by=champs, inplace=True)
winrates = winrates[win].reset_index()[win]
# Close selenium web driver
driver.close()
# Convert win rates to float
winrates = winrates.str.replace('%', '')
winrates = round(winrates.astype('float')/100, 4)
# Add a column with the date
winrates = pd.DataFrame({'winrate': winrates, 'date': date})
# Write win rates to csv file
if save:
date = date.replace('-', '')
winrates.to_csv(f'./data/win/win_rates_{date}.csv', index=False)
else:
print('Win rates were scraped, but not saved!')
# Bye! <3
return
def scrape_ban_rates(save=True):
"""
Scrapes the current day North America champion ban rates from op.gg and
    saves them to a csv file along with the date, and returns the result
Parameters
----------
save : boolean
Save ban rates as csv file?
Returns
-------
    banrates : pandas DataFrame
        Ban rates with the date they were scraped
"""
# Get date at time of scraping
date = get_scrape_date()
# Assign scraping variables
champstats_url = 'https://na.op.gg/statistics/champion/'
today_xpath = '//*[@id="recent_today"]/span/span'
banrate_xpath = '//*[@id="rate_ban"]/span/span'
scroll_down = "window.scrollTo(0, document.body.scrollHeight);"
champs = 'Champion.1'
ban = 'Ban ratio per game'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
driver.get(champstats_url)
# Select stats for current day
today_button = driver.find_element_by_xpath(today_xpath)
today_button.click()
# Select ban rates
banrate_button = driver.find_element_by_xpath(banrate_xpath)
banrate_button.click()
# Scroll to bottom of page and wait to bypass ads
driver.execute_script(scroll_down)
time.sleep(10)
# Scrape ban rates
banrates = pd.read_html(driver.page_source)[1]
banrates = banrates[[champs, ban]]
# Sort ban rates by champion in alphabetical order
banrates.sort_values(by=champs, inplace=True)
banrates = banrates[ban].reset_index()[ban]
    # Close Selenium web driver
driver.close()
# Convert ban rates to float
banrates = banrates.str.replace('%', '')
banrates = round(banrates.astype('float')/100, 4)
# Add a column with the date
banrates = pd.DataFrame({'banrate': banrates, 'date': date})
# Write ban rates to csv file
if save:
date = date.replace('-', '')
banrates.to_csv(f'./data/ban/ban_rates_{date}.csv', index=False)
else:
print('Ban rates were scraped, but not saved!')
# Bye! <3
return banrates
def scrape_pick_rates(save=True):
"""
Scrapes the current day North America champion pick rates from op.gg and
    saves them to a csv file along with the date, and returns the result
Parameters
----------
save : boolean
Save pick rates as csv file?
Returns
-------
    pickrates : pandas DataFrame
        Pick rates with the date they were scraped
"""
# Get date at time of scraping
date = get_scrape_date()
# Assign scraping variables
champstats_url = 'https://na.op.gg/statistics/champion/'
today_xpath = '//*[@id="recent_today"]/span/span'
pickrate_xpath = '//*[@id="rate_pick"]/span/span'
scroll_down = "window.scrollTo(0, document.body.scrollHeight);"
champs = 'Champion.1'
pick = 'Pick ratio per game'
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
driver.get(champstats_url)
# Select stats for current day
today_button = driver.find_element_by_xpath(today_xpath)
today_button.click()
# Select pick rates
pickrate_button = driver.find_element_by_xpath(pickrate_xpath)
pickrate_button.click()
# Scroll to bottom of page and wait to bypass ads
driver.execute_script(scroll_down)
time.sleep(10)
# Scrape pick rates
pickrates = pd.read_html(driver.page_source)[1]
pickrates = pickrates[[champs, pick]]
    # Sort pick rates by champion in alphabetical order
pickrates.sort_values(by=champs, inplace=True)
pickrates = pickrates[pick].reset_index()[pick]
# Close selenium web driver
driver.close()
# Convert pick rates to float
pickrates = pickrates.str.replace('%', '')
pickrates = round(pickrates.astype('float')/100, 4)
# Add a column with the date
pickrates = pd.DataFrame({'pickrate': pickrates, 'date': date})
# Write pick rates to csv file
if save:
date = date.replace('-', '')
pickrates.to_csv(f'./data/pick/pick_rates_{date}.csv', index=False)
else:
print('Pick rates were scraped, but not saved!')
# Bye! <3
return pickrates
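# Hedged usage sketch (an assumption about how this module is driven, not shown in
# this file): the three rate scrapers above are typically run together once per day,
# each writing its own dated csv under ./data/. Illustrative only, never called here.
def _daily_rates_example():
    scrape_win_rates(save=True)
    scrape_ban_rates(save=True)
    scrape_pick_rates(save=True)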
def scrape_last_patch_change(names, save=True):
"""
    Scrapes from the League Wiki the last patch in which each champion was changed
    and saves the results to a csv file, but returns nothing
Parameters
----------
names : pandas series
Contains the champion names as strings in alphabetical order
save : boolean
        Save the last patch in which each champion was changed to a csv file?
Returns
-------
None
"""
# Set up selenium web driver
driver = webdriver.Chrome('./src/utils/chromedriver')
# Get patch when champion was last changed
last_patch = []
for name in names:
name = name.replace(' ', '_')
champ_url = f'https://lol.gamepedia.com/{name}#Patch_History'
driver.get(champ_url)
time.sleep(2)
# Parse the champion page HTML
soup = BeautifulSoup(driver.page_source, 'html.parser')
# Get entire patch history but only grab patch versions from HTML
history = [link for link in soup.find_all('a')
if '>v1.' in str(link) or 'Patch 1.' in str(link)
or '>v2.' in str(link) or 'Patch 2.' in str(link)
or '>v3.' in str(link) or 'Patch 3.' in str(link)
or '>v4.' in str(link) or 'Patch 4.' in str(link)
or '>v5.' in str(link) or 'Patch 5.' in str(link)
or '>v6.' in str(link) or 'Patch 6.' in str(link)
or '>v7.' in str(link) or 'Patch 7.' in str(link)
or '>v8.' in str(link) or 'Patch 8.' in str(link)
or '>v9.' in str(link) or 'Patch 9.' in str(link)]
# Get only the most recent patch in which the champion was changed
most_recent = history[0]
most_recent = str(most_recent)[-8:-4]
last_patch.append(most_recent)
# Close selenium web driver
driver.close()
# Standardize the patch version format
for idx, patch in enumerate(last_patch):
last_patch[idx] = patch.replace('v', '')
for idx, patch in enumerate(last_patch):
last_patch[idx] = patch.replace(' ', '')
# Convert the patches into a pandas series
last_patch = | pd.Series(last_patch) | pandas.Series |
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import os
from collections import Counter
from yellowbrick.text import FreqDistVisualizer
# import rake
import numpy as np
from ast import literal_eval
from nltk import ngrams
from sklearn.feature_extraction.text import CountVectorizer
def read_file(csv_file):
"""
params:
csv_file: file to be processed
returns: dataframe
"""
df = pd.read_csv(csv_file, delimiter='\t')
return df
def flatten(texts):
"""
Flattens list of lists
params:
texts: list of lists
return: flattened list
"""
flattened_list = [item for items in texts for item in items]
return flattened_list
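# Hedged example (not part of the pipeline, never called): flatten merges
# per-document lists into one corpus-wide list, e.g. flatten([[1, 2], [3]]) == [1, 2, 3].
def _flatten_example():
    assert flatten([[1, 2], [3]]) == [1, 2, 3]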
def texts_list(df):
""" Turn string of list into list and append to bigger list
params:
df: preprocessed dataframe
returns: list of lists (tokenized)
"""
texts = []
for text in df['tokens_pos']:
texts.append(literal_eval((text)))
return texts
def create_corpus(flatten_tokenized_texts):
"""
params:
tokenized_texts: list of tokenized sentences
returns:
dictionary with words as keys and frequency as values
dictionary with pos as keys and frequency as values
dictionary with (token, pos) as keys and frequency as values
"""
corpus = []
pos_corpus = []
token_pos_corpus = []
for token, pos in flatten_tokenized_texts:
        corpus.append(token)
        pos_corpus.append(pos)
        token_pos_corpus.append((token, pos))
corpus = Counter(corpus)
pos_corpus = Counter(pos_corpus)
token_pos_corpus = Counter(token_pos_corpus)
return corpus, pos_corpus, token_pos_corpus
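# Hedged example (illustrative only, never called): one tagged sentence shows the
# three Counter objects returned by create_corpus.
def _create_corpus_example():
    toy = flatten([[('cat', 'NN'), ('cat', 'NN'), ('sat', 'VBD')]])
    corpus, pos_corpus, token_pos_corpus = create_corpus(toy)
    # corpus['cat'] == 2, pos_corpus['NN'] == 2, token_pos_corpus[('sat', 'VBD')] == 1
    return corpus, pos_corpus, token_pos_corpus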
def most_frequent(corpus, file_name, corpus_type):
"""
saves plot of 20 most frequent words to folder
params:
        corpus: word count Counter
        file_name: name of file to be processed
        corpus_type: 'token', 'pos' or 'token_pos'; selects the output file name
"""
if corpus_type == 'token':
outfile_name = 'keyword_analysis_results/mf_token_' + file_name + '.png'
elif corpus_type == 'pos':
outfile_name = 'keyword_analysis_results/mf_pos_' + file_name + '.png'
elif corpus_type == 'token_pos':
outfile_name = 'keyword_analysis_results/mf_token_pos_' + file_name + '.png'
plot_counter(corpus, 'most frequent words', outfile_name)
def kwic(corpus, word):
"""
params:
corpus: corpus
word: word for the context
returns: list of keywords in context
"""
kwic_list = list(corpus.concordance(word, width=5, lines=5))
return kwic_list
def ngrams_list(tokenized_texts, n):
"""
params:
tokenized_texts: list of lists containing tokenized texts
n: number of words for gram
returns: list consisting of n-grams
"""
ngram_tokens_list = []
ngram_pos_list = []
for text in tokenized_texts:
tokens_list = []
pos_list = []
for token, pos in text:
tokens_list.append(token)
pos_list.append(pos)
ngram_tokens = list(ngrams(tokens_list, n))
ngram_pos = list(ngrams(pos_list, n))
ngram_tokens_list.append(ngram_tokens)
ngram_pos_list.append(ngram_pos)
return ngram_tokens_list, ngram_pos_list
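# Hedged example (illustrative only, never called): bigrams over one tagged sentence.
def _ngrams_list_example():
    toy = [[('the', 'DT'), ('cat', 'NN'), ('sat', 'VBD')]]
    token_bigrams, pos_bigrams = ngrams_list(toy, 2)
    # token_bigrams == [[('the', 'cat'), ('cat', 'sat')]]
    # pos_bigrams == [[('DT', 'NN'), ('NN', 'VBD')]]
    return token_bigrams, pos_bigrams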
def plot_counter(counter_object, title, outfile_name):
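    """
    Saves a horizontal bar chart of the 20 most common items in a Counter
    params:
        counter_object: collections.Counter with item frequencies
        title: text appended to the plot title
        outfile_name: path of the png file to write
    """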
keys = []
values = []
for item in counter_object.most_common(20):
keys.append(item[0])
values.append(item[1])
keys = list(reversed(keys))
values = list(reversed(values))
df = pd.DataFrame({'freq': values}, index=keys)
df.plot.barh(align='center')
plot_ttl = 'Top 20 word ' + title
plt.title(plot_ttl)
plt.xlabel('Frequency')
plt.tight_layout()
plt.savefig(outfile_name, bbox_inches='tight')
def word_collocation(tokenized_texts, n, file_name):
"""
    Saves a plot of the top 20 word collocations (words following each other)
params:
tokenized_texts: list of lists containing tokenized texts
n: number of ngrams
        file_name: name of file to be processed (used in the output file names)
"""
ngram_tokens, ngram_pos = ngrams_list(tokenized_texts, n)
flat_ngram_tokens = flatten(ngram_tokens)
flat_ngram_pos = flatten(ngram_pos)
ngram_count_tokens = Counter(flat_ngram_tokens)
ngram_count_pos = Counter(flat_ngram_pos)
outfile_tokens = 'keyword_analysis_results/colloc_tokens' + file_name + '.png'
outfile_pos = 'keyword_analysis_results/colloc_pos' + file_name + '.png'
plot_counter(ngram_count_tokens, 'collocation', outfile_tokens)
plot_counter(ngram_count_pos, 'collocation', outfile_pos)
def word_cooccurence(df, filename):
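    """
    Builds and prints a unigram co-occurrence matrix from the cleaned texts
    params:
        df: preprocessed dataframe with a clean_text column
        filename: name of file to be processed (used only by the commented-out plotting code)
    """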
docs = df.clean_text.tolist()
counter = CountVectorizer(ngram_range=(1, 1))
X = counter.fit_transform(docs)
    # X[X > 0] = 1  # run this line to ignore repeated co-occurrences within the same text
    Xc = (X.T * X)  # co-occurrence matrix in sparse CSR format
    Xc.setdiag(0)  # zero out each word's co-occurrence with itself
print(docs)
print(Xc.todense())
print(counter.vocabulary_)
# cv = CountVectorizer(ngram_range=(2, 2))
# docs = cv.fit_transform(df.clean_text)
# # d = cv.vocabulary_
# keys = cv.get_feature_names()
# visualizer = FreqDistVisualizer(
# features=keys, size=(1080, 720)
# )
# visualizer.fit(docs)
# visualizer.show()
#
# outfile_name = 'keyword_analysis_results/cooc_' + filename + '.png'
# plt.barh(keys, values, align='center')
# plt.title('Top word cooccurrences')
# plt.xlabel('Frequency')
# plt.tight_layout()
# plt.savefig(outfile_name, bbox_inches='tight')
def main():
parser = argparse.ArgumentParser(description='List the content of a folder')
# Add the arguments
parser.add_argument('csv_file', help='preprocessed file for sentiment analysis')
parser.add_argument('platform', help='platform of preprocessed file')
# Execute the parse_args() method
args = parser.parse_args()
preprocessed_file = 'preprocessed_data/' + args.platform + '_preprocessed_' + args.csv_file
df = | pd.read_csv(preprocessed_file, delimiter='\t') | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
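        # normalize() floors each stamp to midnight, so the expected i8 values
        # below are the same instants truncated to an exact multiple of
        # 86,400 * 10**9 ns (both inputs fall on the same day).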
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
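        # ISO 8601: week 1 is the week containing the year's first Thursday,
        # so late-December dates can belong to week 1 of the following year and
        # early-January dates can belong to week 52/53 of the previous year.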
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
        self.assertIs(result, dti)  # union with an empty index returns the original index
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
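        # 946,684,800 seconds after the epoch is 2000-01-01 00:00:00 UTC; the
        # 500 added below is in nanoseconds, so only the nanosecond field
        # should reflect it.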
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
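        # Timestamp.value is nanoseconds since the epoch, so dividing by 1e3,
        # 1e6 and 1e9 expresses the same instant in us, ms and s; the day
        # count exercises unit='D'.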
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import numpy as np
import pandas as pd
import pickle
import mysql.connector
import configparser
config = configparser.ConfigParser()
config.read("configm.ini")
with open('model_match', 'rb') as f:
mp = pickle.load(f)
mydb = mysql.connector.connect(
host=config.get('db-connection','host'),
user=config.get('db-connection','user'),
password=config.get('db-connection','passcode'),
database=config.get('db-connection','name')
)
query = config.get('data-extraction','mainquery')
df = pd.read_sql(query, mydb)
print(df)
df_drop = pd.read_csv('https://app.redash.io/xchange/api/queries/381074/results.csv?api_key=' + config.get('data-extraction','dropoffKey'))
df_drop = df_drop.rename(columns={'Dropoff': 'Dropoff_location'})
df_pick = pd.read_csv('https://app.redash.io/xchange/api/queries/381097/results.csv?api_key=' + config.get('data-extraction','pickupKey'))
df_pick = df_pick.rename(columns={'Pickup': 'Pickup_location'})
df_requester = pd.read_csv('https://app.redash.io/xchange/api/queries/381058/results.csv?api_key=' + config.get('data-extraction','requesterKey'))
df_requester = df_requester.rename(columns={'requester_id': 'Requester'})
df_add = pd.read_csv('https://app.redash.io/xchange/api/queries/381054/results.csv?api_key=' + config.get('data-extraction','addresseeKey'))
df_add = df_add.rename(columns={'Addressee_id': 'Addressee'})
def dataMerge(df1,df2,col):
result = pd.merge(df1, df2, how='left', on=[col])
return result
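# Each left-merge below replaces a raw categorical column (location or company
# id) with its aggregate statistic pulled from Redash ('Shares_accepted' or
# 'Frequency'), dropping the raw key and helper columns and renaming the
# aggregate back to the original column name. This is assumed to match the
# feature layout the pickled model was trained on; the training pipeline is
# not shown here.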
result1 = dataMerge(df1=df,df2=df_drop,col='Dropoff_location')
result1.drop(["Dropoff_location","Count", "when_accepted"],inplace=True,axis=1) # Removing old pickup location values
result1 = result1.rename(columns={'Shares_accepted': 'Dropoff_location'}) # Renaming columns
result2 = dataMerge(df1=result1,df2=df_pick,col='Pickup_location')
result2.drop(["Pickup_location","Count", "when_accepted"],inplace=True,axis=1) # Removing old pickup location values
result2 = result2.rename(columns={'Shares_accepted': 'Pickup_location'})
result3 = dataMerge(df1=result2,df2=df_requester,col='Requester')
result3.drop(["Requester","Count", "when_accepted"],inplace=True,axis=1)# Removing old pickup location values
result3 = result3.rename(columns={'Frequency': 'Requester'})# Renaming columns
result4 = dataMerge(df1=result3,df2=df_add,col='Addressee')
result4.drop(["Addressee","Count", "when_accepted"],inplace=True,axis=1)# Removing old pickup location values
result4 = result4.rename(columns={'Frequency': 'Addressee'})# Renaming columns
result4 = result4.fillna(0)
result4.drop(['Requirement_id'], axis=1 , inplace=True)
result4 = pd.concat([result4,pd.get_dummies(result4['Direction'])],axis=1)
result4.drop(['Direction'],axis=1, inplace=True)
#result4 = pd.concat([result4,pd.get_dummies(result4['Container_Type'])],axis=1)
#result4.drop(['Container_Type'],axis=1, inplace=True)
#result5 = result4.head()
#result5.to_csv('Match_final.csv', index = False)
print(result4)
pred_try = mp.predict(result4)
pred_try_df = pd.DataFrame(pred_try, columns=['Match_Prediction'])
Req = df.Requirement_id
Req = Req.reset_index(drop=True)
Req = pd.DataFrame(Req,columns=['Requirement_id'])
Company = df.Requester
Company = Company.reset_index(drop=True)
Company = pd.DataFrame(Company,columns=['Requester'])
Partner = df.Addressee
Partner = Partner.reset_index(drop=True)
Partner = pd.DataFrame(Partner,columns=['Addressee'])
Pick = df.Pickup_location
Pick = Pick.reset_index(drop=True)
Pick = | pd.DataFrame(Pick,columns=['Pickup_location']) | pandas.DataFrame |
import unittest
import pandas as pd
from analysis.data import GeographicArea, features
from analysis.scaler import SpatialWaterVapourScaler
from analysis.search import GridSearchHDBSCAN, GridSearchDBSCAN
from analysis.aggregation import AggregateClusterStatistics
from sklearn.model_selection import ParameterGrid
import luigi
file = 'test/resources/METOPAB_20160101_global_evening_1000.nc'
class TestData(unittest.TestCase):
def test_import(self):
area = GeographicArea(lat=(-25, 50), lon=(-45, 60))
df = area.import_dataset(file)
self.assertEqual(df.shape, (541, 4))
# test filtering
self.assertGreaterEqual(df.lat.min(), -25)
self.assertLessEqual(df.lat.max(), 50)
self.assertGreaterEqual(df.lon.min(), -45)
self.assertLessEqual(df.lon.max(), 60)
class TestScaler(unittest.TestCase):
def test_latitude_scaling(self):
df = pd.DataFrame({
'lat': [40., 40.],
'lon': [5., -5.],
'H2O': [2., 2.],
'delD': [3., 3.]
})
self.assertListEqual(list(df.columns), features)
scaler = SpatialWaterVapourScaler(delD=10, H2O=0.1, km=50)
X_ = scaler.fit_transform(df[features].values)
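        # Expected magnitudes assume roughly 111 km per degree of latitude and
        # a cos(latitude) correction for longitude: 40 deg -> ~4440 km and
        # +/-5 deg of longitude at 40 deg latitude -> ~+/-425 km. This is an
        # assumption inferred from the asserted values, not taken from the
        # SpatialWaterVapourScaler implementation.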
        # lon (column 1): differs in sign between the two rows
        self.assertAlmostEqual(X_[0, 1] * scaler.km, 425, places=0)
        self.assertAlmostEqual(X_[1, 1] * scaler.km, -425, places=0)
        # lat (column 0)
        self.assertAlmostEqual(X_[0, 0] * scaler.km, 40 * 111)
class TestGridSearch(unittest.TestCase):
def test_hdbscan(self):
task = GridSearchHDBSCAN(
file=file,
dst='/tmp/cluster',
force_upstream=True
)
assert luigi.build([task], local_scheduler=True)
df = pd.read_csv(task.output().path)
columns = set(df.columns)
expected = {'total', 'n_cluster', 'cluster_size_mean',
'cluster_size_std', 'noise'}
self.assertTrue(expected <= columns)
self.assertEqual(df.shape[1], 14)
def test_dbscan(self):
task = GridSearchDBSCAN(
file=file,
dst='/tmp/cluster',
force_upstream=True
)
assert luigi.build([task], local_scheduler=True)
df = pd.read_csv(task.output().path)
columns = set(df.columns)
expected = {'total', 'n_cluster', 'cluster_size_mean',
'cluster_size_std', 'noise'}
self.assertTrue(expected <= columns)
self.assertEqual(df.shape, (2, 14))
def test_aggregation(self):
grid_params = {
'scaler__km': [60],
'scaler__H2O': [0.1],
'scaler__delD': [10],
'cluster__eps': [2.],
'cluster__min_samples': [10, 12]
}
task = AggregateClusterStatistics(
grid_params,
file_pattern='test/resources/METOPAB*evening_1000.nc',
dst='/tmp/cluster',
clustering_algorithm='dbscan',
force_upstream=True
)
assert luigi.build([task], local_scheduler=True)
with task.output().open() as out:
df = | pd.read_csv(out) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the loop body never runs and the
        # previously assigned values remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
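        # with regex=False the pattern is matched literally, so only the
        # element containing the literal text 'mmm[_]+' is True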
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags cannot be set when pat is a compiled regex;
# doing so raises ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# a single Series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
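# subjects with no match at all (the empty string at position 0 here)
# are omitted from the result index entirely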
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
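# when nothing matches, extractall returns an empty DataFrame whose
# columns still correspond to the capture groups of the pattern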
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should give the same result as a Series with the default
# index, and the Index name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# str methods (including extract) on an empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, the plain str method raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting the max number of splits: make sure splitting proceeds from the right
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# n=0 and n=-1 both mean "no limit" (re.split uses maxsplit=0, str.split uses maxsplit=-1)
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
# Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
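# compare against plain Python slicing semantics for negative steps and out-of-range bounds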
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u('hello world!!'),
                 u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdefa'),
                 u('ab ab ab ab '), u('ab ab ab ab a'), u('\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = | Series(["foo", "foo", "foo"], dtype=np.object_) | pandas.Series |
# Copyright 2021-present Kensho Technologies, LLC.
from collections import Counter
import logging
from multiprocessing import Pool
import os
import pandas as pd
import re
import typing
from kwnlp_preprocessor import argconfig
from kwnlp_preprocessor import utils
logger = logging.getLogger(__name__)
def parse_file(args: dict) -> None:
# read links
logger.info("parsing {}".format(args["link_file_path"]))
df_links = pd.read_csv(
args["link_file_path"],
usecols=["anchor_text", "source_page_id", "target_page_id"],
)
# calculate anchor target counts
atc: typing.Counter[typing.Tuple[str, int]] = Counter(
(zip(df_links["anchor_text"], df_links["target_page_id"]))
)
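# each (anchor_text, target_page_id) pair is counted once per occurrence, so
# most_common() below orders the anchor->target links from most to least frequent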
df_atc = pd.DataFrame(
[(el[0][0], el[0][1], el[1]) for el in atc.most_common()],
columns=["anchor_text", "target_page_id", "count"],
)
df_atc.to_csv(args["atc_file_path"], index=False)
# calculate in/out link counts
df_in = pd.DataFrame(
Counter(df_links["target_page_id"]).most_common(),
columns=["page_id", "in_count"],
)
df_out = pd.DataFrame(
Counter(df_links["source_page_id"]).most_common(),
columns=["page_id", "out_count"],
)
df_inout = | pd.merge(df_in, df_out, on="page_id", how="outer") | pandas.merge |
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import pandas as pd
import numpy as np
import moment
from operator import itemgetter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().format('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication details
def getAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def getIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatIsoDate(self,d):
return moment.date(d).format('YYYY-MM-DD')
def getDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.format('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
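# e.g. m=4, n=2 with type='SEASONAL' returns the same 4-week window counted back from
# this date in each of the 2 previous years (ISO week strings such as '2019W37');
# 'CASE_BASED' falls back to the relative period 'LAST_7_DAYS'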
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().format('YYYY-MM-DD')
currentYear = self.getIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).format('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
else:
pe = 'LAST_7_DAYS'
periods.append(pe)
return periods
def getHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def getHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Update data
def updateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def getArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.append(obj['id'])
return arrayObj
# Check datastore existence
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.get(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def getOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def getOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".format(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".format(prefix,sep,column,sep,code)
else:
code = "{}{}{}".format(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " reminder"
text = "Dear all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.append({"id": outbreak['orgUnit']})
organisationUnits.append({"id": outbreak['reportingOrgUnit']})
message.append(subject)
message.append(text)
message.append(users)
message.append(organisationUnits)
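# packed as (subject, text, users, organisationUnits) and returned as a Series so the
# payload can also be built row-wise with DataFrame.apply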
message = tuple(message)
return pd.Series(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,headers,type):
cols = []
for header in headers:
if(type == 'EVENT'):
if header['name'] == self.dateOfOnsetUid:
cols.append('onSetDate')
elif header['name'] == self.conditionOrDiseaseUid:
cols.append('disease')
elif header['name'] == self.regPatientStatusOutcome:
cols.append('immediateOutcome')
elif header['name'] == self.patientStatusOutcome:
cols.append('statusOutcome')
elif header['name'] == self.testResult:
cols.append('testResult')
elif header['name'] == self.testResultClassification:
cols.append('testResultClassification')
elif header['name'] == self.caseClassification:
cols.append('caseClassification')
else:
cols.append(header['name'])
elif (type == 'DATES'):
cols.append(header['name'])
else:
cols.append(header['column'])
return cols
# Get start and end date
def getStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
# create Panda Data Frame from event data
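# (analytics responses carry their column metadata in 'headers' and the values in 'rows';
# the type flag controls how those headers are mapped onto DataFrame column names)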
def createDataFrame(self,events,type=None):
if type is None:
if events is not None:
#pd.DataFrame.from_records(events)
dataFrame = pd.io.json.json_normalize(events)
else:
dataFrame = pd.DataFrame()
else:
cols = self.createColumns(events['headers'],type)
dataFrame = pd.DataFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = pd.DataFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and len(aggData['rows']) >0):
df = self.createDataFrame(aggData,'AGGREGATE')
dfColLength = len(df.columns)
df1 = df.iloc[:,(detectionLevel+4):dfColLength]
df.iloc[:,(detectionLevel+4):dfColLength] = df1.apply(pd.to_numeric,errors='coerce').fillna(0).astype(np.int64)
# print(df.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do mean for current cases or deaths
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4)]
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+5+m)]
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
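# aberration rule used below: flag the current period when it exceeds the mean of the
# previous periods plus 2 standard deviations (mean20std); the 1.5-SD columns are kept
# as a softer threshold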
# periods
df['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
df['lastCaseDate'] = moment.date(startEndDates[1]).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].mean(axis=1)
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].mean(axis=1)
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
# Mid period for seasonal = mean of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
df['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['lastCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
df['reportingOrgUnitName'] = df.iloc[:,reportingLevel-1]
df['reportingOrgUnit'] = df.iloc[:,detectionLevel].apply(self.getOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
df['orgUnit'] = df.iloc[:,detectionLevel]
df['orgUnitName'] = df.iloc[:,detectionLevel+1]
df['orgUnitCode'] = df.iloc[:,detectionLevel+2]
dropColumns = [col for idx,col in enumerate(df.columns.values.tolist()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
df.drop(columns=dropColumns,inplace=True)
df['confirmedValue'] = df.loc[:,'mean_current_cases']
df['deathValue'] = df.loc[:,'mean_current_deaths']
df['suspectedValue'] = df.loc[:,'mean_current_cases']
df['disease'] = diseaseMeta['disease']
df['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "mean_current_cases >= mean20std_mn_cases & mean_current_cases != 0 & mean20std_mn_cases != 0"
df.query(checkEpidemic,inplace=True)
if df.empty is True:
df['alert'] = "false"
if df.empty is not True:
df['epidemic'] = 'true'
# Filter out those greater or equal to threshold
df = df[df['epidemic'] == 'true']
df['active'] = "true"
df['alert'] = "true"
df['reminder'] = "false"
#df['epicode']=df['orgUnitCode'].str.cat('E',sep="_")
df['epicode'] = df.apply(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "df['epidemic'] == 'true' && df['active'] == 'true' && df['reminder'] == 'false'"
closedVigilanceQuery = "df['epidemic'] == 'true' && df['active'] == 'true' && df['reminder'] == 'true'"
df[['status','active','closeDate','reminderSent','dateReminderSent']] = df.apply(self.getEpidemicDetails,axis=1)
else:
# No data for cases found
pass
return df
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace all values with standard text
def replaceText(self,df):
df.replace(to_replace='Confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='Confirmed',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='died',value='deathValue',regex=True,inplace=True)
df.replace(to_replace='Died case',value='deathValue',regex=True,inplace=True)
return df
# Get Confirmed,suspected cases and deaths
def getCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epidemic is active or ended
def getStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if pd.to_datetime(self.today) < pd.to_datetime(row['endDate']):
currentStatus='active'
elif pd.to_datetime(row['endDate']) == (pd.to_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'reminder':
if row['reminderDate'] == pd.to_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return pd.Series(currentStatus)
# get onset date
def getOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).format('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def getTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).format('YYYY-MM-DD')
# replace date of onset with event dates
def replaceDatesWithEventData(self,row):
if row['onSetDate'] == '':
return pd.to_datetime(row['eventdate'])
else:
return pd.to_datetime(row['onSetDate'])
# Get columns based on query or condition
def getQueryValue(self,df,query,column,inplace=True):
query = "{}={}".format(column,query)
df.eval(query,inplace)
return df
# Get columns based on query or condition
def queryValue(self,df,query,column=None,inplace=True):
df.query(query)
return df
# Get epidemic, closure and status
def getEpidemicDetails(self,row,columns=None):
details = []
if row['epidemic'] == "true" and row['active'] == "true" and row['reminder'] == "false":
details.append('Closed')
details.append('false')
details.append(self.today)
details.append('false')
details.append('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['reminder'] == "true":
details.append('Closed Vigilance')
details.append('true')
details.append(row['closeDate'])
details.append('true')
details.append(self.today)
# Send Reminder for closure
else:
details.append('Confirmed')
details.append('true')
details.append('')
details.append('false')
details.append('')
detailsSeries = tuple(details)
return pd.Series(detailsSeries)
# Get key id from dataelements
def getDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = pd.DataFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
df = self.createDataFrame(caseEvents,type)
caseEventsColumnsById = df.columns
dfColLength = len(df.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#df['dateOfOnSet'] = np.where(df['onSetDate']== '',pd.to_datetime(df['eventdate']).dt.strftime('%Y-%m-%d'),df['onSetDate'])
df['dateOfOnSet'] = df.apply(self.getOnSetDate,axis=1)
# Replace all text with standard text
df = self.replaceText(df)
# Transpose and Aggregate values
dfCaseClassification = df.groupby(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].value_counts().unstack().fillna(0).reset_index()
dfCaseImmediateOutcome = df.groupby(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].value_counts().unstack().fillna(0).reset_index()
dfTestResult = df.groupby(['ouname','ou','disease','dateOfOnSet'])['testResult'].value_counts().unstack().fillna(0).reset_index()
dfTestResultClassification = df.groupby(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].value_counts().unstack().fillna(0).reset_index()
dfStatusOutcome = df.groupby(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].value_counts().unstack().fillna(0).reset_index()
combinedDf = (pd.merge(dfCaseClassification, dfCaseImmediateOutcome, on=['ou','ouname','disease','dateOfOnSet'], how='left')
              .merge(dfTestResultClassification, on=['ou','ouname','disease','dateOfOnSet'], how='left')
              .merge(dfTestResult, on=['ou','ouname','disease','dateOfOnSet'], how='left')
              .merge(dfStatusOutcome, on=['ou','ouname','disease','dateOfOnSet'], how='left'))
combinedDf.sort_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = pd.to_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.apply(self.getCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
dfConfirmed = combinedDf.groupby(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['sum']).reset_index()
dfConfirmed.rename(columns={'sum':'confirmedValue' },inplace=True)
dfSuspected = combinedDf.groupby(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['sum']).reset_index()
dfSuspected.rename(columns={'sum':'suspectedValue' },inplace=True)
dfFirstAndLastCaseDate = df.groupby(['ouname','ou','disease'])['dateOfOnSet'].agg(['min','max']).reset_index()
dfFirstAndLastCaseDate.rename(columns={'min':'firstCaseDate','max':'lastCaseDate'},inplace=True)
aggDf = pd.merge(dfConfirmed,dfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').merge(dfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].apply(self.getOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].apply(self.getOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = pd.to_datetime(pd.to_datetime(aggDf['lastCaseDate']) + pd.to_timedelta(pd.np.ceil(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['reminderDate'] = pd.to_datetime(pd.to_datetime(aggDf['lastCaseDate']) + pd.to_timedelta(pd.np.ceil(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
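# an outbreak window is treated as closed two incubation periods after the last case;
# the reminder date falls one week before that computed end date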
aggDf.rename(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.apply(self.getStatus,args=['active'],axis=1)
aggDf[['reminder']] = aggDf.apply(self.getStatus,args=['reminder'],axis=1)
else:
df1 = df.iloc[:,(detectionLevel+4):dfColLength]
df.iloc[:,(detectionLevel+4):dfColLength] = df1.apply(pd.to_numeric,errors='coerce').fillna(0).astype(np.int64)
if(dateData['height'] > 0):
dfDates = self.createDataFrame(dateData,'DATES')
dfDates.to_csv('aggDfDates.csv',encoding='utf-8')
dfDates.rename(columns={dfDates.columns[7]:'disease',dfDates.columns[8]:'dateOfOnSet'},inplace=True)
dfDates['dateOfOnSet'] = dfDates.apply(self.getTeiOnSetDate,axis=1)
dfDates = dfDates.groupby(['ou','disease'])['dateOfOnSet'].agg(['min','max']).reset_index()
dfDates.rename(columns={'min':'firstCaseDate','max':'lastCaseDate'},inplace=True)
df = pd.merge(df,dfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
df['incubationDays'] = int(diseaseMeta['incubationDays'])
df['endDate'] = pd.to_datetime(pd.to_datetime(df['lastCaseDate']) + pd.to_timedelta( | pd.np.ceil(2*df['incubationDays']) | pandas.np.ceil |
# functions to analyze the results in python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from concise.utils.helper import merge_dicts
# make a report
def get_cv_accuracy(res):
"""
Extract the cv accuracy from the model
"""
ac_list = [(accuracy["train_acc_final"],
accuracy["test_acc_final"]
)
for accuracy, weights in res]
ac = np.array(ac_list)
perf = {
"mean_train_acc": np.mean(ac[:, 0]),
"std_train_acc": np.std(ac[:, 0]),
"mean_test_acc": np.mean(ac[:, 1]),
"std_test_acc": np.std(ac[:, 1]),
}
return perf
def get_kwargs_cv_accuracy(cv_res, i=None, filename=None):
a = cv_res['kwargs']
b = get_cv_accuracy(cv_res['output'])
dic = merge_dicts(a, b)
# append i if neccesary
if i is not None:
dic = merge_dicts(dic, {'i': i})
if filename is not None:
dic = merge_dicts(dic, {'filename': filename})
# append motifs, execution time and features:
dic = merge_dicts(dic, {'features': cv_res.get('features', None)})
dic = merge_dicts(dic, {'execution_time': cv_res.get('execution_time', None)})
dic = merge_dicts(dic, {'motifs': cv_res.get('motifs', None)})
return dic
# update this function
def cv_list2dt(cv_list):
perf_list = [get_kwargs_cv_accuracy(res, i=i, filename=filename) for res, i, filename in cv_list]
dt = | pd.DataFrame(perf_list) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Observations and Insights
#
# In[4]:
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = | pd.read_csv(study_results_path) | pandas.read_csv |
from .genometric_space import GenometricSpace
from .dataset.parser.parser import Parser
import pandas as pd
import warnings
import numpy as np
class MultiRefModel:
"""
GenometricSpace class to represent data that are mapped with multiple references
"""
def __init__(self):
"""
Constructor
"""
self.data_model = []
return
def load(self, path, genes_uuid, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False):
"""
Loads the multi referenced mapped data from the file system
:param path: The path to the files
:param genes_uuid: The unique identifier metadata column name to separate the data by the number of references
:param regs: The region data that are to be analyzed
:param meta: The metadata that are to be analyzed
:param values: The values to fill the matrix
:param full_load: Specifies how the data are parsed. If False, the parser skips zero (0)
values to speed up loading and save memory; those zeros are restored when the matrix is
built, unless a row consists entirely of zero columns. This mode is strongly recommended
for sparse datasets. If True, all zero (0) values are read as well.
"""
if not full_load:
warnings.warn("\n\n You are using the optimized loading technique. "
"All-zero rows are not going to be loaded into memory. "
"To load all the data please set the full_load parameter equal to True.")
p = Parser(path)
all_meta_data = p.parse_meta(meta)
all_data = p.parse_data(regs, values, full_load)
all_data = pd.pivot_table(all_data,
values=values, columns=regs, index=['sample'],
fill_value=0)
group1 = all_meta_data.groupby([genes_uuid]).count()
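# one GenometricSpace is built per distinct reference (genes_uuid group), holding only
# the samples mapped against that reference and the regions they actually cover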
for g in group1.index.values:
series = all_meta_data[genes_uuid] == g
m = (all_meta_data[series])
d = (all_data.loc[series]).dropna(axis=1, how='all') # not to show the NaN data
self.data_model.append(GenometricSpace.from_memory(d, m))
self.all_meta_data = all_meta_data
def merge(self, samples_uuid):
"""
The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe
"""
all_meta_data = pd.DataFrame()
for dm in self.data_model:
all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
group = all_meta_data.groupby([samples_uuid])['sample']
sample_sets = group.apply(list).values
merged_df = pd.DataFrame()
multi_index = list(map(list, zip(*sample_sets)))
multi_index_names = list(range(0, len(sample_sets[0])))
i = 1
for pair in sample_sets:
i += 1
numbers = list(range(0, len(pair)))
df_temp = pd.DataFrame()
for n in numbers:
try: # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
except:
pass
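# transpose so each reference is a row, back-fill upwards and keep the first row:
# for every region column this retains the first non-NaN value found across the
# references of this sample set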
merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
multi_index = np.asarray(multi_index)
multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
merged_df.index = multi_index
return merged_df
def compact_view(self, merged_data, selected_meta, reference_no):
"""
Creates and returns the compact view where the index of the dataframe is a multi index of the selected metadata.
Side effect: Alters the merged_data parameter
:param merged_data: The merged data that is to be used to create the compact view
:param selected_meta: The selected metadata to create the multi index
:param reference_no: The reference number that the metadata are going to be taken
:return: Returns the multi-indexed dataframe w.r.t. the selected metadata
"""
meta_names = list(selected_meta)
meta_index = []
for x in meta_names:
meta_index.append(self.all_meta_data.ix[merged_data.index.get_level_values(reference_no)][x].values)
meta_index = np.asarray(meta_index)
multi_meta_index = | pd.MultiIndex.from_arrays(meta_index, names=meta_names) | pandas.MultiIndex.from_arrays |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import boto3
from dash.dependencies import Input, Output
from datetime import datetime
from glob import glob
from urllib.request import urlopen
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from io import StringIO
import io
import json
import numpy as np
import pandas as pd
import plotly.express as px
import requests
import xml.etree.ElementTree as ET
import time
import pytz
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
s3 = boto3.resource('s3')
# re-scan the dashboard data files to pick up updates
# set to reload every 5 minutes (UPDATE_INTERVAL = 60*5 seconds)
# number of seconds between re-calculating the data
UPDATE_INTERVAL = 60*5
def get_new_data_PA():
"""Updates the global variable 'data' with new data"""
global EN_PA_df
EN_PA_df = pd.read_csv('https://en2020.s3.amazonaws.com/penn_dash.csv')
EN_PA_df['County'] = EN_PA_df.CountyName
def get_new_data_FL():
"""Updates the global variable 'data' with new data"""
global EN_FL_df
EN_FL_df = pd.read_csv('https://en2020.s3.amazonaws.com/florida_dash.csv')
EN_FL_df['County'] = EN_FL_df.CountyName
def get_new_data_MI():
"""Updates the global variable 'data' with new data"""
global EN_MI_df
EN_MI_df = pd.read_csv('https://en2020.s3.amazonaws.com/mich_dash.csv')
EN_MI_df['County'] = EN_MI_df.CountyName
def get_new_data_NC():
"""Updates the global variable 'data' with new data"""
global EN_NC_df
EN_NC_df = pd.read_csv('https://en2020.s3.amazonaws.com/ncar_dash.csv')
EN_NC_df['County'] = EN_NC_df.CountyName
def get_new_data_every(period=UPDATE_INTERVAL):
"""Update the data every 'period' seconds"""
while True:
# print("updating....")
# refresh_live_data()
# print('data refreshed')
get_new_data_PA()
get_new_data_FL()
get_new_data_MI()
get_new_data_NC()
timestamp = datetime.now().strftime("%I:%M%p %z %b %d %Y")
print("data updated %s" % timestamp)
time.sleep(period)
# get_new_data_PA()
# get_new_data_FL()
# Run the function in another thread
executor = ThreadPoolExecutor(max_workers=1)
executor.submit(get_new_data_every)
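# the refresh loop runs on a single background worker so the Dash server keeps serving
# requests while the four state CSVs are re-pulled from S3 every UPDATE_INTERVAL seconds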
###############################################
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
external_stylesheets = ['https://cdn.jsdelivr.net/npm/water.css@2/out/light.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
def tall_EN_df(df):
biden_vote_df = pd.DataFrame(df[['DEM_20_raw','COUNTY_CAT','County']])
biden_vote_df.columns = ['Votes','Category','County']
biden_vote_df['Candidate'] = "Biden"
trump_vote_df = pd.DataFrame(df[['REP_20_raw','COUNTY_CAT','County']])
trump_vote_df.columns = ['Votes','Category','County']
trump_vote_df['Candidate'] = "Trump"
other_vote_df = pd.DataFrame(df[['OTHER_20_raw','COUNTY_CAT','County']])
other_vote_df.columns = ['Votes','Category','County']
other_vote_df['Candidate'] = "Other"
remaining_vote_df = pd.DataFrame(df[['Expected_20_Vote_Remaining','COUNTY_CAT','County']])
remaining_vote_df.columns = ['Votes','Category','County']
remaining_vote_df['Candidate'] = "Remaining"
table_df = pd.concat([biden_vote_df,trump_vote_df,other_vote_df], axis=0)
return table_df
def tall_EN_df_reg(df):
reg_df = pd.DataFrame(df[['Reg_20_Total','COUNTY_CAT','County']])
reg_df.columns = ['Votes','Category','County']
reg_df['Category'] = 'Registered Voters'
expected_df = pd.DataFrame(df[['Expected_2020_Vote','COUNTY_CAT','County']])
expected_df.columns = ['Votes','Category','County']
expected_df['Category'] = 'Expected Votes'
remaining_df = pd.DataFrame(df[['Expected_20_Vote_Remaining','COUNTY_CAT','County']])
remaining_df.columns = ['Votes','Category','County']
remaining_df['Category'] = 'Remaining Votes'
table_df = pd.concat([reg_df,expected_df,remaining_df], axis=0)
return table_df
def make_table(state1, state2, state3, state4):
PA_summary_table = tall_EN_df(state1)
FL_summary_table = tall_EN_df(state2)
MI_summary_table = tall_EN_df(state3)
NC_summary_table = tall_EN_df(state4)
PA_summary_table['State'] = 'Penn'
FL_summary_table['State'] = 'Florida'
MI_summary_table['State'] = 'Michigan'
NC_summary_table['State'] = 'NCarolina'
summary_table = pd.concat([FL_summary_table,PA_summary_table,MI_summary_table,NC_summary_table])
summary_table = pd.pivot_table(summary_table, index=['Candidate'], values=('Votes'), \
columns=('State'), aggfunc=np.sum, margins=False).reset_index()
summary_table = summary_table.reindex([0,2,1])
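# pivot_table sorts candidates alphabetically (Biden, Other, Trump); reindex restores
# Biden, Trump, Other order before the total row is appended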
summary_table['FL_pct'] = (summary_table.Florida / summary_table.Florida.sum() * 100)
summary_table['PA_pct'] = (summary_table.Penn / summary_table.Penn.sum() * 100)
summary_table['MI_pct'] = (summary_table.Michigan / summary_table.Michigan.sum() * 100)
summary_table['NC_pct'] = (summary_table.NCarolina / summary_table.NCarolina.sum() * 100)
summary_table = summary_table.append(summary_table.sum(numeric_only=True), ignore_index=True)
summary_table.at[3, 'Candidate'] = 'Total'
summary_table['Florida'] = summary_table['Florida'].map("{:,.0f}".format)
summary_table['Penn'] = summary_table['Penn'].map("{:,.0f}".format)
summary_table['Michigan'] = summary_table['Michigan'].map("{:,.0f}".format)
summary_table['NCarolina'] = summary_table['NCarolina'].map("{:,.0f}".format)
summary_table['FL_pct'] = summary_table['FL_pct'].map('{:,.2f}%'.format)
summary_table['PA_pct'] = summary_table['PA_pct'].map('{:,.2f}%'.format)
summary_table['MI_pct'] = summary_table['MI_pct'].map('{:,.2f}%'.format)
summary_table['NC_pct'] = summary_table['NC_pct'].map('{:,.2f}%'.format)
summary_table = summary_table[['Candidate','Florida','FL_pct','Penn','PA_pct','Michigan','MI_pct','NCarolina','NC_pct']]
return summary_table
# fig = go.Figure(data=[go.Table(
# header=dict(values=list(summary_table.columns),
# fill_color='paleturquoise',
# align='left'),
# cells=dict(values=[summary_table.Candidate,summary_table.Florida,summary_table.FL_pct,
# summary_table.Pennsylvania,summary_table.PA_pct],
# fill_color='lavender',
# align='left'))
# ])
# fig.show()
def make_vote_table(state1,state2,state3,state4):
PA_reg_table = tall_EN_df_reg(state1)
FL_reg_table = tall_EN_df_reg(state2)
MI_reg_table = tall_EN_df_reg(state3)
NC_reg_table = tall_EN_df_reg(state4)
PA_reg_table['State'] = 'Pennsylvania'
FL_reg_table['State'] = 'Florida'
MI_reg_table['State'] = 'Michigan'
NC_reg_table['State'] = 'NorthCarolina'
reg_table = pd.concat([FL_reg_table,PA_reg_table,MI_reg_table,NC_reg_table])
reg_table = pd.pivot_table(reg_table, index=['Category'], values=('Votes'), \
columns=('State'), aggfunc=np.sum, margins=False).reset_index()
reg_table['Florida'] = reg_table['Florida'].map("{:,.0f}".format)
reg_table['Pennsylvania'] = reg_table['Pennsylvania'].map("{:,.0f}".format)
reg_table['Michigan'] = reg_table['Michigan'].map("{:,.0f}".format)
reg_table['NorthCarolina'] = reg_table['NorthCarolina'].map("{:,.0f}".format)
    # 2016 turnout is computed from the module-level EN_*_df frames rather than the
    # state1..state4 arguments passed into this function
    FL_turnout = str(((EN_FL_df['Total_Vote_16'].sum() / EN_FL_df['Reg_16_Total'].sum())*100).round(1)) + "%"
    PA_turnout = str(((EN_PA_df['Total_Vote_16'].sum() / EN_PA_df['Reg_16_Total'].sum())*100).round(1)) + "%"
    MI_turnout = str(((EN_MI_df['Total_Vote_16'].sum() / EN_MI_df['Reg_16_Total'].sum())*100).round(1)) + "%"
    NC_turnout = str(((EN_NC_df['Total_Vote_16'].sum() / EN_NC_df['Reg_16_Total'].sum())*100).round(1)) + "%"
    turnout = pd.DataFrame([['2016 Turnout',FL_turnout,PA_turnout,MI_turnout,NC_turnout]], columns = ['Category','Florida','Pennsylvania','Michigan','NorthCarolina'])
import ast
import json
import os
import sys
import uuid
import lxml.etree
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.inputs_handler import matsim_reader
from tests.test_outputs_handler_matsim_xml_writer import network_dtd, schedule_dtd
from genet.schedule_elements import Route, Service, Schedule
from genet.utils import plot, spatial
from genet.inputs_handler import read
from tests.fixtures import assert_semantically_equal, route, stop_epsg_27700, network_object_from_test_data, \
full_fat_default_config_path, correct_schedule, vehicle_definitions_config_path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
puma_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "network.xml"))
puma_schedule_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "schedule.xml"))
simplified_network = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "network.xml"))
simplified_schedule = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "schedule.xml"))
network_link_attrib_text_missing = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network_link_attrib_text_missing.xml"))
@pytest.fixture()
def network1():
n1 = Network('epsg:27700')
n1.add_node('101982',
{'id': '101982',
'x': '528704.1425925883',
'y': '182068.78193707118',
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n1.add_node('101986',
{'id': '101986',
'x': '528835.203274008',
'y': '182006.27331298392',
'lon': -0.14439428709377497,
'lat': 51.52228713323965,
's2_id': 5221390328605860387})
n1.add_link('0', '101982', '101986',
attribs={'id': '0',
'from': '101982',
'to': '101986',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390328605860387,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n1
@pytest.fixture()
def network2():
n2 = Network('epsg:4326')
n2.add_node('101982',
{'id': '101982',
'x': -0.14625948709424305,
'y': 51.52287873323954,
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n2.add_node('101990',
{'id': '101990',
'x': -0.14770188709624754,
'y': 51.5205729332399,
'lon': -0.14770188709624754,
'lat': 51.5205729332399,
's2_id': 5221390304444511271})
n2.add_link('0', '101982', '101990',
attribs={'id': '0',
'from': '101982',
'to': '101990',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390304444511271,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n2
def test_network_graph_initiates_as_not_simplififed():
n = Network('epsg:27700')
assert not n.graph.graph['simplified']
def test__repr__shows_graph_info_and_schedule_info():
n = Network('epsg:4326')
assert 'instance at' in n.__repr__()
assert 'graph' in n.__repr__()
assert 'schedule' in n.__repr__()
def test__str__shows_info():
n = Network('epsg:4326')
assert 'Graph info' in n.__str__()
assert 'Schedule info' in n.__str__()
def test_reproject_changes_x_y_values_for_all_nodes(network1):
network1.reproject('epsg:4326')
nodes = dict(network1.nodes())
correct_nodes = {
'101982': {'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'101986': {'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}}
target_change_log = pd.DataFrame(
{'timestamp': {3: '2020-07-09 19:50:51', 4: '2020-07-09 19:50:51'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'node', 4: 'node'}, 'old_id': {3: '101982', 4: '101986'},
'new_id': {3: '101982', 4: '101986'}, 'old_attributes': {
3: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'new_attributes': {
3: "{'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'diff': {3: [('change', 'x', ('528704.1425925883', -0.14625948709424305)),
('change', 'y', ('182068.78193707118', 51.52287873323954))],
4: [('change', 'x', ('528835.203274008', -0.14439428709377497)),
('change', 'y', ('182006.27331298392', 51.52228713323965))]}}
)
assert_semantically_equal(nodes, correct_nodes)
for i in [3, 4]:
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'old_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'old_attributes']))
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'new_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'new_attributes']))
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), target_change_log[cols_to_compare],
check_dtype=False)
def test_reproject_delegates_reprojection_to_schedules_own_method(network1, route, mocker):
mocker.patch.object(Schedule, 'reproject')
network1.schedule = Schedule(epsg='epsg:27700', services=[Service(id='id', routes=[route])])
network1.reproject('epsg:4326')
network1.schedule.reproject.assert_called_once_with('epsg:4326', 1)
def test_reproject_updates_graph_crs(network1):
network1.reproject('epsg:4326')
assert network1.graph.graph['crs'] == {'init': 'epsg:4326'}
def test_reprojecting_links_with_geometries():
n = Network('epsg:27700')
n.add_nodes({'A': {'x': -82514.72274, 'y': 220772.02798},
'B': {'x': -82769.25894, 'y': 220773.0637}})
n.add_links({'1': {'from': 'A', 'to': 'B',
'geometry': LineString([(-82514.72274, 220772.02798),
(-82546.23894, 220772.88254),
(-82571.87107, 220772.53339),
(-82594.92709, 220770.68385),
(-82625.33255, 220770.45579),
(-82631.26842, 220770.40158),
(-82669.7309, 220770.04349),
(-82727.94946, 220770.79793),
(-82757.38528, 220771.75412),
(-82761.82425, 220771.95614),
(-82769.25894, 220773.0637)])}})
n.reproject('epsg:2157')
geometry_coords = list(n.link('1')['geometry'].coords)
assert round(geometry_coords[0][0], 7) == 532006.5605980
assert round(geometry_coords[0][1], 7) == 547653.3751768
assert round(geometry_coords[-1][0], 7) == 531753.4315189
assert round(geometry_coords[-1][1], 7) == 547633.5224837
def test_adding_the_same_networks():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_the_same_networks_but_with_differing_projections():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right.reproject('epsg:4326')
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_node_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('20', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_link_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('10', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_multiindices():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
    n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
                           'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
    n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
                           'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
    n_right.add_link('1', '1', '2', 0, attribs={'modes': ['walk', 'bike']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 2
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
assert n_left.graph['1']['2'][0] == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_disjoint_networks_with_unique_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('20', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('100', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {'10': {'id': '1', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 1},
'20': {'id': '2', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 2},
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954,
's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965,
's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'100': {'modes': ['walk'], 'from': '10', 'to': '20', 'id': '100'},
'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_disjoint_networks_with_clashing_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('2', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 4
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_simplified_network_to_unsimplified_network_throws_error():
    n = Network('epsg:27700')
    m = Network('epsg:27700')
m.graph.graph['simplified'] = True
with pytest.raises(RuntimeError) as error_info:
n.add(m)
assert "cannot add" in str(error_info.value)
def test_print_shows_info(mocker):
mocker.patch.object(Network, 'info')
n = Network('epsg:27700')
n.print()
n.info.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker):
mocker.patch.object(plot, 'plot_graph_routes')
n = Network('epsg:27700')
n.plot()
plot.plot_graph_routes.assert_called_once()
def test_plot_graph_delegates_to_util_plot_plot_graph(mocker):
mocker.patch.object(plot, 'plot_graph')
n = Network('epsg:27700')
n.plot_graph()
plot.plot_graph.assert_called_once()
def test_plot_schedule_delegates_to_util_plot_plot_non_routed_schedule_graph(mocker, network_object_from_test_data):
mocker.patch.object(plot, 'plot_non_routed_schedule_graph')
n = network_object_from_test_data
n.plot_schedule()
plot.plot_non_routed_schedule_graph.assert_called_once()
def test_attempt_to_simplify_already_simplified_network_throws_error():
n = Network('epsg:27700')
n.graph.graph["simplified"] = True
with pytest.raises(RuntimeError) as error_info:
n.simplify()
assert "cannot simplify" in str(error_info.value)
def test_simplifing_puma_network_results_in_correct_record_of_removed_links_and_expected_graph_data():
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
link_ids_pre_simplify = set(dict(n.links()).keys())
n.simplify()
assert n.is_simplified()
link_ids_post_simplify = set(dict(n.links()).keys())
assert link_ids_post_simplify & link_ids_pre_simplify
new_links = link_ids_post_simplify - link_ids_pre_simplify
deleted_links = link_ids_pre_simplify - link_ids_post_simplify
assert set(n.link_simplification_map.keys()) == deleted_links
assert set(n.link_simplification_map.values()) == new_links
assert (set(n.link_id_mapping.keys()) & new_links) == new_links
report = n.generate_validation_report()
assert report['routing']['services_have_routes_in_the_graph']
assert report['schedule']['schedule_level']['is_valid_schedule']
def test_simplified_network_saves_to_correct_dtds(tmpdir, network_dtd, schedule_dtd):
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
n.simplify()
n.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
generated_schedule_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_schedule_file_path)
assert schedule_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
schedule_dtd.error_log.filter_from_errors())
def test_simplifying_network_with_multi_edges_resulting_in_multi_paths():
n = Network('epsg:27700')
n.add_nodes({
'n_-1': {'x': -1, 'y': -1, 's2_id': -1},
'n_0': {'x': 0, 'y': 0, 's2_id': 0},
'n_1': {'x': 1, 'y': 1, 's2_id': 1},
'n_2': {'x': 2, 'y': 2, 's2_id': 2},
'n_3': {'x': 3, 'y': 3, 's2_id': 3},
'n_4': {'x': 4, 'y': 4, 's2_id': 4},
'n_5': {'x': 5, 'y': 5, 's2_id': 5},
'n_6': {'x': 6, 'y': 5, 's2_id': 6},
})
n.add_links({
'l_-1': {'from': 'n_-1', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_0': {'from': 'n_0', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_1': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_2': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_3': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_4': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_5': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_6': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_7': {'from': 'n_4', 'to': 'n_5', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_8': {'from': 'n_4', 'to': 'n_6', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}}
})
n.simplify()
assert set(n.link_simplification_map) == {'l_4', 'l_1', 'l_5', 'l_3', 'l_6', 'l_2'}
def test_reading_back_simplified_network():
    # simplified networks carry an additional geometry attribute and some of their attributes are
    # composite, e.g. a link now refers to a number of osm ways, each with a unique id
n = read.read_matsim(path_to_network=simplified_network, epsg='epsg:27700',
path_to_schedule=simplified_schedule)
number_of_simplified_links = 659
links_with_geometry = n.extract_links_on_edge_attributes(conditions={'geometry': lambda x: True})
assert len(links_with_geometry) == number_of_simplified_links
for link in links_with_geometry:
attribs = n.link(link)
if 'attributes' in attribs:
assert not 'geometry' in attribs['attributes']
for k, v in attribs['attributes'].items():
if isinstance(v['text'], str):
assert not ',' in v['text']
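# Note on the test above: links of a simplified network aggregate the OSM ways they
# replace, so a nested attribute value can hold several ids at once. A hypothetical
# shape (ids invented for illustration) would be
#   {'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long',
#                   'text': {'26997928', '26997929'}}}
# i.e. a set of values rather than a single comma-separated string, which is what the
# `not ',' in v['text']` assertion guards against.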
def test_network_with_missing_link_attribute_elem_text_is_read_and_able_to_save_again(tmpdir):
n = read.read_matsim(path_to_network=network_link_attrib_text_missing, epsg='epsg:27700')
n.write_to_matsim(tmpdir)
def test_node_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_node(1, {'a': {'b': 1}})
n.add_node(2, {'a': {'b': 4}})
output_series = n.node_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_node(1, {'b': 1})
n.add_node(2, {'b': 4})
output_series = n.node_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_keys(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'])
df_to_compare = pd.DataFrame({'x': {'101982': '528704.1425925883', '101986': '528835.203274008'},
'y': {'101982': '182068.78193707118', '101986': '182006.27331298392'}})
assert_frame_equal(df, df_to_compare)
def test_node_attribute_data_under_keys_with_named_index(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'], index_name='index')
assert df.index.name == 'index'
def test_node_attribute_data_under_keys_generates_key_for_nested_data(network1):
network1.add_node('1', {'key': {'nested_value': {'more_nested': 4}}})
df = network1.node_attribute_data_under_keys([{'key': {'nested_value': 'more_nested'}}])
assert isinstance(df, pd.DataFrame)
assert 'key::nested_value::more_nested' in df.columns
def test_node_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.node_attribute_data_under_keys(['x'], index_name='index')
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': {'b': 1}})
n.add_link('1', 1, 2, attribs={'a': {'b': 4}})
output_series = n.link_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'b': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
output_series = n.link_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_keys(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'])
df_to_compare = pd.DataFrame({'modes': {'0': ['car']}, 'freespeed': {'0': 4.166666666666667},
'capacity': {'0': 600.0}, 'permlanes': {'0': 1.0}})
assert_frame_equal(df, df_to_compare)
def test_link_attribute_data_under_keys_with_named_index(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'], index_name='index')
assert df.index.name == 'index'
def test_link_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.link_attribute_data_under_keys(['modes'])
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_keys_generates_key_for_nested_data(network1):
df = network1.link_attribute_data_under_keys([{'attributes': {'osm:way:access': 'text'}}])
assert isinstance(df, pd.DataFrame)
assert 'attributes::osm:way:access::text' in df.columns
def test_add_node_adds_node_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
assert n.graph.has_node(1)
assert n.node(1) == {'a': 1}
def test_add_node_adds_node_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_node(1)
assert n.node(1) == {}
assert n.graph.has_node(1)
def test_add_multiple_nodes():
n = Network('epsg:27700')
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {'x': 1, 'y': 2, 'id': 1}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert reindexing_dict == {}
def test_add_nodes_with_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
def test_add_nodes_with_multiple_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
n.add_node(2, {})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {}
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
assert 2 in reindexing_dict
assert n.graph.has_node(reindexing_dict[2])
assert n.node(reindexing_dict[2]) == {'x': 2, 'y': 2, 'id': reindexing_dict[2]}
def test_add_edge_generates_a_link_id_and_delegated_to_add_link_id(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, None, {'a': 1}, False)
def test_add_edge_generates_a_link_id_with_specified_multiidx(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, multi_edge_idx=10, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, 10, {'a': 1}, False)
def test_adding_multiple_edges():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
if n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
elif n.link_id_mapping['1'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['0'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
else:
raise AssertionError()
def test_adding_multiple_edges_between_same_nodes():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.number_of_edges(1, 2) == 3
assert n.graph.has_edge(2, 3)
assert len(n.link_id_mapping) == 4
def test_add_link_adds_edge_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.edge(1, 2) == {0: {'a': 1, 'from': 1, 'id': '0', 'to': 2}}
def test_add_link_adds_edge_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
def test_adding_multiple_links():
n = Network('epsg:27700')
n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
assert '0' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '1' in n.link_id_mapping
assert '0' in reindexing_dict
assert len(n.link_id_mapping) == 3
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs['1'], {'from': 2, 'to': 3, 'id': '1'})
def test_adding_multiple_links_with_multiple_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
n.add_link('1', 10, 20)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 4
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs[reindexing_dict['1']], {'from': 2, 'to': 3, 'id': reindexing_dict['1']})
def test_adding_loads_of_multiple_links_between_same_nodes():
n = Network('epsg:27700')
reindexing_dict, links_and_attribs = n.add_links({i: {'from': 1, 'to': 2} for i in range(10)})
assert_semantically_equal(links_and_attribs, {i: {'from': 1, 'to': 2, 'id': i} for i in range(10)})
assert_semantically_equal(n.link_id_mapping, {i: {'from': 1, 'to': 2, 'multi_edge_idx': i} for i in range(10)})
def test_adding_multiple_links_with_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
n.add_links({'2': {'from': 1, 'to': 2}, '3': {'from': 1, 'to': 2}, '4': {'from': 2, 'to': 3}})
assert n.link_id_mapping['2'] == {'from': 1, 'to': 2, 'multi_edge_idx': 2}
assert n.link_id_mapping['3'] == {'from': 1, 'to': 2, 'multi_edge_idx': 3}
assert n.link_id_mapping['4'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_and_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links(
{'0': {'from': 1, 'to': 2}, '1': {'from': 1, 'to': 2}, '2': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 5
assert_semantically_equal(n.link_id_mapping[reindexing_dict['0']], {'from': 1, 'to': 2, 'multi_edge_idx': 2})
assert_semantically_equal(n.link_id_mapping[reindexing_dict['1']], {'from': 1, 'to': 2, 'multi_edge_idx': 3})
def test_adding_multiple_links_missing_some_from_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_from_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_some_to_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_to_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_links_with_different_non_overlapping_attributes():
    # links with non-overlapping attribute sets must not pick up NaN placeholder
    # attributes for keys they were never given
n = Network('epsg:27700')
reindexing_dict, links_and_attributes = n.add_links({
'2': {'from': 1, 'to': 2, 'speed': 20},
'3': {'from': 1, 'to': 2, 'capacity': 123},
'4': {'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
assert reindexing_dict == {}
assert_semantically_equal(links_and_attributes, {
'2': {'id': '2', 'from': 1, 'to': 2, 'speed': 20},
'3': {'id': '3', 'from': 1, 'to': 2, 'capacity': 123},
'4': {'id': '4', 'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
def test_adding_multiple_links_to_same_edge_clashing_with_existing_edge():
n = Network('epsg:27700')
n.add_link(link_id='0', u='2', v='2', attribs={'speed': 20})
n.add_links({'1': {'from': '2', 'to': '2', 'something': 20},
'2': {'from': '2', 'to': '2', 'capacity': 123}})
assert_semantically_equal(dict(n.links()), {'0': {'speed': 20, 'from': '2', 'to': '2', 'id': '0'},
'1': {'from': '2', 'to': '2', 'something': 20.0, 'id': '1'},
'2': {'from': '2', 'to': '2', 'capacity': 123.0, 'id': '2'}})
assert_semantically_equal(n.link_id_mapping, {'0': {'from': '2', 'to': '2', 'multi_edge_idx': 0},
'1': {'from': '2', 'to': '2', 'multi_edge_idx': 1},
'2': {'from': '2', 'to': '2', 'multi_edge_idx': 2}})
def test_network_modal_subgraph_using_general_subgraph_on_link_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.subgraph_on_link_conditions(conditions={'modes': 'car'}, mixed_dtypes=True)
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={})
assert n.modes() == {'car', 'bike'}
def test_network_modal_subgraph_using_specific_modal_subgraph_method_single_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.modal_subgraph(modes='car')
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_network_modal_subgraph_using_specific_modal_subgraph_method_several_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_bike_graph = n.modal_subgraph(modes=['car', 'bike'])
assert list(car_bike_graph.edges) == [(1, 2, 0), (2, 3, 0), (2, 3, 1)]
def test_links_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_links = n.links_on_modal_condition(modes=['car'])
assert set(car_links) == {'0', '1'}
def test_nodes_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_nodes = n.nodes_on_modal_condition(modes=['car'])
assert set(car_nodes) == {1, 2, 3}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_nodes_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(test_geojson)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node(
'1', {'id': '1', 'x': 508400, 'y': 162050, 's2_id': spatial.generate_index_s2(51.3472033, 0.4449167)})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_links_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(test_geojson)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_intersection_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='intersect')
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_containment(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_complex_geometry_that_falls_outside_s2_region(
network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_find_shortest_path_when_graph_has_no_extra_edge_choices():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_subgraph_is_pre_computed():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_g = n.modal_subgraph(modes='bike')
bike_route = n.find_shortest_path(1, 3, subgraph=bike_g)
assert bike_route == ['0', '2']
def test_find_shortest_path_defaults_to_full_graph():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'freespeed': 3})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'freespeed': 2})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3)
assert bike_route == ['0', '1']
def test_find_shortest_path_when_graph_has_extra_edge_choice_for_freespeed_that_is_obvious():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_graph_has_extra_edge_choice_with_attractive_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '3']
def test_find_shortest_path_and_return_just_nodes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('1', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
bike_route = n.find_shortest_path(1, 3, return_nodes=True)
assert bike_route == [1, 2, 3]
def test_add_link_adds_link_with_specific_multi_idx():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
def test_add_link_generates_new_multi_idx_if_already_exists():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
n.add_link('1', 1, 2, 0)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
assert n.link_id_mapping['1']['multi_edge_idx'] != 0
assert n.graph[1][2][n.link_id_mapping['1']['multi_edge_idx']] == {'from': 1, 'to': 2, 'id': '1'}
def test_reindex_node(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '007')
assert [id for id, attribs in network1.nodes()] == ['007', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '007'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('007', '101986')]
assert network1.link_id_mapping['0']['from'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:39:08', 4: '2020-06-08 19:39:08', 5: '2020-06-08 19:39:08'},
'change_event': {3: 'modify', 4: 'modify', 5: 'modify'}, 'object_type': {3: 'link', 4: 'node', 5: 'node'},
'old_id': {3: '0', 4: '101982', 5: '101982'}, 'new_id': {3: '0', 4: '007', 5: '101982'}, 'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'new_attributes': {
3: "{'id': '0', 'from': '007', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'diff': {3: [('change', 'from', ('101982', '007'))],
4: [('change', 'id', ('101982', '007')), ('change', 'id', ('101982', '007'))],
5: [('change', 'id', ('101982', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(3), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_reindex_node_when_node_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '101986')
node_ids = [id for id, attribs in network1.nodes()]
assert '101986' in node_ids
assert '101982' not in node_ids
assert len(set(node_ids)) == 2
assert network1.node(node_ids[0]) != network1.node(node_ids[1])
def test_reindex_link(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert '0' in network1.link_id_mapping
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '0'
network1.reindex_link('0', '007')
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['007']
assert '0' not in network1.link_id_mapping
assert '007' in network1.link_id_mapping
assert network1.link('007')['from'] == '101982'
assert network1.link('007')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:34:48', 4: '2020-06-08 19:34:48'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'link', 4: 'link'}, 'old_id': {3: '0', 4: '0'}, 'new_id': {3: '007', 4: '0'},
'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'new_attributes': {
3: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'diff': {3: [('change', 'id', ('0', '007')), ('change', 'id', ('0', '007'))],
4: [('change', 'id', ('0', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_names=False, check_dtype=False)
def test_reindex_link_when_link_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
network1.add_link('1', '101986', '101982', attribs={})
network1.reindex_link('0', '1')
link_ids = [id for id, attribs in network1.links()]
assert '1' in link_ids
assert '0' not in link_ids
assert len(set(link_ids)) == 2
assert network1.link(link_ids[0]) != network1.link(link_ids[1])
def test_modify_node_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'b': 1})
assert n.node(1) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 1, 'b': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_names=False,
check_dtype=False)
def test_modify_node_overwrites_existing_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'a': 4})
assert n.node(1) == {'a': 4}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 4}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('change', 'a', (1, 4))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_nodes_adds_and_changes_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.add_node(2, {'b': 1})
n.apply_attributes_to_nodes({1: {'a': 4}, 2: {'a': 1}})
assert n.node(1) == {'a': 4}
assert n.node(2) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-01 15:07:51', 1: '2020-06-01 15:07:51', 2: '2020-06-01 15:07:51',
3: '2020-06-01 15:07:51'}, 'change_event': {0: 'add', 1: 'add', 2: 'modify', 3: 'modify'},
'object_type': {0: 'node', 1: 'node', 2: 'node', 3: 'node'}, 'old_id': {0: None, 1: None, 2: 1, 3: 2},
'new_id': {0: 1, 1: 2, 2: 1, 3: 2}, 'old_attributes': {0: None, 1: None, 2: "{'a': 1}", 3: "{'b': 1}"},
'new_attributes': {0: "{'a': 1}", 1: "{'b': 1}", 2: "{'a': 4}", 3: "{'b': 1, 'a': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)]), ('add', 'id', 2)],
2: [('change', 'a', (1, 4))], 3: [('add', '', [('a', 1)])]}
})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
    assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
from argparse import ArgumentParser
import json
import scipy.io as sio
import sys
import os
import pandas as pd
import numpy as np
def parse_options():
parser = ArgumentParser()
#parser.add_argument("-a", "--all", required=False, default=False,
# action="store_true",
# help="Run all the ML algorithms.")
parser.add_argument("-n", "--number_iterations", required=False,
default=100, type=int,
help="Number of iterations to run the cross validation")
parser.add_argument("-k", "--kFold", required=False,
default=10, type=int,
help="k fold number in Stratified Cross Validation")
parser.add_argument("-d", "--data", required=False,
default="../../Data", type=str,
help="Path to data folder")
parser.add_argument("-m", "--model", required=False,
default="all", type=str,
help="Model name to run. pass 'all' to run all the models")
parser.add_argument("-o", "--output", required=False,
default="outputs/", type=str,
help="Output Folder where results are to stored")
parser.add_argument("--missing_data", required=False,
default=0, type=int,
help="0-> fill it with 0; 1-> Mean Replacement; 2-> Median Replacement")
parser.add_argument("-c", "--combine", required=False,
default=False, action="store_true",
help="An indicator to combine 2 contrasts side by side keeping subject number in mind")
parser.add_argument("--normalize", required=False,
default=False, action="store_true",
help="An indicator to specify the normalization of both training and testing set before every "
"fold in cross validation of SVM RBF Kernel hyperparameter tuning ")
parser.add_argument('-i', '--input', required=True,
default='out/output_scores_testing/Faces_con_0001&Faces_con_0001_389.csv', type=str,
help='Path to input csv file which contains information about the scores ')
parser.add_argument('-dt', '--data_type', required=False,
default='face__aal' , type=str,
help='brodmann for Brodmann data type and face_aal for AAL data_type')
parser.add_argument('-ad', '--additional_data', required=False,
default='../data_info', type=str,
help='Path to folder which contains additional information of the data')
parser.add_argument('-ag','--age_gender', required=False,
default='age', type=str,
                        help="Pass 'age' to test the relevance of age information to the data, or 'gender' to do the same for gender")
parser.add_argument('-mf','--mat_file', required=False,
default='nBack_con_0003.mat', type=str,
help='Matfile name to run experiments on a particular contrast. '
)
parser.add_argument('-cl','--class_label', required=False,
default='12', type=str,
                        help='Class labels: 1 for BD, 2 for Schizo and 3 for control. 12, 23 and 31 select '
                             'pairwise combinations of the same')
options = parser.parse_args()
return options
def data_extraction(data_folder, nClass, mat_file = "Faces_con_0001.mat", type='face_aal' ):
"""
This function currently reads single contrast
:param data_folder: Path to the folder that contains Data
:param nClass: 2: for divinding the labels for biclass, 3: all 3 class in same dataframe
:return: df: When nClass=3 Single panda dataframe containing means of various Region of interest (ROI) of Brain of all the three classes combined
df1, df2, df3: Separated dataframes for each class when nClass is 2
"""
# ----------------- Brodmann---------------------
if type=='brodmann':
contrast_name = mat_file.split(".")[0]
data = sio.loadmat(data_folder + "/" + mat_file)
# Extract Roi names for the DataFrame column names
RoiNames = (data["roiName"][0])
colRoi = []
for i in range(len(RoiNames)):
colRoi.append(data["roiName"][0][i][0])
# prepare ROI data to add it to the dataFrame
data_list = []
[data_list.append(data["datas"][i]) for i in range(len(data["datas"]))]
# Adding all values to the DataFrame: ROI, label and subject id
        df = pd.DataFrame(data_list, columns=colRoi, dtype=np.float64)
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
See license.txt for information related to each open-source library used.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations.
If those directories are not already present, a fallback method for
string-literal directory reorientation may be found in GDELTbase shared class
data at this tag: # A01a - backup path specification.
Any given user's project directory must be specified there.
See also GDELTeda.py, tag # A02b - Project directory path, as any given user's
project directory must be specified for that os.chdir() call, also.
Contents:
A00 - GDELTbase
A01 - shared class data (toolData, localDb)
A01a - backup path specification
Note: Specification at A01a should be changed to suit a user's desired
directory structure, given their local filesystem.
A02 - __init__ w/ instanced data (localFiles)
B00 - class methods
B01 - updateLocalFilesIndex
B02 - clearLocalFilesIndex
B03 - showLocalFiles
B04 - wipeLocalFiles
B05 - extensionToTableName
B06 - isFileDownloaded
B07 - downloadGDELTFile
B08 - downloadGDELTDay
B09 - cleanFile (includes the following field/subfield parser functions)
B09a - themeSplitter
B09b - locationsSplitter
B09c - personsSplitter
B09d - organizationsSplitter
B09e - toneSplitter
B09f - countSplitter
B09g - One-liner date conversion function for post-read_csv use
B09h - llConverter
B10 - cleanTable
B11 - mongoFile
B12 - mongoTable
C00 - main w/ testing
'''
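# A minimal usage sketch (hypothetical calls inferred from the method listing
# above; exact signatures are defined in section B00 below and may differ):
#
#   gBase = GDELTbase()
#   gBase.downloadGDELTDay(<some date>, 'events')   # bulk download w/ column reduction
#   gBase.cleanTable('events')                      # parse/clean raw files to JSON
#   gBase.mongoTable('events')                      # export cleaned records to MongoDB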
import pandas as pd
import numpy as np
import os
import pymongo
import wget
import json
from time import time
from datetime import datetime, tzinfo
from zipfile import ZipFile as zf
from pprint import pprint as pp
from urllib.error import HTTPError
# A00
class GDELTbase:
'''Base object for GDELT data acquisition, cleaning, and storage.
Shared class data:
-----------------
toolData - dict with these key - value pairs:
URLbase - "http://data.gdeltproject.org/gdeltv2/"
path - os.path path objects, 'raw' and 'clean', per-table
names - lists of string column names, per-table, original and reduced
extensions - dict mapping table names to file extensions, per-table
columnTypes - dicts mapping table column names to appropriate types
localDb - dict with these key - value pairs:
client - pymongo.MongoClient()
database - pymongo.MongoClient().capstone
collections - dict mapping table names to suitable mongoDB collections
Instanced class data:
--------------------
localFiles - dict, per-table keys for lists of local 'raw' and 'clean'
filenames
Class methods:
-------------
updateLocalFilesIndex()
clearLocalFilesIndex()
showLocalFiles()
wipeLocalFiles()
extensionToTableName()
isFileDownloaded()
downloadGDELTFile()
downloadGDELTDay()
cleanFile()
cleanTable()
mongoFile()
mongoTable()
'''
# A01 - shared class data
toolData = {}
# A01a - backup path specification
# Failsafe path for local main project directory. Must be changed to suit
# location of any given end-user's 'script' directory in case directory
# 'GDELTdata' is not present one directory up.
toolData['projectPath'] = 'C:\\Users\\urf\\Projects\\WGU capstone'
# Controls generation of datafile download URLs in downloadGDELTDay()/File()
toolData['URLbase'] = "http://data.gdeltproject.org/gdeltv2/"
# Used in forming URLs for datafile download
toolData['extensions'] = {
'events' : "export.CSV.zip",
'gkg' : "gkg.csv.zip",
'mentions' : "mentions.CSV.zip",
}
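  # For example, a GDELT 2.0 events datafile URL is formed as
  #   toolData['URLbase'] + '<YYYYMMDDHHMMSS>.' + toolData['extensions']['events']
  # e.g. "http://data.gdeltproject.org/gdeltv2/20210601000000.export.CSV.zip"
  # (the timestamp shown here is illustrative).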
# These paths are set relative to the location of this script, one directory
# up, in 'GDELTdata', parallel to the script directory.
toolData['path'] = {}
toolData['path']['base']= os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'GDELTdata')
toolData['path']['events'] = {
'table': os.path.join(toolData['path']['base'], 'events'),
'raw': os.path.join(toolData['path']['base'], 'events', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'events', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'events',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'events',
'realtimeClean')
}
toolData['path']['gkg'] = {
'table': os.path.join(toolData['path']['base'], 'gkg'),
'raw': os.path.join(toolData['path']['base'], 'gkg', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'gkg', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeClean')
}
toolData['path']['mentions'] = {
'table': os.path.join(toolData['path']['base'], 'mentions'),
'raw': os.path.join(toolData['path']['base'], 'mentions', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'mentions', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeClean')
}
# These mappings and lists are for recognition of all possible
# column names, and the specific discarding of a number of columns
# which have been predetermined as unnecessary in the context of
# simple EDA.
toolData['names'] = {}
toolData['names']['events'] = {
'original' : [
'GLOBALEVENTID',
'Day',
'MonthYear',
'Year',
'FractionDate',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1KnownGroupCode',
'Actor1EthnicCode',
'Actor1Religion1Code',
'Actor1Religion2Code',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2KnownGroupCode',
'Actor2EthnicCode',
'Actor2Religion1Code',
'Actor2Religion2Code',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'GoldsteinScale',
'NumMentions',
'NumSources',
'NumArticles',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_CountryCode',
'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor1Geo_FeatureID',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code',
'Actor2Geo_ADM2Code',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'Actor2Geo_FeatureID',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_CountryCode',
'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code',
'ActionGeo_Lat',
'ActionGeo_Long',
'ActionGeo_FeatureID',
'DATEADDED',
'SOURCEURL',
],
'reduced' : [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
],
}
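  # For illustration only (hypothetical snippet mirroring the cleaning described
  # for cleanFile() above): raw GDELT events files are tab-delimited and headerless,
  # so a typical read-and-reduce step could look like
  #   df = pd.read_csv(rawPath, sep='\t', names=toolData['names']['events']['original'])
  #   df = df[toolData['names']['events']['reduced']]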
toolData['names']['gkg'] = {
'original' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCollectionIdentifier',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V21Counts',
'V1Themes',
'V2EnhancedThemes',
'V1Locations',
'V2EnhancedLocations',
'V1Persons',
'V2EnhancedPersons',
'V1Organizations',
'V2EnhancedOrganizations',
'V15Tone',
'V21EnhancedDates',
'V2GCAM',
'V21SharingImage',
'V21RelatedImages',
'V21SocialImageEmbeds',
'V21SocialVideoEmbeds',
'V21Quotations',
'V21AllNames',
'V21Amounts',
'V21TranslationInfo',
'V2ExtrasXML',
],
'reduced' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V1Themes',
'V1Locations',
'V1Persons',
'V1Organizations',
'V15Tone',
],
}
toolData['names']['mentions'] = {
'original' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'SentenceID', #
'Actor1CharOffset',#
'Actor2CharOffset',#
'ActionCharOffset',#
'InRawText',
'Confidence',
'MentionDocLen', #
'MentionDocTone',
'MentionDocTranslationInfo', #
'Extras', #
],
'reduced' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
],
}
# These mappings are used in automated dtype application to Pandas
# DataFrame collections of GDELT records, part of preprocessing.
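  # For illustration (hypothetical call): once records are read into a DataFrame,
  # these maps can be applied in one step, e.g.
  #   df = df.astype(toolData['columnTypes']['events'])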
toolData['columnTypes'] = {}
toolData['columnTypes']['events'] = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
'Actor2Geo_FullName': pd.StringDtype(),
'Actor2Geo_Lat': pd.StringDtype(),
'Actor2Geo_Long': pd.StringDtype(),
'ActionGeo_Type': type(1),
'ActionGeo_FullName': pd.StringDtype(),
'ActionGeo_Lat': pd.StringDtype(),
    'ActionGeo_Long': pd.StringDtype(),
#!/usr/bin/env python
# coding: utf-8
from bs4 import BeautifulSoup
from tqdm import tqdm
import numpy as np
import yfinance as yf
import random
import json
import requests
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
import time
import os
import datetime
import argparse
headers = {
"Host": "api.nasdaq.com",
#"Connection": "keep-alive",
'sec-ch-ua': '"Microsoft Edge";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
"Accept": "application/json, text/plain, */*",
"sec-ch-ua-mobile": "?0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.44",
"sec-ch-ua-platform": '"Windows"',
"Origin": "https://www.nasdaq.com",
"Sec-Fetch-Site": "same-site",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://www.nasdaq.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6"
}
headers2 = {
"Host": "www.nasdaq.com",
'sec-ch-ua': '"Google Chromium";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
"Accept": "*/*",
"sec-ch-ua-mobile": "?0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36",
"sec-ch-ua-platform": '"Windows"',
"Origin": "https://www.nasdaq.com",
"Sec-Fetch-Site": "cross-site",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://www.nasdaq.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9"
}
http_proxy = "192.168.3.11:3128"
proxyDict = {
"http" : http_proxy,
"https" : http_proxy,
"ftp" : http_proxy
}
stock_col = ["Open","High","Low","Close","Adj Close","Volume"]
TODAY = datetime.datetime.now()
def save_file(data,stock,i):
if not os.path.exists('gs://xxx/news_data/'):
os.makedirs('gs://xxx/news_data/')
print("saving {} news to file...".format(stock))
file_dir = 'gs://xxx/news_data/'+stock+'_'+str(i)+'.csv'
data.to_csv(file_dir,index = False, encoding='utf_8_sig', header = True)
return
def impact_index(factor,d = [1,1,0.2],p = 2):
res = 0
for n,i in enumerate(factor[:-1]):
res += np.sign(i)*(abs(i*100)**p)*d[n]
decay = (factor[-1]+1)/2
if decay > 1:
delta = (decay-1)/10
decay = 1 + delta
return res*decay/3
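# Worked example (illustrative): factor = [0.01, 0.02, 0.03, 0.0] gives
#   res   = 1*1 + 4*1 + 9*0.2 = 6.8   (signed squared %-moves, weighted by d)
#   decay = (0.0 + 1)/2 = 0.5         (flat volume halves the score)
#   impact_index(factor) = 6.8 * 0.5 / 3 ~= 1.13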
def get_stock_data(data,date):
#data_np = data.value()
base_date = business_day_cal(date,-1)
fut_0 = business_day_cal(date,0)
fut_5 = business_day_cal(date,4)
fut_30 = business_day_cal(date,29)
if base_date not in data.index:
return None
if fut_0 not in data.index:
return None
if fut_5 not in data.index:
return None
if fut_30 not in data.index:
return None
res = []
    # 1-, 5- and 30-day percentage price changes
res.append((data.loc[fut_0,'Close']-data.loc[base_date,'Close'])/data.loc[base_date,'Close'])
res.append((data.loc[fut_5,'Close']-data.loc[base_date,'Close'])/data.loc[base_date,'Close'])
res.append((data.loc[fut_30,'Close']-data.loc[base_date,'Close'])/data.loc[base_date,'Close'] )
    # volume 5-day percentage change
curr_r = data.index.get_loc(fut_0)
vol_pas_5 = data.iloc[curr_r-5:curr_r].sum(axis =0)["Volume"]
vol_fut_5 = data.iloc[curr_r:curr_r+5].sum(axis =0)["Volume"]
res.append((vol_fut_5 - vol_pas_5)/vol_pas_5)
res.append(impact_index(res))
return res
def business_day_cal(date,x):
    US_BUSINESS_DAY = CustomBusinessDay(calendar=USFederalHolidayCalendar())
#!/usr/bin/python
import os
import sys
import json
import itertools
import datetime
import numpy as np
import pandas as pd
from windpowerlib.wind_turbine import WindTurbine
from windpowerlib.wind_farm import WindFarm
from windpowerlib.turbine_cluster_modelchain import TurbineClusterModelChain
def load_data(path, filename, header=0):
df = pd.read_csv(path+filename, header=header, index_col=0, parse_dates=True)
return df
def preprocess_wind(df, target, features):
# Convert to standard indexing structure (ref_datetime, valid_datetime)
df.index.name = 'valid_datetime'
idx_ref_datetime = df.index.hour == 1
df.loc[idx_ref_datetime, 'ref_datetime'] = df.index[idx_ref_datetime]
df.loc[:, 'ref_datetime'] = df.loc[:, 'ref_datetime'].fillna(method='ffill')
df = df.set_index('ref_datetime', append=True, drop=True)[df.columns.levels[0][:-1]]
df.index = df.index.reorder_levels(['ref_datetime', 'valid_datetime'])
df = df.sort_index()
# Remove hidden ref_datetime column from multiindex
columns = [df.columns.levels[0][:-1].values, df.columns.levels[1][:-1].values]
df.columns = pd.MultiIndex.from_product(columns)
for farm in df.columns.levels[0]:
df_features = pd.DataFrame(index=df.index)
df_features.loc[:,'Utot10'] = np.sqrt(df.loc[:,(farm, 'U10')]**2+df.loc[:,(farm, 'V10')]**2)
df_features.loc[:,'Theta10'] = np.angle(df.loc[:,(farm, 'U10')]+df.loc[:,(farm, 'V10')]*1j, deg=True)
df_features.loc[:,'Utot100'] = np.sqrt(df.loc[:,(farm, 'U100')]**2+df.loc[:,(farm, 'V100')]**2)
df_features.loc[:,'Theta100'] = np.angle(df.loc[:,(farm, 'U100')]+df.loc[:,(farm, 'V100')]*1j, deg=True)
df_features.loc[:,'Utot10_Acc'] = df_features.loc[:,'Utot10'].diff(1)
df_features.loc[:,'Utot100_Acc'] = df_features.loc[:,'Utot100'].diff(1)
df_features.loc[:,'Utot310'] = df_features.loc[:,'Utot10']**3
df_features.loc[:,'Utot3100'] = df_features.loc[:,'Utot100']**3
df_features.loc[:,'Utotdiff'] = df_features.loc[:,'Utot100']-df_features.loc[:,'Utot10']
for feature in df_features.columns:
df.loc[:, (farm, feature)] = df_features[feature]
for farm in df.columns.levels[0]:
df_features = pd.DataFrame(index=df.index)
df_features.loc[:, 'O_UTot100_Mean'] = df.loc[:, pd.IndexSlice[:, 'Utot100']].mean(axis=1)
df_features.loc[:, 'O_UTot100_Std'] = df.loc[:, pd.IndexSlice[:, 'Utot100']].std(axis=1)
df_features.loc[:, 'O_UTot100_Min'] = df.loc[:, pd.IndexSlice[:, 'Utot100']].min(axis=1)
df_features.loc[:, 'O_UTot100_Max'] = df.loc[:, pd.IndexSlice[:, 'Utot100']].max(axis=1)
df_features.loc[:, 'O_Theta100_Mean'] = df.loc[:, pd.IndexSlice[:, 'Theta100']].mean(axis=1)
df_features.loc[:, 'O_Theta100_Std'] = df.loc[:, pd.IndexSlice[:, 'Theta100']].std(axis=1)
df_features.loc[:, 'O_Theta100_Min'] = df.loc[:, pd.IndexSlice[:, 'Theta100']].min(axis=1)
df_features.loc[:, 'O_Theta100_Max'] = df.loc[:, pd.IndexSlice[:, 'Theta100']].max(axis=1)
df_features.loc[:, 'O_UTot100_Acc_Mean'] = df.loc[:, pd.IndexSlice[:, 'Utot100_Acc']].mean(axis=1)
df_features.loc[:, 'O_UTot100_Acc_Std'] = df.loc[:, pd.IndexSlice[:, 'Utot100_Acc']].std(axis=1)
df_features.loc[:, 'O_UTot100_Acc_Min'] = df.loc[:, pd.IndexSlice[:, 'Utot100_Acc']].min(axis=1)
df_features.loc[:, 'O_UTot100_Acc_Max'] = df.loc[:, pd.IndexSlice[:, 'Utot100_Acc']].max(axis=1)
#other_farms = df.columns.levels[0].difference([farm])
for ofarm in df.columns.levels[0]:
df_features.loc[:, 'O_UTot100_'+ofarm] = df.loc[:, pd.IndexSlice[ofarm, 'Utot100']].values
for ofarm in df.columns.levels[0]:
df_features.loc[:, 'O_Theta100_'+ofarm] = df.loc[:, pd.IndexSlice[ofarm, 'Theta100']].values
for feature in df_features.columns:
df.loc[:, (farm, feature)] = df_features[feature]
# Physical forecast
columns = [df.columns.levels[0], ['wind_speed'], [10, 100]]
    df_weather = pd.DataFrame(columns=pd.MultiIndex.from_product(columns))
# -*- coding: utf-8 -*-
import requests as req
from bs4 import BeautifulSoup as bs
import lxml
from pathlib import Path
# csv writer
import pandas as pd
import time
from .config import BASE_DIR, BASE_URL
# import functions from common.py
from .common import (initialize, get_chrome_driver, makedirs,
get_logger, csv2xl, get_session, write_json)
# function for getting product_rating & reviews
def product_rating(url):
base_url = 'https://widget.trustpilot.com/trustboxes/5763bccae0a06d08e809ecbb/index.html'
driver = get_chrome_driver()
driver.get(base_url+url)
time.sleep(3)
soup = bs(driver.page_source.encode('utf-8'), 'lxml')
time.sleep(3)
star_rating = soup.find(
'div', {'class': 'tp-widget-summary__rating'}).find('span', {'class': 'rating'}).text
reviews = soup.find('div', {'class': 'tp-widget-reviews'}
).find_all('div', {'class': 'tp-widget-review'})
final_review_text = ''
for review in reviews:
name = review.find('div', {'class': 'tp-widget-review__heading'}).find(
'span', {'class': 'tp-widget-review__display-name'}).text+'\n'
date = review.find(
'div', {'class': 'tp-widget-review__date'}).text+'\n'
review_text = review.find(
'div', {'class': 'tp-widget-review__text'}).text+'\n\n'
final_review_text += name+date+review_text
return star_rating, final_review_text
# def_end
# function for getting itinerary_details
def get_itinerary(url):
se = get_session()
r = se.get(BASE_URL+url)
soup = bs(r.content, 'lxml')
itinerary = soup.find('div', {'class': 'itinerary'}).find_all(
'div', {'class': 'day detailed'})
detailed_itinerary = ''
counter = 1
for data in itinerary:
try:
day_number = str(counter)+'. '+data.find('span',
{'class': 'day-number'}).text+' '
# print(day_number)
except:
day_number = ''
try:
location = data.find('span', {'class': 'location'}).text+'\n\n'
except:
location = ''
try:
summary = data.find('div', {'class': 'summary'}).text.replace(
' ', '').replace('\n', '')+'\n\n'
except:
summary = ''
try:
instructions = data.find('div', {'class': 'instructions'}).text.replace(
' ', '').replace('\n', '')+'\n\n'
except:
instructions = ''
try:
day_inclusions = data.find('div', {'class': [
'day-inclusions', 'meals']}).text.replace(' ', '').replace('\n', '')+'\n\n'
except:
day_inclusions = ''
try:
details = data.find('div', {'class': 'details'}).text
components = data.find_all('div', {'class': 'components'})
optional_activity_data = ''
transport_final = ''
land_tour_final = ''
landsport_final = ''
animal_final = ''
watersport_final = ''
nature_final = ''
accommodation_final = ''
exhibit_final = ''
landmark_final = ''
health_and_wellness_final = ''
for component_data in components:
# health_and_wellness
try:
health_and_wellness_final += 'Health And Wellness: '+component_data.find('div', {'class': 'health-and-wellness'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'health-and-wellness'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# transport
try:
transport_final += 'Transport: '+component_data.find('div', {'class': 'transport'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'transport'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# exhibit
try:
exhibit_final += 'Exhibit: '+component_data.find('div', {'class': 'exhibit'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'exhibit'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# landmark
try:
landmark_final += 'Landmark: '+component_data.find('div', {'class': 'landmark'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'landmark'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# land-tour
try:
land_tour_final += 'Landtour: '+component_data.find('div', {'class': 'land-tour'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'land-tour'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# landsport
try:
landsport_final += 'Landsport: '+component_data.find('div', {'class': 'landsport'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace('\n', '')+'\n'+component_data.find('div', {'class': 'landsport'}).find(
'div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n Price per person : '+component_data.find('div', {'class': 'landsport'}).find('div', {'class': 'c-title'}).find('span', {'class': 'budget'}).text.replace('\n', '').replace(' ', '')+'\n'
except:
pass
# animal
try:
animal_final += 'Animal: '+component_data.find('div', {'class': 'animal'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'animal'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# watersport
try:
watersport_final += 'Watersport: '+component_data.find('div', {'class': 'watersport'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'watersport'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n'
except:
pass
# nature
try:
nature_final += 'Nature: '+component_data.find('div', {'class': 'nature'}).find('div', {'class': 'c-title'}).find('h5').text.replace(' ', '').replace(
'\n', '')+'\n'+component_data.find('div', {'class': 'nature'}).find('div', {'class': 'c-title'}).find('div', {'class': 'summary'}).text+'\n\n'
except:
pass
# Accommodation
try:
accommodation_final += 'Accommodation: '+component_data.find(
'div', {'class': 'accommodation'}).find('div', {'class': 'c-title'}).find('h5').text+'\n\n'
except:
pass
optional_activity_data = transport_final + land_tour_final + landsport_final + \
animal_final + watersport_final + nature_final + accommodation_final + \
exhibit_final+landmark_final+health_and_wellness_final
detailed_itinerary += day_number+location + \
summary+instructions+day_inclusions+optional_activity_data
except:
continue
counter += 1
return detailed_itinerary
# def_end
# function for getting tour_details
def get_tour_details(url):
se = get_session()
r = se.get(BASE_URL+url)
soup = bs(r.content, 'lxml')
try:
highlights = soup.find('div', {'id': 'highlights'}).find('p').text
# print(highlights)
except:
highlights = ''
try:
dossier_disclaimer = soup.find(
'div', {'id': 'dossier-disclaimer'}).find('p').text
# print(dossier_disclaimer)
except:
dossier_disclaimer = ''
try:
itinerary_disclaimer = soup.find(
'div', {'id': 'itinerary-disclaimer'}).find_all('p')
# print(itinerary_disclaimer)
except:
itinerary_disclaimer = ''
try:
important_notes = soup.find(
'div', {'id': 'important-notes'}).find_all('p')
# print(important_notes)
except:
important_notes = ''
try:
group_leader_description = soup.find(
'div', {'id': 'group-leader-description'}).find_all('p')
except:
group_leader_description = ''
try:
group_size_notes = soup.find(
'div', {'id': 'group-size-notes'}).find_all('p')
except:
group_size_notes = ''
try:
local_flights = soup.find('div', {'id': 'local-flights'}).find_all('p')
except:
local_flights = ''
try:
what_to_take = soup.find('div', {'id': 'what-to-take'}).find_all('p')
except:
what_to_take = ''
try:
packing_list = soup.find('div', {'id': 'packing-list'}).find_all('p')
# print(packing_list)
except:
packing_list = ''
try:
visas_requirements = soup.find(
'div', {'id': 'visas-and-entry-requirements'}).find_all('p')
except:
visas_requirements = ''
try:
weather = soup.find('div', {'id': 'detailed-trip-notes'}).find_all('p')
except:
weather = ''
try:
optional_activities = soup.find(
'div', {'id': 'optional-activities'}).find_all('p')
except:
optional_activities = ''
try:
travel_insurance = soup.find(
'div', {'id': 'travel-insurance'}).find_all('p')
except:
travel_insurance = ''
try:
emergency_contact = soup.find(
'div', {'id': 'emergency-contact'}).find_all('p')
except:
emergency_contact = ''
try:
local_dress = soup.find('div', {'id': 'local-dress'}).find_all('p')
except:
local_dress = ''
try:
physical_grading = soup.find('div', {'id': 'introduction'}).find('span', {
'class': 'muted'}, text='Physical Grading:').parent.text.replace('Physical Grading: ', '')
except:
physical_grading = ''
try:
whats_included = soup.find(
'div', {'id': 'whats-included'}).find_all('p')
except:
whats_included = ''
return highlights, dossier_disclaimer, itinerary_disclaimer, important_notes, group_leader_description, group_size_notes, local_flights, what_to_take, packing_list, visas_requirements, weather, optional_activities, travel_insurance, emergency_contact, local_dress, physical_grading, whats_included
# Run is a driver Function.
def run(url, infod, sitename, logger):
print(url)
driver = get_chrome_driver()
driver.get(url)
soup = bs(driver.page_source.encode('utf-8'), 'lxml')
highlights, dossier_disclaimer, itinerary_disclaimer, important_notes, group_leader_description, group_size_notes, local_flights, what_to_take, packing_list, visas_requirements, weather, optional_activities, travel_insurance, emergency_contact, local_dress, physical_grading, whats_included = get_tour_details(soup.find(
'ul', {'id': 'trip-summary-nav'}).find_all('a')[-1]['href'])
infod['OPERATOR NAME'].append('<NAME>')
infod['OPERATOR WEBSITE'].append(BASE_URL)
# adventure_name
try:
# print(soup.find('div', {'class': 'title-block'}
# ).find('h1', {'class': 'text-center'}).text)
infod['ADVENTURE NAME'].append(soup.find('div', {'class': 'title-block'}
).find('h1', {'class': 'text-center'}).text)
except:
infod['ADVENTURE NAME'].append('')
# trip_url
try:
infod['TRIP URL'].append(url)
except:
infod['TRIP URL'].append('')
# continent_name
try:
# print(soup.find('div', {'id': 'breadcrumbs'}).find_all('a')[-2])
infod['CONTINENT'].append(soup.find(
'div', {'id': 'breadcrumbs'}).find_all('a')[-2].text)
except:
infod['CONTINENT'].append('')
# country_name
try:
# print(soup.find('div', {'id': 'breadcrumbs'}).find_all('a')[-1].text)
infod['COUNTRY'].append(soup.find(
'div', {'id': 'breadcrumbs'}).find_all('a')[-1].text)
except:
infod['COUNTRY'].append('')
# start_point and end_point
try:
start_finish = soup.find('div', {'class': 'duration-container'}
).find('span', {'class': 'start_finish'}).text.split(' to ')
# print(start_finish[0])
infod['START POINT'].append(start_finish[0])
infod['END POINT'].append(start_finish[1])
except:
infod['START POINT'].append('')
infod['END POINT'].append('')
# trip_duration
try:
# print(soup.find('div', {'class': 'duration-container'}).find('span', {'class': 'duration'}).text.replace(' days', ''))
infod['TRIP DURATION (Days)'].append(soup.find('div', {
'class': 'duration-container'}).find('span', {'class': 'duration'}).text.replace(' days', ''))
except:
infod['TRIP DURATION (Days)'].append('')
# minimum_age
try:
# print(soup.find('div', {'id': 'age-requirement'}).find('h3').text.replace('Age requirement: ','').replace(' ','').replace('+',''))
infod['AGE (MIN)'].append(soup.find('div', {'id': 'age-requirement'}).find(
'h3').text.replace('Age requirement: ', '').replace(' ', '').replace('+', ''))
except:
infod['AGE (MIN)'].append('')
# adventure_overview
try:
# print(soup.find('div', {'id': 'trip-description'}
# ).find('p', {'class': 'visible-desktop'}).text)
infod['ADVENTURE OVERVIEW'].append(soup.find('div', {'id': 'trip-description'}
).find('p', {'class': 'visible-desktop'}).text.replace(' ', ''))
except:
infod['ADVENTURE OVERVIEW'].append('')
# highlights
try:
infod['HIGHLIGHTS'].append(highlights)
except:
infod['HIGHLIGHTS'].append('')
# brief_itinerary_details
try:
itinerary = soup.find('div', {'class': 'itineraries'}
).find_all('div', {'class': 'day'})
brief_itinerary = ''
counter = 1
for data in itinerary:
brief_itinerary += str(counter)+'. '+data.find('span', {'class': 'day-number'}).text+' '+data.find(
'span', {'class': 'location'}).text+'\n'
counter += 1
# print(brief_itinerary)
infod['BRIEF ITINERARY'].append(brief_itinerary)
except:
infod['BRIEF ITINERARY'].append('')
# detail_itineray_details
try:
# print(get_itinerary(soup.find('ul', {'id': 'trip-summary-nav'}).find_all('a')[-2]['href']))
infod['DETAILED ITINERARY'].append(get_itinerary(
soup.find('ul', {'id': 'trip-summary-nav'}).find_all('a')[-2]['href']))
except:
infod['DETAILED ITINERARY'].append('')
# weather
try:
weather_final = ''
for data in weather:
weather_final += data.text+'\n'
infod['WEATHER'].append(weather_final)
except:
infod['WEATHER'].append('')
# availability_status & departure_dates
try:
departure_dates = soup.find('div', {'id': 'departures-list'}
).find_all('div', {'class': 'action'})
available_status = soup.find('div', {'id': 'departures-list'}
).find_all('div', {'class': 'avail'})
final_status = []
for info in available_status:
# print(info.text)
stripped_info = info.text.replace(
'\n', '').replace(' ', '').replace('+', '')
if 'Available' in stripped_info:
final_status.append(
'A '+'('+stripped_info.split('\xa0')[0]+')')
# print(final_status)
dates = []
for data in departure_dates:
try:
dates.append(data.find('a')['href'].split('#date/')[1])
except:
continue
# print(data.find('a')['href'])
# print(dates)
dates_status_dict = dict(zip(dates, final_status))
departure_dates_final = ''
for date, date_status in dates_status_dict.items():
departure_dates_final += date+' '+date_status+'\n'
# print(departure_dates_final)
infod['UPCOMING DEPARTURE & AVAILABILITY'].append(
departure_dates_final)
except:
infod['UPCOMING DEPARTURE & AVAILABILITY'].append('')
infod['CURRENCY'].append('USD')
# price_per_person
try:
infod['PRICE PER PERSON'].append(soup.find('div', {'class': 'price'}
).find('span', {'class': 'price'}).text)
except:
infod['PRICE PER PERSON'].append('')
# booking_url
try:
# print(url.split('#')[0]+'pricing/')
infod['OPERATOR BOOKING URL'].append(url.split('#')[0]+'pricing/')
except:
infod['OPERATOR BOOKING URL'].append('')
# inclusions
try:
whats_included_final = ''
for data in whats_included:
whats_included_final += str(data)
infod['INCLUSIONS'].append(whats_included_final.replace(
'<br/>', '\n\u2022 ').replace('<p>', '\u2022 ').replace('</p>', '')+'\n')
except:
infod['INCLUSIONS'].append('')
# meals
try:
# print(soup.find(
# 'div', {'id': 'meals'}).find('p').text.replace(' ', ''))
infod['MEALS'].append(soup.find(
'div', {'id': 'meals'}).find('p').text.replace(' ', ''))
except:
infod['MEALS'].append('')
# lodging_details
try:
# print(soup.find('div', {'id': 'accommodations'}).find('p').text)
infod['LODGING'].append(soup.find(
'div', {'id': 'accommodations'}).find('p').text)
except:
infod['LODGING'].append('')
# transport
try:
# print(soup.find(
# 'div', {'id': 'transportation'}).find('p').text)
infod['TRANSPORT'].append(soup.find(
'div', {'id': 'transportation'}).find('p').text)
except:
infod['TRANSPORT'].append('')
infod['OTA NAME'].append('G Adventures')
infod['OTA WEBSITE'].append(BASE_URL)
# adventure_type
try:
if 'Cycling' in url.split('#')[1]:
# print('Biking')
infod['ADVENTURE TYPE'].append('Biking')
elif 'Hiking & Trekking' in url.split('#')[1]:
# print('Hiking, Trekking and Mountaineering')
infod['ADVENTURE TYPE'].append(
'Hiking, Trekking and Mountaineering')
elif 'Multisport' in url.split('#')[1]:
# print('Multisport')
infod['ADVENTURE TYPE'].append('Multisport')
except:
infod['ADVENTURE TYPE'].append('')
# adventure_subb_type
try:
infod['ADVENTURE SUB-TYPE'].append(url.split('#')[1])
except:
infod['ADVENTURE SUB-TYPE'].append('')
# difficulty_level and grade
try:
infod['DIFFICULTY LEVEL - BY OPERATOR'].append(physical_grading.split(' - ')[
0])
infod['GRADE - BY OPERATOR'].append(physical_grading.split(' - ')[1])
except:
infod['DIFFICULTY LEVEL - BY OPERATOR'].append('')
infod['GRADE - BY OPERATOR'].append('')
# activity_style
try:
# print(soup.find('div', {'id': 'trip-style'}
# ).find('h3').text.replace('Travel Style: ', ''))
infod['OPERATOR ACTIVITY STYLE'].append(soup.find(
'div', {'id': 'trip-style'}).find('h3').text.replace('Travel Style: ', ''))
except:
infod['OPERATOR ACTIVITY STYLE'].append('')
# packing_list & gears
try:
counter = 1
final_packing_list = ''
for data in packing_list:
final_packing_list += str(counter)+'. ' + \
data.text.replace('\u2022', '\n\u2022')+'\n\n'
# print(str(counter)+'. '+data.text.replace('\u2022','\n\u2022')+'\n')
counter += 1
infod['OPERATOR PACKING LIST - GEAR & DOCUMENT'].append(
final_packing_list)
except:
infod['OPERATOR PACKING LIST - GEAR & DOCUMENT'].append('')
# user_reviews
try:
rating, reviews = product_rating(
soup.find('div', {'class': 'trustpilot-widget'}).find('iframe')['src'].split('index.html')[1])
infod['USER REVIEWS'].append(reviews)
except:
infod['USER REVIEWS'].append('')
# product_code
try:
infod['OPERATOR PRODUCT CODE'].append(soup.find(
'div', {'class': 'trip_code'}).text.replace('Trip Code: ', ''))
except:
infod['OPERATOR PRODUCT CODE'].append('')
# discounts
try:
infod['DISCOUNTS'].append(soup.find('div', {'class': 'p-amount'}).find('a').text.replace(
' ', '').replace('\n', '')+'\n'+soup.find('div', {'class': 'p-expires'}).text)
except:
infod['DISCOUNTS'].append('')
# average_product_rating
try:
rating, reviews = product_rating(
soup.find('div', {'class': 'trustpilot-widget'}).find('iframe')['src'].split('index.html')[1])
infod['AVERAGE PRODUCT RATING'].append(rating)
except:
infod['AVERAGE PRODUCT RATING'].append('')
# images
try:
# print(soup.find('img', {'class': 'page-head'})['src'])
infod['IMAGES'].append(soup.find('img', {'class': 'page-head'})['src'])
except:
infod['IMAGES'].append('')
# extra_fields
try:
# print(soup.find(
# 'div', {'id': 'staff-experts'}).find('p').text)
infod['STAFF & EXPERTS'].append(soup.find(
'div', {'id': 'staff-experts'}).find('p').text)
except:
infod['STAFF & EXPERTS'].append('')
try:
infod['DOSSIER DISCLAIMER'].append(dossier_disclaimer)
except:
infod['DOSSIER DISCLAIMER'].append('')
try:
itinerary_disclaimer_final = ''
for data in itinerary_disclaimer:
itinerary_disclaimer_final += '\u2022'+data.text+'\n\n'
# print(itinerary_disclaimer_final)
infod['ITINERARY DISCLAIMER'].append(itinerary_disclaimer_final)
except:
infod['ITINERARY DISCLAIMER'].append('')
try:
important_notes_final = ''
for data in important_notes:
important_notes_final += str(data)
infod['IMPORTANT NOTES'].append(important_notes_final.replace(
'<p>', '').replace('</p>', '\n\n').replace('<br/>', ''))
except:
infod['IMPORTANT NOTES'].append('')
try:
group_leader_description_final = ''
for data in group_leader_description:
group_leader_description_final += data.text+'\n'
infod['GROUP LEADER DESCRIPTION'].append(
group_leader_description_final)
except:
infod['GROUP LEADER DESCRIPTION'].append('')
try:
group_size_notes_final = ''
for data in group_size_notes:
group_size_notes_final += data.text+'\n'
infod['GROUP SIZE'].append(group_size_notes_final)
except:
infod['GROUP SIZE'].append('')
try:
local_flights_final = ''
for data in local_flights:
local_flights_final += data.text+'\n'
# print(local_flights_final)
infod['LOCAL FLIGHTS'].append(local_flights_final)
except:
infod['LOCAL FLIGHTS'].append('')
try:
what_to_take_final = ''
for data in what_to_take:
what_to_take_final += '\u2022 '+data.text+'\n\n'
# print(what_to_take_final)
infod['WHAT TO TAKE'].append(what_to_take_final)
except:
infod['WHAT TO TAKE'].append('')
try:
visas_requirements_final = ''
for data in visas_requirements:
visas_requirements_final += '\u2022 '+data.text+'\n\n'
# print(visas_requirements_final)
infod['VISAS REQUIREMENTS'].append(visas_requirements_final)
except:
infod['VISAS REQUIREMENTS'].append('')
try:
optional_activities_final = ''
counter = 1
for data in optional_activities:
optional_activities_final += str(counter)+'. '+str(data)
counter += 1
# print(optional_activities_final.replace('<br/>', '\n').replace('<p>', '').replace('</p>', '\n'))
infod['OPTIONAL ACTIVITIES'].append(optional_activities_final.replace(
'<br/>', '\n').replace('<p>', '').replace('</p>', '\n'))
except:
infod['OPTIONAL ACTIVITIES'].append('')
try:
travel_insurance_final = ''
for data in travel_insurance:
travel_insurance_final += data.text
# print(travel_insurance_final)
infod['TRAVEL INSURANCE'].append(travel_insurance_final)
except:
infod['TRAVEL INSURANCE'].append('')
try:
emergency_contact_final = ''
counter = 1
for data in emergency_contact:
emergency_contact_final += '\n\n'+str(counter)+'. '+str(data)
counter += 1
# print(emergency_contact_final.replace('<br/>', '\n').replace('<p>', '').replace('</p>', ''))
infod['EMERGENCY CONTACT'].append(emergency_contact_final.replace(
'<br/>', '\n').replace('<p>', '').replace('</p>', ''))
except:
infod['EMERGENCY CONTACT'].append('')
try:
local_dress_final = ''
for data in local_dress:
local_dress_final += data.text+'\n'
# print(local_dress_final)
infod['LOCAL DRESS'].append(local_dress_final)
except:
infod['LOCAL DRESS'].append('')
driver.quit()
# function to write links to a text file
def write_link_txt(url):
file_path = 'links_gadventures.txt'
with open(file_path, "a") as textfile:
textfile.write(BASE_URL+url+'\n')
# def_end
# function to get the links
def get_links(activity, link):
se = get_session()
r = se.get(link.format('1'))
soup = bs(r.content, 'lxml')
try:
page = int(
soup.find('div', {'class': 'pagination'}).find_all('li')[-2].text)
except:
page = ''
if page:
for x in range(1, page+1):
r = se.get(link.format(x))
soup = bs(r.content, 'lxml')
link_div = soup.find('div', {'id': 'results'}).find_all(
'a', {'class': 'trip-tile-map'})
for result_link in link_div:
# print(result_link['href'])
write_link_txt(result_link['href']+'#'+activity)
else:
link_div = soup.find('div', {'id': 'results'}).find_all(
'a', {'class': 'trip-tile-map'})
for result_link in link_div:
write_link_txt(result_link['href']+'#'+activity)
# def_end
# Execute is a root funtion it starts the scraping process.
def execute(sitename):
makedirs(sitename)
logger = get_logger(sitename)
fname, infod, column_names = initialize(sitename)
activity_dict = {'Cycling': 'https://www.gadventures.com/search/?page={}&f=a7a3ea2baafa+612e33ca252a', 'Hiking & Trekking': 'https://www.gadventures.com/search/?page={}&f=612e33ca252a', 'Multisport': 'https://www.gadventures.com/search/?page={}&f=deae81eafd2d',
'Multisport': 'https://www.gadventures.com/search/?page={}&f=dossier_code=CRAC&dossier_code=CRSM&dossier_code=ONAP&dossier_code=AVHB&dossier_code=ATHB&dossier_code=SPHK&dossier_code=SEGL&dossier_code=ONSA&dossier_code=SEEM&dossier_code=DCAA&dossier_code=NUAB&dossier_code=DJJA&dossier_code=NUSA'}
# for activity, activity_id in activity_dict.items():
# get_links(activity, activity_id)
infile = BASE_DIR / 'core/links_gadventures.txt'
num_lines = sum(1 for line in open(infile))
with open(infile, 'r') as file:
links = file.read().splitlines()
for idx, link in enumerate(links):
# print(link)
run(link, infod, sitename, logger)
        df = pd.DataFrame(infod, columns=column_names)
import pandas as pd
import numpy as np
from .content import test_questions_analisys as qa
from .content import tests_analisys as ta
from .output import output as out
def Execute(cursor, courseName):
    # "questions NOT LIKE '__'" filters out empty question sets such as {}
    # "attempts < 4" keeps only the first three attempts; later attempts are not of interest
request = """
select course_name, page, problem_header, problem_id, attempts, questions, grade, max_grade, execute_time
from problem_check
where questions NOT LIKE '__'
and attempts < 4
"""
cursor.execute(request)
data = cursor.fetchall()
if (data):
columns_names = []
for i in cursor.description[:]:
columns_names.append(i[0])
        df = pd.DataFrame(data=data, columns=columns_names)
import os
import json
import sys
import argparse
from pathlib import Path
import pandas as pd
from tqdm import tqdm
DESCRIPTION = """
Build a csv file containing necessary information of a COCO dataset that is
compatible with this package.
"""
def get_bbox(bbox):
"""Get bbox of type (xmin, ymin, xmax, ymax) from a bbox of type
(x, y, w, h)"""
xmin, ymin, w, h = bbox
xmin = round(xmin)
ymin = round(ymin)
xmax = round(xmin + w) - 1
ymax = round(ymin + h) - 1
return [xmin, ymin, xmax, ymax]
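# e.g. get_bbox([10, 20, 30, 40]) -> [10, 20, 39, 59]
# (a 30x40 box anchored at (10, 20), converted to inclusive corner coordinates)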
def process_df(df_images, df_objects):
if df_objects is None:
df_merge = df_images[["id", "file_name", "width", "height"]]
df_merge = df_merge.set_index("id")
else:
# Merge
df = pd.merge(df_objects, df_images, left_on="image_id", right_on="id")
df = df[["image_id", "bbox", "category_id",
"file_name", "height", "width"]]
# Convert bboxes to integers
df["bbox"] = df["bbox"].apply(get_bbox)
# Merge all objects within each image
def transform(sub_df):
image_id, file_name, height, width = sub_df.iloc[0][
["image_id", "file_name", "height", "width"]]
category_ids = sub_df["category_id"].tolist()
category_ids = ",".join(map(str, category_ids))
bboxes = sub_df["bbox"].tolist()
bboxes = sum(bboxes, [])
bboxes = ",".join(map(str, bboxes))
return pd.Series({
"image_id": image_id, "img_name": file_name, "width": width,
"height": height, "bboxes": bboxes, "labels": category_ids
})
df_merge = df.groupby("image_id").apply(transform)
assert len(df_merge) == df_objects["image_id"].nunique()
return df_merge
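# When annotations are present, each row of the returned frame looks roughly like
# (values illustrative):
#   image_id=42, img_name="000000000042.jpg", width=640, height=480,
#   bboxes="10,20,39,59,100,150,199,249", labels="1,3"
# i.e. bboxes is a flat, comma-joined sequence of (xmin, ymin, xmax, ymax) per
# object, aligned with the comma-joined category ids in labels.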
def main(args):
# Read annotation file
print("Reading annotation file...")
with open(args.ann_path) as fin:
ann = json.load(fin)
print(f"Number of images: {len(ann['images'])}, number of annotations: "
f"{len(ann['annotations']) if 'annotations' in ann else -1}")
# Convert to dataframes
df_images = pd.DataFrame.from_records(ann["images"])
if "annotations" in ann:
        df_objects = pd.DataFrame.from_records(ann["annotations"])
#Analyze statistics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import alphapept.io
import os
import seaborn as sns
from tqdm.notebook import tqdm as tqdm
import warnings
def prepare_files(path1, path2):
    df1 = pd.read_hdf(path1, 'protein_fdr')
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
# Wrapping sklearn's confusion matrix
def confusion_error_matrix(y_row, y_col, target_names=None, normalize=False):
"""
Wrapper confusion_matrix of sklearn
Parameters
y_row & y_col: if y_row is y_pred and y_col is y_true,
Confusion Matrix is `Pre. / Cor.`
if y_row is y_true and y_col is y_pred,
Confusion Matrix is `Cor. / Pre.`
target_names: [string], List of target(label) name
normalize: bool: if normalize is True, confusion matrix is normalized, default False
Returns
conf_max: pd.DataFrame: confusion matrix
"""
conf_mat = confusion_matrix(y_row, y_col)
if normalize:
        # Normalize so that each column sums to 1
        conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=0)[np.newaxis, :]
if target_names is not None:
        conf_mat = pd.DataFrame(conf_mat, columns=target_names, index=target_names)
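# Minimal usage sketch (assuming the function goes on to return conf_mat, as the
# docstring states; labels are illustrative):
#   y_pred = [0, 1, 2, 2]
#   y_true = [0, 1, 1, 2]
#   cm = confusion_error_matrix(y_pred, y_true, target_names=['a', 'b', 'c'])
#   # rows are predictions, columns are ground truth ("Pre. / Cor.")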
from datetime import date
import unittest
import dolphindb as ddb
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
import dolphindb.settings as keys
import statsmodels.api as sm
def createdata():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
stript = '''
time1=10:01:01 join 10:01:03 join 10:01:05 join 10:01:05
symbol1=take(`X`Z,4)
price1=3 3.3 3.2 3.1
size1=100 200 50 10
Trade=table(time1 as time,symbol1 as symbol,price1 as price,size1 as size)
time2=10:01:01 join 10:01:02 join 10:01:02 join 10:01:03
symbol2=take(`X`Z,4)
ask=90 150 100 52
bid=70 200 200 68
Quote=table(time2 as time,symbol2 as symbol,ask as ask,bid as bid)
share Trade as shareTrade
share Quote as shareQuote
login("admin", "123456")
if(existsDatabase("dfs://testmergepart"))
dropDatabase("dfs://testmergepart")
db = database("dfs://testmergepart", VALUE, "X" "Z")
pt1 = db.createPartitionedTable(Trade,`pt1,`symbol).append!(Trade)
pt2 = db.createPartitionedTable(Quote,`pt2,`symbol).append!(Quote)
'''
s.run(stript)
s.close()
class TestTable(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
createdata()
cls.pd_left = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:03', '1970-01-01T10:01:05', '1970-01-01T10:01:05']),
'symbol': ["X", "Z", "X", "Z"],
'price': [3, 3.3, 3.2, 3.1],
'size': [100, 200, 50, 10]})
cls.pdf_right = pd.DataFrame({'time': pd.to_datetime(
['1970-01-01T10:01:01', '1970-01-01T10:01:02', '1970-01-01T10:01:02', '1970-01-01T10:01:03']),
'symbol': ["X", "Z", "X", "Z"],
'ask': [90, 150, 100, 52],
'bid': [70, 200, 200, 68]})
@classmethod
def tearDownClass(cls):
script='''
undef((exec name from objs(true) where shared=1),SHARED)
if(existsDatabase('dfs://testmergepart')){
dropDatabase('dfs://testmergepart')
}
'''
cls.s.run(script)
def test_create_table_by_python_dictionary(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
tmp = self.s.table(data=data, tableAliasName="tmp")
re = self.s.run("tmp")
df = pd.DataFrame(data)
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_create_table_by_pandas_dataframe(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
df = pd.DataFrame(data)
tmp = self.s.table(data=df, tableAliasName="tmp")
re = self.s.run("tmp")
assert_frame_equal(tmp.toDF(), df)
assert_frame_equal(re, df)
def test_table_toDF(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
df = self.s.run("select * from loadText('{data}')".format(data=DATA_DIR + "/USPrices_FIRST.csv"))
self.assertEqual(len(tmp.toDF()), len(df))
assert_frame_equal(tmp.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_showSQL(self):
tmp = self.s.loadText(DATA_DIR + "/USPrices_FIRST.csv")
sql = tmp.showSQL()
tbName = tmp.tableName()
self.assertEqual(sql, 'select PERMNO,date,SHRCD,TICKER,TRDSTAT,HEXCD,CUSIP,DLSTCD,DLPRC,'
'DLRET,BIDLO,ASKHI,PRC,VOL,RET,BID,ASK,SHROUT,CFACPR,CFACSHR,OPENPRC '
'from {tbName}'.format(tbName=tbName))
self.s.run("undef", tbName)
def test_table_sql_select_where(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01')
df = self.s.run("select PERMNO,date from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp.select(['PERMNO', 'date']).where(tmp.date > '2010.01.01').sort(['date desc'])
df = self.s.run(
"select PERMNO,date from loadText('{data}') where date>2010.01.01 order by date desc".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
re = tmp[tmp.date > '2010.01.01']
df = self.s.run("select * from loadText('{data}') where date>2010.01.01".format(data=data))
self.assertEqual(re.rows, 1510)
assert_frame_equal(re.toDF(), df)
tbName = tmp.tableName()
self.s.run("undef", tbName)
def test_table_sql_groupby(self):
data = DATA_DIR + "/USPrices_FIRST.csv"
tmp = self.s.loadText(data)
origin = tmp.toDF()
re = tmp.groupby('PERMNO').agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO".format(data=data))
self.assertEqual((re['PERMNO'] == 10001).all(), True)
self.assertAlmostEqual(re['sum_bid'][0], 59684.9775)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum']}).toDF()
df = self.s.run("select sum(bid) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 3)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO', 'date']).agg({'bid': ['sum'], 'ask': ['sum']}).toDF()
df = self.s.run("select sum(bid),sum(ask) from loadText('{data}') group by PERMNO,date".format(data=data))
self.assertEqual(re.shape[1], 4)
self.assertEqual(len(re), 6047)
self.assertEqual((origin['BID'] == re['sum_bid']).all(), True)
self.assertEqual((origin['ASK'] == re['sum_ask']).all(), True)
assert_frame_equal(re, df)
re = tmp.groupby(['PERMNO']).agg2([ddb.wsum, ddb.wavg], [('bid', 'ask')]).toDF()
df = self.s.run("select wsum(bid,ask),wavg(bid,ask) from loadText('{data}') group by PERMNO".format(data=data))
assert_frame_equal(re, df)
def test_table_sql_contextby(self):
data = {'sym': ['A', 'B', 'B', 'A', 'A'], 'vol': [1, 3, 2, 5, 4], 'price': [16, 31, 28, 19, 22]}
dt = self.s.table(data=data, tableAliasName="tmp")
re = dt.contextby('sym').agg({'price': [ddb.sum]}).toDF()
df = self.s.run("select sym,sum(price) from tmp context by sym")
self.assertEqual((re['sym'] == ['A', 'A', 'A', 'B', 'B']).all(), True)
self.assertEqual((re['sum_price'] == [57, 57, 57, 59, 59]).all(), True)
assert_frame_equal(re, df)
re = dt.contextby(['sym', 'vol']).agg({'price': [ddb.sum]}).toDF()
df = self.s.run("select sym,vol,sum(price) from tmp context by sym,vol")
self.assertEqual((re['sym'] == ['A', 'A', 'A', 'B', 'B']).all(), True)
self.assertEqual((re['vol'] == [1, 4, 5, 2, 3]).all(), True)
self.assertEqual((re['sum_price'] == [16, 22, 19, 28, 31]).all(), True)
assert_frame_equal(re, df)
re = dt.contextby('sym').agg2([ddb.wsum, ddb.wavg], [('price', 'vol')]).toDF()
df = self.s.run("select sym,vol,price,wsum(price,vol),wavg(price,vol) from tmp context by sym")
assert_frame_equal(re, df)
def test_table_sql_pivotby(self):
dt = self.s.table(data={'sym': ['C', 'MS', 'MS', 'MS', 'IBM', 'IBM', 'C', 'C', 'C'],
'price': [49.6, 29.46, 29.52, 30.02, 174.97, 175.23, 50.76, 50.32, 51.29],
'qty': [2200, 1900, 2100, 3200, 6800, 5400, 1300, 2500, 8800],
'timestamp': pd.date_range('2019-06-01', '2019-06-09')}, tableAliasName="tmp")
re = dt.pivotby(index='timestamp', column='sym', value='price').toDF()
expected = self.s.run('select price from tmp pivot by timestamp,sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
re = dt.pivotby(index='timestamp.month()', column='sym', value='last(price)').toDF()
expected = self.s.run('select last(price) from tmp pivot by timestamp.month(),sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
re = dt.pivotby(index='timestamp.month()', column='sym', value='count(price)').toDF()
expected = self.s.run('select count(price) from tmp pivot by timestamp.month(),sym')
self.assertEqual(re.equals(expected), True)
assert_frame_equal(re, expected)
tbName = dt.tableName()
self.s.run("undef", tbName)
def test_table_sql_merge(self):
dt1 = self.s.table(data={'id': [1, 2, 3, 3], 'value': [7, 4, 5, 0]}, tableAliasName="t1")
dt2 = self.s.table(data={'id': [5, 3, 1], 'qty': [300, 500, 800]}, tableAliasName="t2")
re = dt1.merge(right=dt2, on='id').toDF()
expected = self.s.run('select * from ej(t1,t2,"id")')
assert_frame_equal(re, expected)
re = dt1.merge(right=dt2, on='id', how='left').toDF()
expected = self.s.run('select * from lj(t1,t2,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
re = dt1.merge(right=dt2, on='id', how='outer').toDF()
expected = self.s.run('select * from fj(t1,t2,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
re = dt2.merge(right=dt1, on='id', how='left semi').toDF()
expected = self.s.run('select * from lsj(t2,t1,"id")')
re.fillna(0, inplace=True)
expected.fillna(0, inplace=True)
assert_frame_equal(re, expected)
self.s.run("undef", dt1.tableName())
self.s.run("undef", dt2.tableName())
    def test_table_sql_merge_asof(self):
dt1 = self.s.table(data={'id': ['A', 'A', 'A', 'B', 'B'],
'date': pd.to_datetime(
['2017-02-06', '2017-02-08', '2017-02-10', '2017-02-07', '2017-02-09']),
'price': [22, 23, 20, 100, 102]},
tableAliasName="t1")
dt2 = self.s.table(data={'id': ['A', 'A', 'B', 'B', 'B'],
'date': pd.to_datetime(
['2017-02-07', '2017-02-10', '2017-02-07', '2017-02-08', '2017-02-10'])},
tableAliasName="t2")
re = dt2.merge_asof(right=dt1, on=['id', 'date']).toDF()
expected = self.s.run('select * from aj(t2,t1,`id`date)')
assert_frame_equal(re, expected)
def test_table_sql_merge_cross(self):
dt1 = self.s.table(data={'year': [2010, 2011, 2012]}, tableAliasName="t1")
dt2 = self.s.table(data={'ticker': ['IBM', 'C', 'AAPL']}, tableAliasName="t2")
re = dt1.merge_cross(dt2).toDF()
expected = self.s.run('select * from cj(t1,t2)')
assert_frame_equal(re, expected)
def test_table_sql_merge_window(self):
dt1 = self.s.table(data={'sym': ["A", "A", "B"],
'time': [np.datetime64('2012-09-30 09:56:06'), np.datetime64('2012-09-30 09:56:07'),
np.datetime64('2012-09-30 09:56:06')],
'price': [10.6, 10.7, 20.6]},
tableAliasName="t1")
dt2 = self.s.table(
data={'sym': ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "B", "B", "B", "B", "B", "B", "B"],
'time': pd.date_range(start='2012-09-30 09:56:01', end='2012-09-30 09:56:10', freq='s').append(
pd.date_range(start='2012-09-30 09:56:01', end='2012-09-30 09:56:10', freq='s')),
'bid': [10.05, 10.15, 10.25, 10.35, 10.45, 10.55, 10.65, 10.75, 10.85, 10.95, 20.05, 20.15, 20.25,
20.35, 20.45, 20.55, 20.65, 20.75, 20.85, 20.95],
'offer': [10.15, 10.25, 10.35, 10.45, 10.55, 10.65, 10.75, 10.85, 10.95, 11.05, 20.15, 20.25, 20.35,
20.45, 20.55, 20.65, 20.75, 20.85, 20.95, 21.01],
'volume': [100, 300, 800, 200, 600, 100, 300, 800, 200, 600, 100, 300, 800, 200, 600, 100, 300, 800,
200, 600]},
tableAliasName="t2")
re = dt1.merge_window(right=dt2, leftBound=-5, rightBound=0, aggFunctions="avg(bid)", on=['sym', 'time']).toDF()
expected = self.s.run('select * from wj(t1,t2,-5:0,<avg(bid)>,`sym`time)')
assert_frame_equal(re, expected)
re = dt1.merge_window(right=dt2, leftBound=-5, rightBound=-1,
aggFunctions=["wavg(bid,volume)", "wavg(offer,volume)"], on=["sym", "time"]).toDF()
expected = self.s.run('select * from wj(t1,t2,-5:-1,<[wavg(bid,volume), wavg(offer,volume)]>,`sym`time)')
assert_frame_equal(re, expected)
def test_table_chinese_column_name(self):
df = pd.DataFrame({'编号':[1, 2, 3, 4, 5], '序号':['壹','贰','叁','肆','伍']})
tmp = self.s.table(data=df, tableAliasName="chinese_t")
res=tmp.toDF()
assert_array_equal(res['编号'], [1, 2, 3, 4, 5])
assert_array_equal(res['序号'], ['壹','贰','叁','肆','伍'])
def test_table_top_with_other_clause(self):
df = pd.DataFrame({'id': [10, 8, 5, 6, 7, 9, 1, 4, 2, 3], 'date': pd.date_range('2012-01-01', '2012-01-10', freq="D"), 'value': np.arange(0, 10)})
tmp = self.s.table(data=df, tableAliasName="top_t")
re = tmp.top(3).sort("id").toDF()
assert_array_equal(re['id'], [1, 2, 3])
assert_array_equal(re['date'], np.array(['2012-01-07', '2012-01-09', '2012-01-10'], dtype="datetime64[D]"))
assert_array_equal(re['value'], [6, 8, 9])
re = tmp.top(3).where("id>5").toDF()
assert_array_equal(re['id'], [10, 8, 6])
assert_array_equal(re['date'], np.array(['2012-01-01', '2012-01-02', '2012-01-04'], dtype="datetime64[D]"))
assert_array_equal(re['value'], [0, 1, 3])
df = pd.DataFrame({'sym': ["C", "MS", "MS", "MS", "IBM", "IBM", "C", "C", "C"],
'price': [49.6, 29.46, 29.52, 30.02, 174.97, 175.23, 50.76, 50.32, 51.29],
'qty':[2200, 1900, 2100, 3200, 6800, 5400, 1300, 2500, 8800]})
tmp = self.s.table(data=df, tableAliasName="t1")
re = tmp.top(2).contextby("sym").sort("sym").toDF()
assert_array_equal(re['sym'], ["C", "C", "IBM", "IBM", "MS", "MS"])
assert_array_almost_equal(re['price'], [49.6, 50.76, 174.97, 175.23, 29.46, 29.52])
assert_array_equal(re['qty'], [2200, 1300, 6800, 5400, 1900, 2100])
def test_table_sql_update_where(self):
n = pd.DataFrame({'timestamp' : pd.to_datetime(['09:34:07','09:36:42','09:36:51','09:36:59','09:32:47','09:35:26','09:34:16','09:34:26','09:38:12']),
'sym' : ['C','MS','MS','MS','IBM','IBM','C','C','C'],
'price' : [49.6 ,29.46 ,29.52 ,30.02 ,174.97 ,175.23 ,50.76 ,50.32 ,51.29],
'qty' : [2200 ,1900 ,2100 ,3200 ,6800 ,5400 ,1300 ,2500 ,8800]})
dt1 = self.s.table(data=n, tableAliasName="t1")
re = dt1.update(["price"], ["price*10"]).where("sym=`C").execute().toDF()
assert_array_almost_equal(re["price"], [496,29.46,29.52,30.02,174.97,175.23,507.6,503.2,512.9])
def test_table_twice(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22, 3.5, 21, 26]}
dt = self.s.table(data=data, tableAliasName="t1")
dt = self.s.table(data=data, tableAliasName="t1")
re = self.s.loadTable("t1").toDF()
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_table_repeatedly(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22, 3.5, 21, 26]}
for i in range(1,100):
dt = self.s.table(data=data, tableAliasName="t1")
re = self.s.loadTable("t1").toDF()
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_table_csort(self):
script = '''
sym = `C`MS`MS`MS`IBM`IBM`C`C`C$SYMBOL
price= 49.6 29.46 29.52 30.02 174.97 175.23 50.76 50.32 51.29
qty = 2200 1900 2100 3200 6800 5400 1300 2500 8800
timestamp = [09:34:07,09:36:42,09:36:51,09:36:59,09:32:47,09:35:26,09:34:16,09:34:26,09:38:12]
t1 = table(timestamp, sym, qty, price);
'''
self.s.run(script)
tb = self.s.loadTable(tableName="t1")
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort("timestamp").top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort("timestamp").limit(-2).toDF()
expected = self.s.run("select timestamp, sym, qty, price from t1 context by sym csort timestamp limit -2")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"]).top(2).toDF()
expected = self.s.run("select timestamp, sym, qty, price from t1 context by sym csort timestamp, qty limit 2")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], False).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp desc, qty desc")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], True).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp asc, qty asc")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], [True, False]).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp asc, qty desc")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], [False, True]).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp desc, qty asc")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], [True, True]).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp asc, qty asc")
assert_frame_equal(re, expected)
re = tb.select(["timestamp", "sym", "qty", "price"]).contextby("sym").csort(["timestamp", "qty"], [False, False]).top(2).toDF()
expected = self.s.run("select top 2 timestamp, sym, qty, price from t1 context by sym csort timestamp desc, qty desc")
assert_frame_equal(re, expected)
def test_dfs_table_csort(self):
script = '''
dbName="dfs://test_csort"
if(existsDatabase(dbName)){
dropDatabase(dbName)
}
db = database(dbName, VALUE, 1..20)
n=1000000
t = table(rand(1..20, n) as id, rand(2012.01.01..2012.06.30, n) as date, rand(100, n) as val)
db.createPartitionedTable(t, `pt, `id).append!(t)
'''
self.s.run(script)
tb = self.s.loadTable(tableName="pt",dbPath="dfs://test_csort")
re = tb.select(["id", "date", "val"]).contextby("id").csort(["date"]).top(50).toDF()
expected = self.s.run('''select top 50 * from loadTable("dfs://test_csort", `pt) context by id csort date ''')
assert_frame_equal(re, expected)
def test_table_limit(self):
script = '''
sym = `C`MS`MS`MS`IBM`IBM`C`C`C$SYMBOL
price= 49.6 29.46 29.52 30.02 174.97 175.23 50.76 50.32 51.29
qty = 2200 1900 2100 3200 6800 5400 1300 2500 8800
timestamp = [09:34:07,09:36:42,09:36:51,09:36:59,09:32:47,09:35:26,09:34:16,09:34:26,09:38:12]
t = table(timestamp, sym, qty, price);
'''
self.s.run(script)
tb = self.s.loadTable(tableName="t")
re = tb.select("*").limit(2).toDF()
expected = self.s.run("select * from t limit 2")
assert_frame_equal(re, expected)
# re = tb.select("*").limit(2, 5).toDF()
# expected = self.s.run("select * from t limit 2, 5")
# assert_frame_equal(re, expected)
def test_table_sort_desc(self):
script = '''
sym = `C`MS`MS`MS`IBM`IBM`C`C`C$SYMBOL
price= 49.6 29.46 29.52 30.02 174.97 175.23 50.76 50.32 51.29
qty = 2200 1900 2100 3200 6800 5400 1300 2500 8800
timestamp = [09:34:07,09:36:42,09:36:51,09:36:59,09:32:47,09:35:26,09:34:16,09:34:26,09:38:12]
t1 = table(timestamp, sym, qty, price);
'''
self.s.run(script)
tb = self.s.loadTable(tableName="t1")
re = tb.select("*").sort("timestamp").toDF()
expected = self.s.run("select * from t1 order by timestamp asc")
assert_frame_equal(re, expected)
re = tb.select("*").sort("timestamp", False).toDF()
expected = self.s.run("select * from t1 order by timestamp desc")
assert_frame_equal(re, expected)
re = tb.select("*").sort("timestamp", True).toDF()
expected = self.s.run("select * from t1 order by timestamp asc")
assert_frame_equal(re, expected)
re = tb.select("*").sort(["timestamp", "price"], False).toDF()
expected = self.s.run("select * from t1 order by timestamp desc, price desc")
assert_frame_equal(re, expected)
re = tb.select("*").sort(["timestamp", "price"], True).toDF()
expected = self.s.run("select * from t1 order by timestamp asc, price asc")
assert_frame_equal(re, expected)
re = tb.select("*").sort(["timestamp", "price"], [True, False]).toDF()
expected = self.s.run("select * from t1 order by timestamp asc, price desc")
assert_frame_equal(re, expected)
re = tb.select("*").sort(["timestamp", "price"], [False, True]).toDF()
expected = self.s.run("select * from t1 order by timestamp desc, price asc")
assert_frame_equal(re, expected)
def test_merge_with_other_operation(self):
s = self.s
trade = s.table(data="shareTrade")
quote = s.table(data="shareQuote")
# trade = orca.read_shared_table("shareTrade")
# quote = orca.read_shared_table("shareQuote")
#
pd_left = self.pd_left
pd_right = self.pdf_right
# with select
res_temp = trade.merge(right=quote, how='inner', on=['symbol', 'time']).select(
["time", "symbol", "price", "size", "ask-bid as diff"])
pdf = pd.merge(pd_left, pd_right, on=['symbol', 'time'])
pdf['diff'] = pdf['ask'] - pdf['bid']
res = pdf[['time', 'symbol', 'price', 'size', 'diff']]
assert_frame_equal(res_temp.toDF(), res, check_dtype=False)
# with sort
odf_res = trade.merge(right=quote, how='inner', on=['symbol', 'time']).sort(bys='price')
pdf_res = pd.merge(pd_left, pd_right, on=['symbol', 'time']).sort_values(by='price')
assert_frame_equal(pdf_res, odf_res.toDF(), check_dtype=False)
# right join with sort
odf_res = trade.merge(right=quote, how='right', on=['symbol', 'time']).select(
["time", "symbol", "price", "size", "ask-bid as diff"]).sort(bys='time ,symbol')
pdf = pd.merge(pd_left, pd_right, how='right', on=['symbol', 'time']).sort_values(['symbol', 'time'])
pdf['diff'] = pdf['ask'] - pdf['bid']
pdf_res = pdf[['time', 'symbol', 'price', 'size', 'diff']]
# print(odf_res.toDF())
# print(pdf_res)
# assert_frame_equal(odf_res.toDF(),pdf_res,check_dtype=False, check_index_type=False)
assert_array_equal(odf_res.toDF()['time'], pdf_res['time'], verbose=True)
assert_array_equal(odf_res.toDF()['symbol'], pdf_res['symbol'], verbose=True)
assert_array_equal(odf_res.toDF()['price'], pdf_res['price'], verbose=True)
assert_array_equal(odf_res.toDF()['diff'], pdf_res['diff'], verbose=True)
assert_array_equal(odf_res.toDF()['size'], pdf_res['size'], verbose=True)
# left semi join with sort
dt1 = self.s.table(data={'id': [1, 2, 3, 3], 'value': [7, 4, 5, 0]}, tableAliasName="t1")
dt2 = self.s.table(data={'id': [5, 3, 1], 'qty': [300, 500, 800]}, tableAliasName="t2")
odf_res = dt2.merge(right=dt1, how='left semi', on='id').select(
["id", "value", "qty", "value-qty as diff"]).sort(bys='id').toDF()
res = self.s.run('select id, value,qty, value-qty as diff from lsj(t2,t1,"id") order by id')
res.fillna(0, inplace=True)
odf_res.fillna(0, inplace=True)
assert_frame_equal(odf_res, res)
self.s.run("undef", dt1.tableName())
self.s.run("undef", dt2.tableName())
def test_merge_with_other_operation_partition(self):
s = self.s
trade = s.loadTable(dbPath="dfs://testmergepart", tableName="pt1")
quote = s.loadTable(dbPath="dfs://testmergepart", tableName="pt2")
# trade = orca.read_shared_table("shareTrade")
# quote = orca.read_shared_table("shareQuote")
#
pd_left = self.pd_left
pd_right = self.pdf_right
# with select
res_temp = trade.merge(right=quote, how='inner', on=['symbol', 'time']).select(
["time", "symbol", "price", "size", "ask-bid as diff"])
pdf = pd.merge(pd_left, pd_right, on=['symbol', 'time'])
pdf['diff'] = pdf['ask'] - pdf['bid']
res = pdf[['time', 'symbol', 'price', 'size', 'diff']]
assert_frame_equal(res_temp.toDF(), res, check_dtype=False)
# with sort
odf_res = trade.merge(right=quote, how='inner', on=['symbol', 'time']).sort(bys='price')
        pdf_res = pd.merge(pd_left, pd_right, on=['symbol', 'time']).sort_values(by='price')
        # NOTE: the source is truncated here; mirroring the non-partitioned test above, the
        # check would plausibly finish with:
        #     assert_frame_equal(pdf_res, odf_res.toDF(), check_dtype=False)
import os
import requests
from typing import List
import pandas as pd
URL = 'http://64.111.127.166/origin-destination/'
FILENAME = 'date-hour-soo-dest-{}.csv.gz'
ALL_FILE = 'od_count_all_time.feather'
DATA_DIR = './data/'
ALL_FILE_PATH = os.path.join(DATA_DIR, ALL_FILE)
def download_files():
dataframes = []
for year in range(2016, 2021):
file_year = FILENAME.format(year)
filename = os.path.join(DATA_DIR, file_year)
if file_year not in os.listdir(DATA_DIR):
print(f'Getting file {file_year}')
resp = requests.get(URL + file_year)
with open(filename, 'wb') as f:
f.write(resp.content)
df = pd.read_csv(filename)
df.columns = ['Date', 'Hour', 'In', 'Out', 'Count']
        df.index = pd.DatetimeIndex(df['Date'])
        dataframes.append(df)
    # NOTE: the source file is truncated here; a plausible continuation (an assumption, not
    # taken from the original) would concatenate the yearly frames and cache them:
    #     all_df = pd.concat(dataframes).reset_index(drop=True)
    #     all_df.to_feather(ALL_FILE_PATH)
from __future__ import annotations
import numpy as np
from typing import List, Union, Tuple, Optional, Callable, Any, TYPE_CHECKING
import lmfit as lm
import pandas as pd
from dataclasses import dataclass
import logging
from ...hdf_util import NotFoundInHdfError, with_hdf_read, with_hdf_write, DatDataclassTemplate
from ... import core_util as CU
from . import dat_attribute as DA
if TYPE_CHECKING:
from ..dat_hdf import DatHDF
logger = logging.getLogger(__name__)
FIT_NUM_BINS = 1000
_pars = lm.Parameters()
_pars.add_many(('mid', 0, True, None, None, None, None),
('theta', 20, True, 0, 500, None, None),
('const', 0, False, None, None, None, None),
('dS', 0, True, -5, 5, None, None),
('dT', 5, True, -10, 50, None, None))
DEFAULT_PARAMS = _pars
def entropy_nik_shape(x, mid, theta, const, dS, dT):
"""fit to entropy curve"""
arg = ((x - mid) / (2 * theta))
return -dT * ((x - mid) / (2 * theta) - 0.5 * dS) * (np.cosh(arg)) ** (-2) + const
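# Quick usage sketch (parameter values below are arbitrary, not from this module): the fit
# function is evaluated on an x grid exactly as lmfit will call it during fitting.
#   _y = entropy_nik_shape(np.linspace(-100, 100, 201), mid=0, theta=20, const=0,
#                          dS=np.log(2), dT=5)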
class Entropy(DA.FittingAttribute):
version = '2.0.0'
group_name = 'Entropy'
description = 'Fitting to entropy shape (either measured by lock-in or from square heating)'
DEFAULT_DATA_NAME = 'entropy_signal'
def __init__(self, dat: DatHDF):
super().__init__(dat)
self._integrated_entropy = None
self._integration_infos = {}
@property
def integrated_entropy(self):
"""Returns DEFAULT integrated entropy if previously calculated
Note: Needs to be calculated with a passed in dT first using dat.Entropy.get_integrated_entropy()
"""
if self._integrated_entropy is None:
self._integrated_entropy = self.get_integrated_entropy()
return self._integrated_entropy
@property
def integration_info(self):
return self.get_integration_info('default')
def set_integration_info(self,
dT: float,
amp: Optional[float] = None,
dx: Optional[float] = None,
sf: Optional[float] = None,
name: Optional[str] = None,
overwrite=False) -> IntegrationInfo:
"""
Sets information required to calculate integrated entropy in HDF.
Note: Mostly dT is required, others can be calculated from dat
Args:
dT (): Heating amount (will default to calculating from dc_info and biases)
amp (): Charge sensor sensitivity (will default to dat.Transition.avg_fit.best_values.amp)
dx (): Step size between measurements in gate potential (will default to step size of self.x)
sf (): Scaling factor for integration (will default to calculating based on dT, amp, dx)
name (): Name to save integration info under (will default to 'default')
overwrite (): Whether to overwrite an existing IntegrationInfo
Returns:
(bool): True if successfully saved
"""
if name is None:
name = 'default'
if self._integration_info_exists(name) and overwrite is False:
raise FileExistsError(f'{name} IntegrationInfo already exists, to overwrite set overwrite=True')
if amp is None:
amp = self.dat.Transition.avg_fit.best_values.amp
if dx is None:
dx = abs((self.x[-1] - self.x[0]) / self.x.shape[-1]) # Should be same for avg_x or x
if sf is None:
sf = scaling(dT, amp, dx)
int_info = IntegrationInfo(dT=dT, amp=amp, dx=dx, sf=sf)
self._save_integration_info(name, int_info)
self._integration_infos[name] = int_info
return int_info
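    # Sketch of typical use (the values and the `dat` handle are assumptions; dT comes from a
    # heating calibration done elsewhere):
    #   int_info = dat.Entropy.set_integration_info(dT=5.0, name='default')
    #   integrated = dat.Entropy.get_integrated_entropy()   # uses the 'default' info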
def get_integration_info(self, name: Optional[str] = None) -> IntegrationInfo:
"""
Returns named integration info (i.e. all things relevant to calculating integrated entropy).
This also acts as a caching function to make things faster
Args:
name (): Name of integration info to look for (will default to 'default')
Returns:
(IntegrationInfo): Info relevant to calculating integrated entropy
"""
if name is None:
name = 'default'
if name not in self._integration_infos:
if self._integration_info_exists(name):
self._integration_infos[name] = self._get_integration_info_from_hdf(name)
else:
raise NotFoundInHdfError(f'No IntegrationInfo found for dat{self.dat.datnum} with name {name}.\n'
f'Use dat.Entropy.set_integration_info(..., name={name}) first')
return self._integration_infos[name]
@with_hdf_read
def get_integration_info_names(self) -> List[str]:
group = self.hdf.group.get('IntegrationInfo')
return list(group.keys())
# @lru_cache
def get_integrated_entropy(self,
row: Optional[int] = None,
name: Optional[str] = None,
data: Optional[np.ndarray] = None) -> np.ndarray:
"""
Calculates integrated entropy given optional info. Will look for saved scaling factor info if available in HDF
Args:
row (): Optionally specify a row of data to integrate, None will default to using avg_data
name (): Optional name to look for or save scaling factor info under
data (): nD Data to integrate (last axis is integrated)
(Only use to override data being integrated, will by default use row or avg)
Returns:
(np.ndarray): Integrated entropy data
"""
if name is None:
name = 'default'
# Get data to integrate
if data is None: # Which should usually be the case
if row is None:
use_avg = True
else:
assert type(row) == int
use_avg = False
if use_avg:
data = self.avg_data
else:
data = self.data[row]
int_info = self.get_integration_info(name)
integrated = integrate_entropy(data, int_info.sf)
return integrated
@with_hdf_read
def _get_integration_info_from_hdf(self, name: str) -> Optional[IntegrationInfo]:
group = self.hdf.group.get('IntegrationInfo')
return self.get_group_attr(name, check_exists=True, group_name=group.name, DataClass=IntegrationInfo)
@with_hdf_read
def _integration_info_exists(self, name: str) -> bool:
group = self.hdf.group.get('IntegrationInfo')
if name in group:
return True
return False
@with_hdf_write
def _save_integration_info(self, name: str, info: IntegrationInfo):
group = self.hdf.group.get('IntegrationInfo')
info.save_to_hdf(group, name)
def default_data_names(self) -> List[str]:
# return ['x', 'entropy_signal']
raise RuntimeError(f'I am overriding set_default_data_descriptors, this should not be called')
def clear_caches(self):
super().clear_caches()
# self.get_integrated_entropy.cache_clear()
self._integrated_entropy = None
self._integration_infos = {}
def get_centers(self):
if 'centers' in self.specific_data_descriptors_keys:
return self.get_data('centers')
else:
return self.dat.Transition.get_centers()
def get_default_params(self, x: Optional[np.ndarray] = None,
data: Optional[np.ndarray] = None) -> Union[List[lm.Parameters], lm.Parameters]:
if x is not None and data is not None:
params = get_param_estimates(x, data)
if len(params) == 1:
params = params[0]
return params
else:
return DEFAULT_PARAMS
def get_default_func(self) -> Callable[[Any], float]:
return entropy_nik_shape
@with_hdf_write
def initialize_additional_FittingAttribute_minimum(self):
group = self.hdf.group
ii_group = group.require_group('IntegrationInfo')
ii_group.attrs['Description'] = 'Stored information required to integrate entropy signal (i.e. dT, amp, scale ' \
'factor).\nIf dT and amp are used to calculate scale factor, then all three are' \
'stored, otherwise only scale factor is stored.\n' \
'Multiplying entropy by scale factor gives integrated entropy'
def set_default_data_descriptors(self):
"""
Overriding to either get Square Entropy signal, or Lock-in Entropy signal rather than just looking for
normal saved data
Set the data descriptors required for fitting (e.g. x, and i_sense)
Returns:
"""
try:
descriptor = self.get_descriptor('entropy_signal')
x = self.get_descriptor('x')
self.set_data_descriptor(descriptor, 'entropy_signal') # Only copy descriptor if already exists
self.set_data_descriptor(x, 'x')
except NotFoundInHdfError:
            x, data, centers = get_entropy_signal_from_dat(self.dat)  # Get x as well, because Square Entropy makes its own x
self.set_data('entropy_signal', data) # Save dataset because being calculated
self.set_data('x', x)
if centers is not None:
centers = centers - np.average(centers) # So that when making average_x it doesn't shift things further
self.set_data('centers', centers)
@dataclass
class IntegrationInfo(DatDataclassTemplate):
dT: Optional[float]
amp: Optional[float]
dx: Optional[float]
sf: Optional[float]
def to_df(self) -> pd.DataFrame:
df = pd.DataFrame(data=[[getattr(self, k) for k in self.__annotations__]],
columns=[k for k in self.__annotations__])
return df
def integrate(self, data: np.ndarray) -> np.ndarray:
return integrate_entropy(data, self.sf)
def get_entropy_signal_from_dat(dat: DatHDF) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
x = dat.Data.get_data('x')
centers = None # So that I can store centers if using Square Entropy which generates centers
if dat.Logs.awg is not None: # Assuming square wave heating, getting entropy signal from i_sense
entropy_signal = dat.SquareEntropy.entropy_signal
x = dat.SquareEntropy.x
centers = np.array(dat.SquareEntropy.default_Output.centers_used)
elif all([k in dat.Data.keys for k in ['entropy_x', 'entropy_y']]): # Both x and y present, generate R and use that as signal
entx, enty = [dat.Data.get_data(k) for k in ['entropy_x', 'entropy_y']]
try:
centers = dat.Transition.get_centers()
logger.info(f'Using centers from dat.Transition to average entropyx/y data to best determine phase from avg')
except NotFoundInHdfError:
centers = None
entropy_signal, entropy_angle = calc_r(entx, enty, x, centers=centers)
elif 'entropy_x' in dat.Data.keys or 'entropy' in dat.Data.keys: # Only entropy_x recorded so use that as entropy signal
if 'entropy_x' in dat.Data.keys:
entropy_signal = dat.Data.get_data('entropy_x')
elif 'entropy' in dat.Data.keys:
entropy_signal = dat.Data.get_data('entropy')
else:
raise ValueError
else:
raise NotFoundInHdfError(f'Did not find AWG in Logs and did not find entropy_x, entropy_y or entropy in data keys')
return x, entropy_signal, centers
# class NewEntropy(DA.FittingAttribute):
# version = '1.1'
# group_name = 'Entropy'
#
# """
# Versions:
# 1.1 -- 20-7-20: Changed average_data to use centers not center_ids. Better way to average data
# """
#
# def __init__(self, dat):
# self.angle = None # type: Union[float, None]
# super().__init__(dat)
#
# def get_from_HDF(self):
# super().get_from_HDF() # Gets self.x/y/avg_fit/all_fits
# dg = self.group.get('Data', None)
# if dg is not None:
# self.data = dg.get('entropy_r', None)
# self.avg_data = dg.get('avg_entropy_r', None)
# self.avg_data_err = dg.get('avg_entropy_r_err', None)
# self.angle = self.group.attrs.get('angle', None)
#
# def update_HDF(self):
# super().update_HDF()
# self.group.attrs['angle'] = self.angle
#
# def recalculate_entr(self, centers, x_array=None):
# """
# Recalculate entropy r from 'entropy x' and 'entropy y' in HDF using center positions provided on x_array if
# provided otherwise on original x_array.
#
# Args:
# centers (np.ndarray): Center positions in units of x_array (either original or passed)
# x_array (np.ndarray): Option to pass an x_array that centers were defined on
#
# Returns:
# None: Sets self.data, self.angle, self.avg_data
# """
# x = x_array if x_array is not None else self.x
# dg = self.group['Data']
# entx = dg.get('entropy_x', None)
# enty = dg.get('entropy_y', None)
# assert entx not in [None, np.nan]
# if enty is None or enty.size == 1:
# entr = CU.center_data(x, entx, centers) # To match entr which gets centered by calc_r
# angle = 0.0
# else:
# entr, angle = calc_r(entx, enty, x=x, centers=centers)
# self.data = entr
# self.angle = angle
#
# self.set_avg_data(centers='None') # Because entr is already centered now
# self.update_HDF()
#
# def _set_data_hdf(self, **kwargs):
# super()._set_data_hdf(data_name='entropy_r')
#
# def run_row_fits(self, params=None, **kwargs):
# super().run_row_fits(entropy_fits, params=params)
#
# def _set_row_fits_hdf(self):
# super()._set_row_fits_hdf()
#
# def set_avg_data(self, centers=None, x_array=None):
# if centers is not None:
# logger.warning(f'Using centers to average entropy data, but data is likely already centered!')
# super().set_avg_data(centers=centers, x_array=x_array) # sets self.avg_data/avg_data_err and saves to HDF
#
# def _set_avg_data_hdf(self):
# dg = self.group['Data']
# HDU.set_data(dg, 'avg_entropy_r', self.avg_data)
# HDU.set_data(dg, 'avg_entropy_r_err', self.avg_data_err)
#
# def run_avg_fit(self, params=None, **kwargs):
# super().run_avg_fit(entropy_fits, params=params) # sets self.avg_fit and saves to HDF
#
# def _set_avg_fit_hdf(self):
# super()._set_avg_fit_hdf()
#
# def _check_default_group_attrs(self):
# super()._check_default_group_attrs()
#
# def _get_centers_from_transition(self):
# assert 'Transition' in self.hdf.keys()
# tg = self.hdf['Transition'] # type: h5py.Group
# rg = tg.get('Row fits', None)
# if rg is None:
# raise AttributeError("No Rows Group in self.hdf['Transition'], this must be initialized first")
# fit_infos = DA.rows_group_to_all_FitInfos(rg)
# x = self.x
# return CU.get_data_index(x, [fi.best_values.mid for fi in fit_infos])
def calc_r(entx, enty, x=None, centers=None):
"""
Calculate R using constant phase determined at largest signal value of averaged data
Args:
entx (np.ndarray): Entropy x signal (1D or 2D)
enty (np.ndarray): Entropy y signal (1D or 2D)
x (np.ndarray): x_array for centering data with center values
centers (np.ndarray): Center of transition to center data on
Returns:
(np.ndarray, float): 1D or 2D entropy r, phase angle
"""
entx = np.atleast_2d(entx)
enty = np.atleast_2d(enty)
if x is None or centers is None:
logger.warning('Not using centers to center data because x or centers missing')
entxav = np.nanmean(entx, axis=0)
entyav = np.nanmean(enty, axis=0)
else:
entxav = CU.mean_data(x, entx, centers, return_std=False)
entyav = CU.mean_data(x, enty, centers, return_std=False)
x_max, y_max, which = _get_max_and_sign_of_max(entxav, entyav) # Gets max of x and y at same location
# and which was bigger
angle = np.arctan(y_max / x_max)
entr = np.array([x * np.cos(angle) + y * np.sin(angle) for x, y in zip(entx, enty)])
entangle = angle
if entr.shape[0] == 1: # Return to 1D if only one row of data
entr = np.squeeze(entr, axis=0)
return entr, entangle
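# Usage sketch (array names and shapes are assumptions): given 2D lock-in x/y signals and
# per-row transition centers, calc_r returns the rotated entropy signal and the phase angle.
#   entr, phase = calc_r(entx_2d, enty_2d, x=x_array, centers=per_row_centers)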
def get_param_estimates(x_array, data, mids=None, thetas=None) -> List[lm.Parameters]:
if data.ndim == 1:
return [_get_param_estimates_1d(x_array, data, mids, thetas)]
elif data.ndim == 2:
mids = mids if mids is not None else [None] * data.shape[0]
thetas = thetas if thetas is not None else [None] * data.shape[0]
return [_get_param_estimates_1d(x_array, z, mid, theta) for z, mid, theta in zip(data, mids, thetas)]
def _get_param_estimates_1d(x, z, mid=None, theta=None) -> lm.Parameters:
"""Returns estimate of params and some reasonable limits. Const forced to zero!!"""
params = lm.Parameters()
dT = np.nanmax(z) - np.nanmin(z)
if mid is None:
mid = (x[np.nanargmax(z)] + x[np.nanargmin(z)]) / 2 #
if theta is None:
theta = abs((x[np.nanargmax(z)] - x[np.nanargmin(z)]) / 2.5)
params.add_many(('mid', mid, True, None, None, None, None),
('theta', theta, True, 0, 500, None, None),
('const', 0, False, None, None, None, None),
('dS', 0, True, -5, 5, None, None),
('dT', dT, True, -10, 50, None, None))
return params
def entropy_1d(x, z, params: lm.Parameters = None, auto_bin=False):
entropy_model = lm.Model(entropy_nik_shape)
    z = pd.Series(z, dtype=np.float32)
    # NOTE: the source is truncated here; the function would plausibly bin the data when
    # auto_bin is set (see FIT_NUM_BINS) and then return entropy_model.fit(z, params, x=x).
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from io import StringIO
import os
from pathlib import Path
import warnings
import numpy as np
import pytest
from pandas.errors import (
EmptyDataError,
ParserError,
)
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
with parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
) as reader:
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
def test_read_csv_raises_on_header_prefix(all_parsers):
# gh-27394
parser = all_parsers
msg = "Argument prefix must be None if argument header is not None"
s = StringIO("0,1\n2,3")
with pytest.raises(ValueError, match=msg):
parser.read_csv(s, header=0, prefix="_X")
def test_unexpected_keyword_parameter_exception(all_parsers):
# GH-34976
parser = all_parsers
msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg.format("read_csv")):
parser.read_csv("foo.csv", foo=1)
with pytest.raises(TypeError, match=msg.format("read_table")):
parser.read_table("foo.tsv", foo=1)
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = | DataFrame({"a": [1, 4]}) | pandas.DataFrame |
import scipy.interpolate as sci
import geopandas as gpd
import shapely as shp
import random as random
import math
import arrow
import pandas as pd
import functools
import emeval.metrics.dist_calculations as emd
import emeval.input.spec_details as eisd
random.seed(1)
####
# BEGIN: Building blocks of the final implementations
####
####
# BEGIN: NORMALIZATION
####
# In addition to filtering the sensed values in the polygons, we should also
# really filter the ground truth values in the polygons, since there is no
# ground truth within the polygon However, ground truth points are not known to
# be dense, and in some cases (e.g. commuter_rail_aboveground), there is a
# small gap between the polygon border and the first point outside it. We
# currently ignore this distance
def fill_gt_linestring(e):
section_gt_shapes = gpd.GeoSeries(eisd.SpecDetails.get_shapes_for_leg(e["ground_truth"]["leg"]))
e["ground_truth"]["gt_shapes"] = section_gt_shapes
e["ground_truth"]["linestring"] = emd.filter_ground_truth_linestring(e["ground_truth"]["gt_shapes"])
e["ground_truth"]["utm_gt_shapes"] = section_gt_shapes.apply(lambda s: shp.ops.transform(emd.to_utm_coords, s))
e["ground_truth"]["utm_linestring"] = emd.filter_ground_truth_linestring(e["ground_truth"]["utm_gt_shapes"])
def to_gpdf(location_df):
return gpd.GeoDataFrame(
location_df, geometry=location_df.apply(
lambda lr: shp.geometry.Point(lr.longitude, lr.latitude), axis=1))
def get_int_aligned_trajectory(location_df, tz="UTC"):
lat_fn = sci.interp1d(x=location_df.ts, y=location_df.latitude)
lon_fn = sci.interp1d(x=location_df.ts, y=location_df.longitude)
# In order to avoid extrapolation, we use ceil for the first int and floor
# for the last int
first_int_ts = math.ceil(location_df.ts.iloc[0])
last_int_ts = math.floor(location_df.ts.iloc[-1])
new_ts_range = [float(ts) for ts in range(first_int_ts, last_int_ts, 1)]
new_fmt_time_range = [arrow.get(ts).to(tz) for ts in new_ts_range]
new_lat = lat_fn(new_ts_range)
new_lng = lon_fn(new_ts_range)
new_gpdf = gpd.GeoDataFrame({
"latitude": new_lat,
"longitude": new_lng,
"ts": new_ts_range,
"fmt_time": new_fmt_time_range,
"geometry": [shp.geometry.Point(x, y) for x, y in zip(new_lng, new_lat)]
})
return new_gpdf
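# Sketch of how the helper above is typically used (the raw dataframe and its values are
# assumptions, not taken from this file): resample a raw location trace onto whole-second
# timestamps so two traces can be compared point-by-point.
#   raw_df = pd.DataFrame({"ts": [100.2, 101.7, 103.4],
#                          "latitude": [37.87, 37.88, 37.89],
#                          "longitude": [-122.27, -122.26, -122.25]})
#   aligned_gpdf = get_int_aligned_trajectory(raw_df, tz="America/Los_Angeles")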
####
# END: NORMALIZATION
####
####
# BEGIN: DISTANCE CALCULATION
####
def add_gt_error_projection(location_gpdf, gt_linestring):
location_gpdf["gt_distance"] = location_gpdf.distance(gt_linestring)
location_gpdf["gt_projection"] = location_gpdf.geometry.apply(
lambda p: gt_linestring.project(p))
def add_t_error(location_gpdf_a, location_gpdf_b):
location_gpdf_a["t_distance"] = location_gpdf_a.distance(location_gpdf_b)
location_gpdf_b["t_distance"] = location_gpdf_a.t_distance
def add_self_project(location_gpdf):
    loc_linestring = shp.geometry.LineString(coordinates=list(zip(
        location_gpdf.longitude, location_gpdf.latitude)))
    location_gpdf["s_projection"] = location_gpdf.geometry.apply(
        lambda p: loc_linestring.project(p))
####
# END: DISTANCE CALCULATION
####
####
# BEGIN: MERGE
####
# Assumes both entries exist
def b_merge_midpoint(loc_row):
# print("merging %s" % loc_row)
assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
midpoint = shp.geometry.LineString(coordinates=[loc_row.geometry_a, loc_row.geometry_i]).interpolate(0.5, normalized=True)
# print(midpoint)
final_geom = (midpoint, "midpoint")
return final_geom
def b_merge_random(loc_row):
# print("merging %s" % loc_row)
    assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
    # NOTE: the source is truncated here; a plausible completion (an assumption), mirroring
    # b_merge_midpoint, would pick one of the two candidate points at random:
    #     final_geom = random.choice([(loc_row.geometry_a, "random_a"),
    #                                 (loc_row.geometry_i, "random_i")])
    #     return final_geom
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://www.reddit.com/r/CoronavirusUS/comments/fqx8fn/ive_been_working_on_this_extrapolation_for_the/
# to explore : https://github.com/fcpenha/Gompertz-Makehan-Fit/blob/master/script.py
# Import required packages
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.dates as mdates
import copy, math
from lmfit import Model
import pandas as pd
import streamlit as st
import datetime as dt
from datetime import datetime, timedelta
import matplotlib.animation as animation
import imageio
import streamlit.components.v1 as components
import os
import platform
import webbrowser
from pandas import read_csv, Timestamp, Timedelta, date_range
from io import StringIO
from numpy import log, exp, sqrt, clip, argmax, put
from scipy.special import erfc, erf
from matplotlib.pyplot import subplots
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import ConciseDateFormatter, AutoDateLocator
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from PIL import Image
import glob
# Functions to calculate values a,b and c ##########################
def exponential(x, a, b, c):
''' Standard gompertz function
a = height, b= halfway point, c = growth rate
https://en.wikipedia.org/wiki/Gompertz_function '''
return a * np.exp(-b * np.exp(-c * x))
def derivate(x, a, b, c):
''' First derivate of the Gompertz function. Might contain an error'''
return (np.exp(b * (-1 * np.exp(-c * x)) - c * x) * a * b * c ) + BASEVALUE
#return a * b * c * np.exp(-b*np.exp(-c*x))*np.exp(-c*x)
def derivate_of_derivate(x,a,b,c):
return a*b*c*(b*c*exp(-c*x) - c)*exp(-b*exp(-c*x) - c*x)
def gaussian(x, a, b, c):
''' Standard Guassian function. Doesnt give results, Not in use'''
return a * np.exp(-np.power(x - b, 2) / (2 * np.power(c, 2)))
def gaussian_2(x, a, b, c):
''' Another gaussian fuctnion. in use
a = height, b = cen (?), c= width '''
return a * np.exp(-((x - b) ** 2) / c)
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
Doesnt work. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
# https://replit.com/@jsalsman/COVID19USlognormals
def lognormal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * erfc(- (log(x) - mu) / (s * sqrt(2)))
# https://en.wikipedia.org/wiki/Log-normal_distribution#Cumulative_distribution_function
def normal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * (1 + erf((x - mu) / (s * sqrt(2))))
# #####################################################################
def find_gaussian_curvefit(x_values, y_values):
try:
popt_g2, pcov_g2 = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[0, 0, 0],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
except RuntimeError as e:
str_e = str(e)
st.error(f"gaussian fit :\n{str_e}")
return tuple(popt_g2)
def use_curvefit(x_values, x_values_extra, y_values, title, daterange,i):
"""
Use the curve-fit from scipy.
IN : x- and y-values. The ___-extra are for "predicting" the curve
"""
with _lock:
st.subheader(f"Curvefit (scipy) - {title}")
fig1x = plt.figure()
try:
a_start, b_start, c_start = 0,0,0
popt, pcov = curve_fit(
f=exponential,
xdata=x_values,
ydata=y_values,
#p0=[4600, 11, 0.5],
p0 = [a_start, b_start, c_start ], # IC BEDDEN MAART APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
exponential(x_values_extra, *popt),
"r-",
label="exponential fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Exponential fit :\n{str_e}")
try:
popt_d, pcov_d = curve_fit(
f=derivate,
xdata=x_values,
ydata=y_values,
#p0=[0, 0, 0],
p0 = [a_start, b_start, c_start ], # IC BEDDEN MAART APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
derivate(x_values_extra, *popt_d),
"g-",
label="derivate fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_d),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Derivate fit :\n{str_e}")
# FIXIT
# try:
# popt_growth, pcov_growth = curve_fit(
# f=growth,
# xdata=x_values,
# ydata=y_values,
# p0=[500, 0.0001],
# bounds=(-np.inf, np.inf),
# maxfev=10000,
# )
# plt.plot(
# x_values_extra,
# growth(x_values_extra, *popt_growth),
# "y-",
# label="growth: a=%5.3f, b=%5.3f" % tuple(popt_growth),
# )
# except:
# st.write("Error with growth model fit")
try:
popt_g, pcov_g = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[a_start, b_start, c_start ],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
gaussian_2(x_values_extra, *popt_g),
"b-",
label="gaussian fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_g),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Gaussian fit :\n{str_e}")
plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")
plt.legend()
plt.title(f"{title} / curve_fit (scipy)")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
# POGING OM DATUMS OP DE X-AS TE KRIJGEN (TOFIX)
# plt.xlim(daterange[0], daterange[-1])
# lay-out of the x axis
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
# interval_ = 5
# plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=interval_))
# plt.gcf().autofmt_xdate()
#plt.show()
filename= (f"{OUTPUT_DIR}scipi_{title}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1x)
# def make_gif(filelist):
# # Create the frames
# frames = []
# imgs = glob.glob("*.png")
# for i in imgs:
# new_frame = Image.open(i)
# frames.append(new_frame)
#
# # Save into a GIF file that loops forever
# frames[0].save('png_to_gif.gif', format='GIF',
# append_images=frames[1:],
# save_all=True,
# duration=300, loop=0)
def use_lmfit(x_values, y_values, functionlist, title,i, max_y_values):
"""
Use lmfit.
IN : x- and y-values.
functionlist (which functions to use)
adapted from https://stackoverflow.com/a/49843706/4173718
TODO: Make all graphs in one graph
"""
a_start, b_start, c_start = 0,0,0
for function in functionlist:
#placeholder0.subheader(f"LMFIT - {title} - {function}")
# create a Model from the model function
if function == "exponential":
bmodel = Model(exponential)
formula = "a * np.exp(-b * np.exp(-c * x))"
elif function == "derivate":
bmodel = Model(derivate)
formula = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "gaussian":
bmodel = Model(gaussian_2)
formula = "a * np.exp(-((x - b) ** 2) / c)"
else:
st.write("Please choose a function")
st.stop()
# create Parameters, giving initial values
#params = bmodel.make_params(a=4711, b=12, c=0.06)
params = bmodel.make_params(a=a_start, b=b_start, c=c_start) # IC BEDDEN MAART APRIL
# params = bmodel.make_params()
params["a"].min = a_start
params["b"].min = b_start
params["c"].min = c_start
# do fit, st.write result
result = bmodel.fit(y_values, params, x=x_values)
a = round(result.params['a'].value,5)
b= round(result.params['b'].value,5)
c =round(result.params['c'].value,5)
placeholder1.text(result.fit_report())
with _lock:
#fig1y = plt.figure()
fig1y, ax1 = plt.subplots()
ax2 = ax1.twinx()
# plot results -- note that `best_fit` is already available
ax1.scatter(x_values, y_values, color="#00b3b3", s=2)
#ax1.plot(x_values, result.best_fit, "g")
res = (f"a: {a} / b: {b} / c: {c}")
plt.title(f"{title} / lmfit - {function}\n{formula}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
ax1.plot(t, bmodel.eval(result.params, x=t), "r-")
ax2.plot (t, derivate_of_derivate(t,a,b,c), color = 'purple')
ax2.axhline(linewidth=1, color='purple', alpha=0.5, linestyle="--")
#ax1.plot (t, derivate(t,26660.1, 9.01298, 0.032198), color = 'purple')
#ax2.plot (t, derivate_of_derivate(t,26660.1, 9.01298, 0.032198), color = 'yellow')
#plt.ylim(bottom=0)
#ax1.ylim(0, max_y_values*1.1)
#ax1.set_ylim(510,1200)
#ax2.set_ylim(0,12)
ax1.set_xlabel(f"Days from {from_}")
ax1.set_ylabel(f"{title} - red")
ax2.set_ylabel("delta - purple")
#plt.show()
filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
placeholder.pyplot(fig1y)
if prepare_for_animation == False:
with _lock:
fig1z = plt.figure()
# plot results -- note that `best_fit` is already available
if function == "exponential":
plt.plot(t, derivate(t,a,b,c))
function_x = "derivate"
formula_x = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "derivate":
plt.plot(t, exponential(t, a,b,c))
function_x = "exponential"
formula_x = "a * np.exp(-b * np.exp(-c * x))"
else:
st.error("ERROR")
st.stop()
plt.title(f"{title} / {function_x}\n{formula_x}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
#plt.plot(t, bmodel.eval(result.params, x=t), "r-")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
plt.ylabel(title)
#plt.show()
#filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
#plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1z)
return filename
def fit_the_values_really(x_values, y_values, which_method, title, daterange,i, max_y_values):
x_values_extra = np.linspace(
start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
)
x_values = x_values[:i]
y_values = y_values[:i]
if prepare_for_animation == False:
use_curvefit(x_values, x_values_extra, y_values, title, daterange,i)
return use_lmfit(x_values,y_values, [which_method], title,i, max_y_values)
def fit_the_values(to_do_list , total_days, daterange, which_method, prepare_for_animation):
"""
We are going to fit the values
"""
# Here we go !
st.header("Fitting data to formulas")
infox = (
'<br>Exponential / Standard gompertz function : <i>a * exp(-b * np.exp(-c * x))</i></li>'
'<br>First derivate of the Gompertz function : <i>a * b * c * exp(b * (-1 * exp(-c * x)) - c * x)</i></li>'
'<br>Gaussian : <i>a * exp(-((x - b) ** 2) / c)</i></li>'
'<br>Working on growth model: <i>(a * 0.5 ^ (x / (4 * (math.log(0.5) / math.log(b)))))</i> (b will be the Rt-number)</li>'
)
st.markdown(infox, unsafe_allow_html=True)
global placeholder0, placeholder, placeholder1
placeholder0 = st.empty()
placeholder = st.empty()
placeholder1 = st.empty()
el = st.empty()
for v in to_do_list:
title = v[0]
y_values = v[1]
max_y_values = max(y_values)
# some preperations
number_of_y_values = len(y_values)
global TOTAL_DAYS_IN_GRAPH
TOTAL_DAYS_IN_GRAPH = total_days # number of total days
x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)
if prepare_for_animation == True:
filenames = []
for i in range(5, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
filenames.append(filename)
# build gif
with imageio.get_writer('mygif.gif', mode='I') as writer:
for filename_ in filenames:
image = imageio.imread(f"{filename_}.png")
writer.append_data(image)
webbrowser.open('mygif.gif')
# Remove files
for filename__ in set(filenames):
os.remove(f"{filename__}.png")
else:
for i in range(len(x_values)-1, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
# FIXIT
# aq, bq, cq = find_gaussian_curvefit(x_values, y_values)
# st.write(f"Find Gaussian curvefit - a:{aq} b:{bq} c: {cq}")
def select_period(df, show_from, show_until):
""" _ _ _ """
if show_from is None:
show_from = "2020-2-27"
if show_until is None:
show_until = "2020-4-1"
mask = (df[DATEFIELD].dt.date >= show_from) & (df[DATEFIELD].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
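# Usage sketch (the dataframe and the date values are assumptions):
#   df_window = select_period(df_all, dt.date(2021, 1, 1), dt.date(2021, 4, 1))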
def normal_c_fit(df):
    # Renamed from normal_c: the old name shadowed the normal_c(x, s, mu, h) model function
    # defined above, which Model(normal_c) below needs to reference.
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Normal_c")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df['Total_reported_cumm'].values # dependent
yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- Timestamp(firstday)) // Timedelta('1d')) # day-of-year ints
indates = date_range(df.index[0], df.index[-1])
exdates = date_range(nextday, lastday)
ax.scatter(indates, yi, color="#00b3b3", label='Infected')
ax.scatter(indates, yd, color="#00b3b3", label='Dead')
sqrt2 = sqrt(2)
im = Model(normal_c)
st.write (x)
iparams = im.make_params(s=0.3, mu=4.3, h=16.5)
st.write (iparams)
#iparams['s'].min = 0; iparams['h'].min = 0
iresult = im.fit(log(yi+1), iparams, x=x)
st.text('---- Infections:\n' + iresult.fit_report())
ax.plot(indates, exp(iresult.best_fit)-1, 'b', label='Infections fit')
ipred = iresult.eval(x=exrange)
ax.plot(exdates, exp(ipred)-1, 'b--',
label='Forecast: {:,.0f}'.format(exp(ipred[-1])-1))
iupred = iresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
iintlow = clip(ipred-iupred, ipred[0], None)
put(iintlow, range(argmax(iintlow), len(iintlow)), iintlow[argmax(iintlow)])
ax.fill_between(exdates, exp(iintlow), exp(ipred+iupred), alpha=0.35, color='b')
dm = Model(normal_c)
dparams = dm.make_params(s=19.8, mu=79.1, h=11.4) # initial guesses
        dparams['s'].min = 0; dparams['h'].min = 0
dresult = dm.fit(log(yd+1), dparams, x=x)
st.text('---- Deaths:\n' + dresult.fit_report())
ax.plot(indates, exp(dresult.best_fit)-1, 'r', label='Deaths fit')
dpred = dresult.eval(x=exrange)
ax.plot(exdates, exp(dpred)-1, 'r--',
label='Forecast: {:,.0f}'.format(exp(dpred[-1])-1))
dupred = dresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
dintlow = clip(dpred-dupred, log(max(yd)+1), None)
put(dintlow, range(argmax(dintlow), len(dintlow)), dintlow[argmax(dintlow)])
ax.fill_between(exdates, exp(dintlow), exp(dpred+dupred), alpha=0.35, color='r')
ax.fill_between(exdates, 0.012 * (exp(iintlow)), 0.012 * (exp(ipred+iupred)),
alpha=0.85, color='g', label='Deaths from observed fatality rate')
ax.set_xlim(df.index[0], lastday)
#ax.set_yscale('log') # semilog
#ax.set_ylim(0, 1500000)
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) # comma separators
ax.grid()
ax.legend(loc="upper left")
ax.xaxis.set_major_formatter(ConciseDateFormatter(AutoDateLocator(), show_offset=False))
ax.set_xlabel('95% prediction confidence intervals shaded')
#fig.savefig('plot.png', bbox_inches='tight')
#print('\nTO VIEW GRAPH: click on plot.png in the file pane to the left.')
#fig.show()
st.pyplot(fig1yz)
st.text('Infections at end of period shown: {:,.0f}. Deaths: {:,.0f}.'.format(
exp(ipred[-1])-1, exp(dpred[-1])-1))
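# Minimal sketch (added for illustration, not in the original script) of the lmfit
# pattern used above: fit log(counts + 1) with a parametric curve, then evaluate the
# fitted model and its 95% uncertainty band on an extended x-range for the forecast.
# The cumulative-normal model function below is an assumption standing in for the one
# the original repl.it script used.
def _lognormal_fit_sketch(x, y):
    import numpy as np
    from scipy.special import erfc
    from lmfit import Model
    def cumulative_normal(x, s, mu, h):
        # h-scaled CDF of a normal distribution with mean mu and spread s
        return h * erfc((mu - x) / (s * np.sqrt(2))) / 2
    model = Model(cumulative_normal)
    params = model.make_params(s=0.3, mu=4.3, h=16.5)
    result = model.fit(np.log(np.asarray(y) + 1), params, x=np.asarray(x))
    x_future = np.arange(max(x) + 1, max(x) + 31)           # 30-day extrapolation
    forecast = np.exp(result.eval(x=x_future)) - 1
    band = result.eval_uncertainty(x=x_future, sigma=0.95)  # 95% interval, log scale
    return forecast, band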
def loglognormal(df, what_to_display):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Log Normal")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df[what_to_display].values # dependent
#yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- | Timestamp(firstday) | pandas.Timestamp |
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
                # these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
        # freq raises AttributeError on an Int64Index because it's not defined
        # we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
                    # skip int64 because it doesn't allow including nan or None
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays includes nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array([]))
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG',
'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
else:
self.assert_numpy_array_equal(s.unique(), expected)
self.assertEqual(s.nunique(), 3)
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td)
result = td.value_counts()
expected_s = Series([6], index=[86400000000000])
self.assertEqual(result.index.dtype, 'int64')
tm.assert_series_equal(result, expected_s)
# get nanoseconds to compare
expected = np.array([86400000000000])
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2)
result2 = td2.value_counts()
self.assertEqual(result2.index.dtype, 'int64')
tm.assert_series_equal(result2, expected_s)
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
def test_factorize(self):
for o in self.objs:
exp_arr = np.array(range(len(o)))
labels, uniques = o.factorize()
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
for o in self.objs:
# sort by value, and create duplicates
if isinstance(o, Series):
o.sort()
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels, uniques = n.factorize(sort=True)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4])
labels, uniques = n.factorize(sort=False)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(np.concatenate([o.values[5:10], o.values[:5]]))
self.assert_numpy_array_equal(uniques, expected)
else:
expected = o[5:].append(o[:5])
self.assertTrue(uniques.equals(expected))
class TestDatetimeIndexOps(Ops):
_allowed = '_allow_datetime_index_ops'
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEquals(s.year,2000)
self.assertEquals(s.month,1)
self.assertEquals(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
# monotonic
idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
Length: 0, Freq: D, Timezone: None"""
exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01]
Length: 1, Freq: D, Timezone: None"""
exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D, Timezone: None"""
exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D, Timezone: None"""
exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
Length: 3, Freq: H, Timezone: Asia/Tokyo"""
exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00-05:00, ..., NaT]
Length: 3, Freq: None, Timezone: US/Eastern"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
class TestPeriodIndexOps(Ops):
_allowed = '_allow_period_index_ops'
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
self.assertTrue(result[i], expected[i])
self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertTrue(result_list[i], expected_list[i])
self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), | pd.Period('2011-01-01', freq='D') | pandas.Period |
#!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
import os, shutil, zipfile
from numpy import array
import csv
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from scipy.stats import entropy
import scipy as sc
from zipfile import ZipFile
import joblib
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation,Conv2D,MaxPooling2D,Flatten,Conv1D, GlobalMaxPooling1D,MaxPooling1D, Convolution2D,Reshape, InputLayer,LSTM, Embedding
from keras.optimizers import SGD
from sklearn import preprocessing
from keras.callbacks import EarlyStopping
from keras.preprocessing import sequence
from keras.preprocessing.sequence import pad_sequences
from keras.layers import TimeDistributed
def load_sepsis_model():
# Load the saved model pickle file
Trained_model = joblib.load('saved_model.pkl')
return Trained_model
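# Hedged usage sketch (not part of the original submission): the PhysioNet/CinC 2019
# driver typically loads the model once and then scores a patient record incrementally,
# passing the first t rows of the 40-column feature matrix at time step t. The exact
# return value of get_sepsis_score is defined just below.
def _example_driver(record_matrix):
    model = load_sepsis_model()
    outputs = []
    for t in range(1, len(record_matrix) + 1):
        outputs.append(get_sepsis_score(record_matrix[:t, :], model))
    return outputs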
def get_sepsis_score(data1, Trained_model):
#Testing
t=1
df_test = np.array([], dtype=np.float64)
df_test1 = pd.DataFrame()
l = len(data1)
df_test = data1
df_test1 = pd.DataFrame(df_test)
df_test2 = df_test1
df_test2.columns = ['HR','O2Sat','Temp','SBP','MAP','DBP','Resp','EtCO2','BaseExcess','HCO3','FiO2','pH','PaCO2','SaO2','AST','BUN','Alkalinephos','Calcium','Chloride','Creatinine','Bilirubin_direct','Glucose','Lactate','Magnesium','Phosphate','Potassium','Bilirubin_total','TroponinI','Hct','Hgb','PTT','WBC','Fibrinogen','Platelets','Age','Gender','Unit1','Unit2','HospAdmTime','ICULOS']
#Forward fill missing values
df_test2.fillna(method='ffill', axis=0, inplace=True)
df_test3 = df_test2.fillna(0)
df_test = df_test3
df_test['ID'] = 0
DBP = pd.pivot_table(df_test,values='DBP',index='ID',columns='ICULOS')
O2Sat = pd.pivot_table(df_test,values='O2Sat',index='ID',columns='ICULOS')
Temp = pd.pivot_table(df_test,values='Temp',index='ID',columns='ICULOS')
RR = pd.pivot_table(df_test,values='Resp',index='ID',columns='ICULOS')
BP = pd.pivot_table(df_test,values='SBP',index='ID',columns='ICULOS')
latest = pd.pivot_table(df_test,values='HR',index='ID',columns='ICULOS')
Fibrinogen = pd.pivot_table(df_test,values='Fibrinogen',index='ID',columns='ICULOS')
Glucose = pd.pivot_table(df_test,values='Glucose',index='ID',columns='ICULOS')
HCO3 = pd.pivot_table(df_test,values='HCO3',index='ID',columns='ICULOS')
WBC = pd.pivot_table(df_test,values='WBC',index='ID',columns='ICULOS')
HospAdmTime = pd.pivot_table(df_test,values='HospAdmTime',index='ID',columns='ICULOS')
EtCO2 = pd.pivot_table(df_test,values='EtCO2',index='ID',columns='ICULOS')
BaseExcess = pd.pivot_table(df_test,values='BaseExcess',index='ID',columns='ICULOS')
Creatinine = pd.pivot_table(df_test,values='Creatinine',index='ID',columns='ICULOS')
Platelets = pd.pivot_table(df_test,values='Platelets',index='ID',columns='ICULOS')
age = pd.pivot_table(df_test,values='Age',index='ID',columns='ICULOS')
gender = pd.pivot_table(df_test,values='Gender',index='ID',columns='ICULOS')
Heart_rate_test = latest
RR_test = RR
BP_test = BP
DBP_test = DBP
Temp_test = Temp
O2Sat_test = O2Sat
result = Heart_rate_test
result = result.fillna(0)
RR_test = RR_test.fillna(0)
BP_test = BP_test.fillna(0)
Temp_test = Temp_test.fillna(0)
DBP_test = DBP_test.fillna(0)
O2Sat_test = O2Sat_test.fillna(0)
age = age.fillna(0)
gender = gender.fillna(0)
HospAdmTime_test2 = HospAdmTime.fillna(0)
EtCO2_test2 = EtCO2.fillna(0)
BaseExcess_test2 = BaseExcess.fillna(0)
Creatinine_test2 = Creatinine.fillna(0)
Platelets_test2 = Platelets.fillna(0)
WBC2_test = WBC.fillna(0)
HCO32_test = HCO3.fillna(0)
Glucose2_test = Glucose.fillna(0)
Fibrinogen2_test = Fibrinogen.fillna(0)
    #Since we are using a window-based approach (6-hour window size), we pad our output for the 6 hours following the patient's admission.
    #Get dataframe of probabilities
    #Window-based approach
Heart_rate_test = result.iloc[:, 0:l]
RR2_test = RR_test.iloc[:, 0:l]
BP2_test = BP_test.iloc[:, 0:l]
Temp2_test = Temp_test.iloc[:, 0:l]
DBP2_test = DBP_test.iloc[:, 0:l]
O2Sat2_test = O2Sat_test.iloc[:, 0:l]
HospAdmTime_test = HospAdmTime_test2.iloc[:, 0:l]
EtCO22 = EtCO2_test2.iloc[:, 0:l]
BaseExcess2 = BaseExcess_test2.iloc[:, 0:l]
Creatinine2 = Creatinine_test2.iloc[:, 0:l]
Platelets2 = Platelets_test2.iloc[:, 0:l]
WBC2 = WBC2_test.iloc[:, 0:l]
gender2 = gender.iloc[:, 0:l]
HCO32 = HCO32_test.iloc[:, 0:l]
Glucose2 = Glucose2_test.iloc[:, 0:l]
Fibrinogen2 = Fibrinogen2_test.iloc[:, 0:l]
Overall_df_test = | pd.concat([Heart_rate_test, BP2_test, Temp2_test, RR2_test, DBP2_test, O2Sat2_test,HospAdmTime_test,EtCO22,BaseExcess2,Creatinine2,Platelets2,gender2,WBC2,HCO32,Glucose2,Fibrinogen2], axis=1) | pandas.concat |
from typing import Optional
import json
import sys
from pathlib import Path
import pandas as pd
import typer
from loguru import logger
from streamlit import cli as stcli
from litreading.config import DEFAULT_MODEL_SCALER
from litreading.grader import Grader
from litreading.trainer import ModelTrainer
from litreading.utils.files import save_to_file
app = typer.Typer()
logger.configure(handlers=[{"sink": sys.stderr, "level": "INFO"}])
@app.command()
def grade(
model_filepath: Path = typer.Argument(
...,
exists=True,
file_okay=True,
dir_okay=False,
writable=False,
readable=True,
resolve_path=True,
),
data_filepath: Path = typer.Argument(
...,
exists=True,
file_okay=True,
dir_okay=False,
writable=False,
readable=True,
resolve_path=True,
),
output_filepath: Optional[Path] = typer.Option(
None,
"-s",
file_okay=True,
dir_okay=False,
writable=True,
readable=False,
resolve_path=True,
),
):
df = pd.read_csv(data_filepath)
grades = Grader(model_filepath=model_filepath).grade(df)
grades = | pd.Series(grades) | pandas.Series |
import json
import pandas as pd
import time
from pycoingecko import CoinGeckoAPI
import requests
cg = CoinGeckoAPI()
class CoinPrice:
def __init__(self):
self.tsym = "cad"
self.IDList = cg.get_coins_list()
time.sleep(1.0)
self.priceList = {}
self.timeResolution = 'D' #use D?? for Day H for hourly, min for minutely
self.session =requests.Session()
self.useSearch = True
def getCoinID (self, sym: str) -> str:
#Gets the coingecko coinID
sym = sym.lower()
coinIDs = []
for coin in self.IDList :
if coin["symbol"] == sym:
#return coin["id"]
if "binance-peg" not in coin["id"]:
coinIDs.append(coin)
#no symbol found
#return ("none")
if (len(coinIDs) > 1):
i = 0
for coin in coinIDs:
print(i,": ", coin)
i += 1
selection = int(input("select a coinID to use (-1 for none): "))
if (selection < 0 ):
return "$none$"
elif(len(coinIDs) ==1):
selection = 0
elif(self.useSearch==True):
cantFind = True
while (cantFind) :
print ("Can't find this symbol " + sym)
ans = input("(m)anual or (n)one: ")
if (ans == "n"):
cantFind = False
return "$none$"
elif (ans =="m"):
s = " please enter manual replacement: "
sym2 = input(s)
coinID = self.getCoinID(sym2)
cantFind = False
return(coinID)
try:
return coinIDs[selection]['id']
except:
raise ValueError("Could not find the symbol : " + sym)
def convertDateToTS(self, dateString: str):
date = pd.to_datetime(dateString)
dateTS = date.timestamp()
return dateTS
def convertDateToTSStr(self, dateString: str):
date = str(self.convertDateToTS(dateString))
dateTS = date[:10]
return dateTS
def getCoinMarketYear(self, sym: str, dateString: str):
coinID = self.getCoinID(sym)
if (coinID == "$none$"):
return ({0:0.0})
date = | pd.to_datetime(dateString) | pandas.to_datetime |
import pandas as pd
import csv
import json
import io
from rltk.io.reader import *
arr = [{'1': 'A', '2': 'B'}, {'1': 'a', '2': 'b'}]
def test_array_reader():
for idx, obj in enumerate(ArrayReader(arr)):
assert obj == arr[idx]
def test_dataframe_reader():
df = | pd.DataFrame(arr) | pandas.DataFrame |
import multiprocessing
import numpy as np
import pandas as pd
import re
from pathlib import Path
from os import cpu_count
from tables.exceptions import HDF5ExtError
from src.patches import PatchSchema
from src.preset2fxp import *
FXP_CHUNK = 'chunk'
FXP_PARAMS = 'params'
DB_KEY = 'patches'
TAGS_KEY = 'tags'
PATCH_FILE = 'patch'
JOBS = min(4, cpu_count())
def updates(func):
"""Wrapper for functions that require an update of the database."""
def inner(self, *args, **kwargs):
ret = func(self, *args, **kwargs)
self.refresh()
return ret
return inner
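# Illustrative sketch (not part of the original module): any mutating method wrapped
# with @updates calls self.refresh() once the wrapped method returns. The toy class
# below exists purely to demonstrate that contract.
class _UpdatesDemo:
    def __init__(self):
        self.refreshed = 0
    def refresh(self):
        self.refreshed += 1
    @updates
    def mutate(self):
        return 'changed'  # refresh() fires right after this returns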
class PatchDatabase:
"""Model for a pandas-based patch database conforming to a `PatchSchema`."""
__df: pd.DataFrame = None
__tags: pd.DataFrame
__knn = None
schema: PatchSchema
tags: pd.Index = pd.Index([])
banks = []
def __init__(self, schema: PatchSchema):
"""Constructs a new `PatchDatabase` instance following the `schema`."""
self.schema = schema
def bootstrap(self, root_dir: Path):
"""Creates a new database from the contents of the specified directory and loads the database."""
re_file = re.compile(self.schema.file_pattern)
files = filter(lambda f: re_file.match(f.name) is not None, root_dir.glob('**/*'))
meta = []
params = []
# Running *all* this I/O on a single thread is just so slow...
with multiprocessing.Pool(processes=JOBS) as pool:
for patch in pool.imap_unordered(self.schema.read_patchfile, files):
if patch:
params.append(patch['params'])
del patch['params']
meta.append(patch)
init_patch = pd.Series(
self.schema.values, index=self.schema.params, dtype=self.schema.param_dtype)
meta_df = pd.DataFrame(meta)
param_df = pd.DataFrame(params, columns=self.schema.params,
dtype=int).fillna(init_patch)
meta_df['bank'] = pd.Categorical(meta_df['bank'])
meta_df['tags'] = ''
for col, pos in self.schema.possibilites.items():
meta_df[col] = | pd.Categorical(meta_df[col], categories=pos) | pandas.Categorical |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (we may allow this in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = | DatetimeIndex(['20130102', pd.NaT, '20130105']) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created by <NAME> July 2021
This script reads the csv annotations from NIPS4Bplus and the species list from
NIPS4B to generate list of train and test files and dictionary label files
The dictionary files list random train and test sets for three selections of
classes: "All Classes", "Bird Classes" and "Bird Species"
It generates two separate test and train file sets. One for "All Classes" and a
different one "birds" for both "Bird Classes" and "Bird Species"
NIPS4Bplus annotations:
https://doi.org/10.6084/m9.figshare.6798548
NIPS4B species list:
http://sabiod.univ-tln.fr/nips4b/media/birds/NIPS4B_BIRD_CHALLENGE_TRAIN_LABELS.tar
Choose: nips4b_birdchallenge_espece_list.csv
Instructions
https://github.com/fbravosanchez/NIPS4Bplus#readme
"""
import sys
import os
import glob
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split as tr_te_split
def export_scp(file, name):
file.to_csv(os.path.join(output_path, '') + name +'.scp',index=False, header=False, encoding='utf-8')
def export_npy(file, name):
np.save(os.path.join(output_path, '') + name + '.npy', file)
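# Hedged sketch (added for illustration): the train/test lists described in the module
# docstring are produced with sklearn's train_test_split (imported as tr_te_split) and
# written out through the two helpers above. The 'label' column name and the 0.2 test
# fraction are assumptions, not taken from the original script.
def _split_and_export_example(labels_df):
    train_df, test_df = tr_te_split(labels_df, test_size=0.2, random_state=0)
    export_scp(train_df['wav'], 'train_all')
    export_scp(test_df['wav'], 'test_all')
    export_npy(dict(zip(labels_df['wav'], labels_df['label'])), 'label_dict_all')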
#path to NIPS4Bplus csv annotation files
csv_path = sys.argv[1]
#path to NIPS4B species list
sps_list_file = sys.argv[2]
#path to cut files generated in cut_nips4bplus_files
# cut_files_path = sys.argv[3]
#output path for dictionary files
output_path = sys.argv[3]
if not os.path.exists(output_path):
os.makedirs(output_path )
#collect csv label file list
lbl_files = pd.DataFrame(glob.glob(os.path.join(csv_path, '') + '*.csv'))
lbl_files.columns = ['csv']
lbl_files['wav'] = 'nips4b_birds_trainfile' + lbl_files['csv'].str[-7:-4]
#read species list
sps_list = pd.read_csv(sps_list_file)
file_list = []
#process by csv file
for i, j in lbl_files.iterrows():
#skip empty files
try:
k = | pd.read_csv(j['csv'], header=None) | pandas.read_csv |
import sys
sys.path.insert(0, '/Users/david/galvanize/super_liga_xg')
from combined_player import player_minutes_value
from scraping_tools.html_scraper import db
# from html_scraper import db
from mongo_to_db import create_master_df
from model_prep import create_rf_prep, create_xG_df, create_summed_xG_df
import pickle
import pandas as pd
def create_complete_xg_tables():
"""creates complete table of player information + xG statistics based on
random_forest, gradient_boost, xgb_boost and ensemble as four dataframes
example: rf_df, gb_df, xgb_df, ens_df = create_complete_xg_tables()
"""
games = db['games_update'].find()
players = db.players.find()
final_df = player_minutes_value(games, players)
final_df['position_id'] = final_df['position_id'].replace(1, 'Goalie')
final_df['position_id'] = final_df['position_id'].replace(2, 'Defender')
final_df['position_id'] = final_df['position_id'].replace(3, 'Midfielder')
final_df['position_id'] = final_df['position_id'].replace(4, 'Forward')
final_df['position_id'] = final_df['position_id'].replace(5, 'Defender')
games = db['games_update'].find()
shots_df = create_master_df(games)
rf_model = pickle.load(open("../models/rfc.pkl", "rb"))
gb_model = pickle.load(open("../models/gb.pkl", "rb"))
xgb_model = pickle.load(open("../models/xgb.pkl", "rb"))
model_ready_df = create_rf_prep(shots_df)
columns = ['shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt']
p_random_forest = rf_model.predict_proba(model_ready_df[columns])
p_gradient_boost = gb_model.predict_proba(model_ready_df[columns])
p_xgboost = xgb_model.predict_proba(model_ready_df[columns])
p_ensemble = (p_random_forest + p_gradient_boost + p_xgboost) / 3
rf_xg = create_xG_df(model_ready_df, shots_df['is_goal'], p_random_forest)
rf_contributions = create_summed_xG_df(rf_xg)
gb_xg = create_xG_df(model_ready_df, shots_df['is_goal'], p_gradient_boost)
gb_contributions = create_summed_xG_df(gb_xg)
xgb_xg = create_xG_df(model_ready_df, shots_df['is_goal'], p_xgboost)
xgb_contributions = create_summed_xG_df(xgb_xg)
ensemble_xg = create_xG_df(model_ready_df, shots_df['is_goal'], p_ensemble)
ensemble_contributions = create_summed_xG_df(ensemble_xg)
rf_sl = pd.merge(rf_contributions, final_df, on=['player_id'])
gb_sl = pd.merge(gb_contributions, final_df, on=['player_id'])
xgb_sl = | pd.merge(xgb_contributions, final_df, on=['player_id']) | pandas.merge |
import pandas as pd
import json
def get_se_as_df(filename):
with open(filename) as f:
data = json.loads(f.read())
for record in data:
for key, value in record.items():
if type(value)==dict:
# extract only kWh
kWh = value['energy_kWh']
record[key] = kWh
df = pd.DataFrame(data)
# convert string to datetime object
df['created_on'] = | pd.to_datetime(df['created_on']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Tests computational time of different null methods and plots outputs
"""
from dataclasses import asdict, make_dataclass
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import threadpoolctl
from brainsmash import mapgen
from brainspace.null_models import moran
from netneurotools import (datasets as nndata,
freesurfer as nnsurf,
stats as nnstats)
from parspin import burt, simnulls, surface
from parspin.plotting import savefig
from parspin.simnulls import SPATNULLS
from parspin.utils import PARCHUES, SPATHUES
plt.rcParams['svg.fonttype'] = 'none'
plt.rcParams['font.sans-serif'] = ['Myriad Pro']
DATADIR = Path('./data/derivatives/supplementary/comp_time').resolve()
FIGDIR = Path('./figures/supplementary/comp_time').resolve()
ORDER = ('vertex', 'atl-cammoun2012', 'atl-schaefer2018')
ROIDIR = Path('./data/raw/rois').resolve()
SPDIR = Path('./data/derivatives/spins').resolve()
SIMDIR = Path('./data/derivatives/simulated').resolve()
DISTDIR = Path('./data/derivatives/geodesic').resolve()
OUTDIR = Path('./data/derivatives/supplementary/comp_time').resolve()
ALPHA = 'alpha-2.0'
N_PERM = 1000
N_REPEAT = 5
SEED = 1234
USE_CACHED = True
PARCS = (
('vertex', 'fsaverage5'),
('atl-cammoun2012', 'scale500'),
('atl-schaefer2018', '1000Parcels7Networks')
)
CompTime = make_dataclass(
'CompTime', ('parcellation', 'scale', 'spatnull', 'runtime')
)
def get_runtime(parcellation, scale, spatnull):
"""
Runs spatial null models for given combination of inputs
Parameters
----------
parcellation : str
Name of parcellation to be used
scale : str
Scale of `parcellation` to be used
spatnull : str
Name of spin method to be used
"""
# filenames (for I/O)
fn = SPDIR / parcellation / spatnull / f'{scale}_spins.csv'
# load simulated data
alphadir = SIMDIR / ALPHA
if parcellation == 'vertex':
x, y = simnulls.load_vertex_data(alphadir, sim=0)
else:
x, y = simnulls.load_parc_data(alphadir, parcellation, scale, sim=0)
    # start timer (after loading data--accounts for diff b/w vertex/parc)
start = time.time()
# calculate the null p-values
if spatnull == 'naive-para':
nnstats.efficient_pearsonr(x, y, nan_policy='omit')[1]
nulls = None
elif spatnull == 'naive-nonpara':
nulls = naive_nonpara(y, fn=fn)
elif spatnull == 'vazquez-rodriguez':
nulls = vazquez_rodriguez(y, parcellation, scale, fn=fn)
elif spatnull == 'vasa':
nulls = vasa(y, parcellation, scale, fn=fn)
elif spatnull == 'hungarian':
nulls = hungarian(y, parcellation, scale, fn=fn)
elif spatnull == 'cornblath':
fn = SPDIR / 'vertex' / 'vazquez-rodriguez' / 'fsaverage5_spins.csv'
nulls = cornblath(y, parcellation, scale, fn=fn)
elif spatnull == 'baum':
nulls = baum(y, parcellation, scale, fn=fn)
elif spatnull in ('burt2018', 'burt2020', 'moran'):
nulls = make_surrogates(y, parcellation, scale, spatnull, fn=fn)
else:
raise ValueError(f'Invalid spatnull: {spatnull}')
if nulls is not None:
simnulls.calc_pval(x, y, nulls)
end = time.time()
ct = CompTime(parcellation, scale, spatnull, end - start)
print(ct)
return asdict(ct)
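# Hedged sketch (not part of the original file) of how get_runtime is meant to be driven:
# time every (parcellation, scale, null-method) combination N_REPEAT times and gather the
# CompTime records into a tidy frame. The output filename below is an assumption.
def _collect_runtimes():
    rows = []
    for parcellation, scale in PARCS:
        for spatnull in SPATNULLS:
            for _ in range(N_REPEAT):
                rows.append(get_runtime(parcellation, scale, spatnull))
    runtimes = pd.DataFrame(rows)
    runtimes.to_csv(OUTDIR / 'comp_time.csv', index=False)
    return runtimes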
def _get_annot(parcellation, scale):
fetcher = getattr(nndata, f"fetch_{parcellation.replace('atl-', '')}")
return fetcher('fsaverage5', data_dir=ROIDIR)[scale]
def naive_nonpara(y, fn=None):
y = np.asarray(y)
rs = np.random.default_rng(SEED)
if USE_CACHED and fn is not None:
spins = simnulls.load_spins(fn, n_perm=N_PERM)
else:
spins = np.column_stack([
rs.permutation(len(y)) for f in range(N_PERM)
])
return y[spins]
def vazquez_rodriguez(y, parcellation, scale, fn=None):
y = np.asarray(y)
if USE_CACHED and fn is not None:
spins = simnulls.load_spins(fn, n_perm=N_PERM)
else:
if parcellation != 'vertex':
annot = _get_annot(parcellation, scale)
coords, hemi = nnsurf.find_parcel_centroids(lhannot=annot.lh,
rhannot=annot.rh,
version='fsaverage5',
surf='sphere',
method='surface')
else:
coords, hemi = nnsurf._get_fsaverage_coords(scale, 'sphere')
spins = nnstats.gen_spinsamples(coords, hemi, method='original',
n_rotate=N_PERM, seed=SEED)
return y[spins]
def vasa(y, parcellation, scale, fn=None):
y = np.asarray(y)
if USE_CACHED and fn is not None:
spins = simnulls.load_spins(fn, n_perm=N_PERM)
else:
annot = _get_annot(parcellation, scale)
coords, hemi = nnsurf.find_parcel_centroids(lhannot=annot.lh,
rhannot=annot.rh,
version='fsaverage5',
surf='sphere',
method='surface')
spins = nnstats.gen_spinsamples(coords, hemi, method='vasa',
n_rotate=N_PERM, seed=SEED)
return y[spins]
def hungarian(y, parcellation, scale, fn=None):
y = np.asarray(y)
if USE_CACHED and fn is not None:
spins = simnulls.load_spins(fn, n_perm=N_PERM)
else:
annot = _get_annot(parcellation, scale)
coords, hemi = nnsurf.find_parcel_centroids(lhannot=annot.lh,
rhannot=annot.rh,
version='fsaverage5',
surf='sphere',
method='surface')
spins = nnstats.gen_spinsamples(coords, hemi, method='hungarian',
n_rotate=N_PERM, seed=SEED)
return y[spins]
def baum(y, parcellation, scale, fn=None):
y = np.asarray(y)
if USE_CACHED and fn is not None:
spins = simnulls.load_spins(fn, n_perm=N_PERM)
else:
annot = _get_annot(parcellation, scale)
spins = nnsurf.spin_parcels(lhannot=annot.lh, rhannot=annot.rh,
version='fsaverage5', n_rotate=N_PERM,
seed=SEED)
nulls = y[spins]
nulls[spins == -1] = np.nan
return nulls
def cornblath(y, parcellation, scale, fn=None):
y = np.asarray(y)
annot = _get_annot(parcellation, scale)
spins = simnulls.load_spins(fn, n_perm=N_PERM) if USE_CACHED else None
nulls = nnsurf.spin_data(y, version='fsaverage5', spins=spins,
lhannot=annot.lh, rhannot=annot.rh,
n_rotate=N_PERM, seed=SEED)
return nulls
def get_distmat(hemi, parcellation, scale, fn=None):
if hemi not in ('lh', 'rh'):
        raise ValueError(f'Invalid hemisphere designation {hemi}')
if USE_CACHED and fn is not None:
fn = DISTDIR / parcellation / 'nomedial' / f'{scale}_{hemi}_dist.npy'
dist = np.load(fn, allow_pickle=False, mmap_mode='c').astype('float32')
else:
surf = nndata.fetch_fsaverage('fsaverage5', data_dir=ROIDIR)['pial']
subj, spath = nnsurf.check_fs_subjid('fsaverage5')
medial = Path(spath) / subj / 'label'
medial_labels = [
'unknown', 'corpuscallosum', '???',
'Background+FreeSurfer_Defined_Medial_Wall'
]
if parcellation == 'vertex':
medial_path = medial / f'{hemi}.Medial_wall.label'
dist = surface.get_surface_distance(getattr(surf, hemi),
medial=medial_path,
use_wb=False,
verbose=True)
else:
annot = _get_annot(parcellation, scale)
dist = surface.get_surface_distance(getattr(surf, hemi),
getattr(annot, hemi),
medial_labels=medial_labels,
use_wb=False,
verbose=True)
return dist
def make_surrogates(data, parcellation, scale, spatnull, fn=None):
if spatnull not in ('burt2018', 'burt2020', 'moran'):
raise ValueError(f'Cannot make surrogates for null method {spatnull}')
darr = np.asarray(data)
dmin = darr[np.logical_not(np.isnan(darr))].min()
surrogates = np.zeros((len(data), N_PERM))
for n, hemi in enumerate(('lh', 'rh')):
dist = get_distmat(hemi, parcellation, scale, fn=fn)
try:
idx = np.asarray([
                n for n, f in enumerate(data.index) if f.startswith(hemi)
])
hdata = np.squeeze(np.asarray(data.iloc[idx]))
except AttributeError:
idx = np.arange(n * (len(data) // 2), (n + 1) * (len(data) // 2))
hdata = np.squeeze(data[idx])
# handle NaNs before generating surrogates; should only be relevant
# when using vertex-level data, but good nonetheless
mask = np.logical_not(np.isnan(hdata))
surrogates[idx[np.logical_not(mask)]] = np.nan
hdata, dist, idx = hdata[mask], dist[np.ix_(mask, mask)], idx[mask]
if spatnull == 'burt2018':
# Box-Cox transformation requires positive data
hdata += np.abs(dmin) + 0.1
surrogates[idx] = \
burt.batch_surrogates(dist, hdata, n_surr=N_PERM, seed=SEED)
elif spatnull == 'burt2020':
if parcellation == 'vertex':
index = np.argsort(dist, axis=-1)
dist = np.sort(dist, axis=-1)
surrogates[idx] = \
mapgen.Sampled(hdata, dist, index, seed=SEED)(N_PERM).T
else:
surrogates[idx] = \
mapgen.Base(hdata, dist, seed=SEED)(N_PERM, 50).T
elif spatnull == 'moran':
dist = dist.astype('float64') # required for some reason...
np.fill_diagonal(dist, 1)
dist **= -1
mrs = moran.MoranRandomization(joint=True, n_rep=N_PERM,
tol=1e-6, random_state=SEED)
surrogates[idx] = mrs.fit(dist).randomize(hdata).T
return surrogates
def output_exists(data, parcellation, scale, spatnull, repeat):
"""
    Checks whether a given combination of inputs already exists in `data`
    Returns
    -------
    exists : bool
        Whether outputs have already been run (True) or not (False)
"""
if len(data) == 0:
return False
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
present = data.query(f'parcellation == "{parcellation}" '
f'& scale == "{scale}" '
f'& spatnull == "{spatnull}"')
    return len(present) > repeat
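# Hypothetical sketch (not in the original script): use output_exists() to
# skip parcellation/scale combinations whose runtimes are already stored in
# `data` (a list of result dicts) and run only the missing ones. The single
# 'naive-nonpara' method is chosen purely for illustration.
def _example_skip_completed(data, repeat=0):
    for parcellation, scale in PARCS:
        if output_exists(data, parcellation, scale, 'naive-nonpara', repeat):
            continue
        data.append(get_runtime(parcellation, scale, 'naive-nonpara'))
    return data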
def make_stripplot(fn):
"""
Makes stripplot of runtime for different spatial null models
Parameters
----------
fn : {'cached.csv', 'uncached.csv'}
Filename to load runtime data from
Returns
-------
ax : matplotlib.pyplot.Axes
Axis with plot
"""
data = | pd.read_csv(DATADIR / fn) | pandas.read_csv |
from typing import Union, Optional
import pytest
import scanpy as sc
import cellrank.external as cre
from anndata import AnnData
from cellrank.tl.kernels import ConnectivityKernel
from cellrank.external.kernels._utils import MarkerGenes
from cellrank.external.kernels._wot_kernel import LastTimePoint
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix, csr_matrix
from pandas.core.dtypes.common import is_categorical_dtype
from matplotlib.cm import get_cmap
from matplotlib.colors import to_hex
class TestOTKernel:
def test_no_connectivities(self, adata_large: AnnData):
del adata_large.obsp["connectivities"]
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
assert ok._conn is None
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
def test_method_not_implemented(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
with pytest.raises(
NotImplementedError, match="Method `'unbal'` is not yet implemented."
):
ok.compute_transition_matrix(1, 0.001, method="unbal")
def test_no_terminal_states(self, adata_large: AnnData):
with pytest.raises(RuntimeError, match="Unable to initialize the kernel."):
cre.kernels.StationaryOTKernel(
adata_large,
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
def test_normal_run(self, adata_large: AnnData):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
assert isinstance(ok, cre.kernels.StationaryOTKernel)
assert isinstance(ok._transition_matrix, csr_matrix)
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
assert isinstance(ok.params, dict)
@pytest.mark.parametrize("connectivity_kernel", (None, ConnectivityKernel))
def test_compute_projection(
self, adata_large: AnnData, connectivity_kernel: Optional[ConnectivityKernel]
):
terminal_states = np.full((adata_large.n_obs,), fill_value=np.nan, dtype=object)
ixs = np.where(adata_large.obs["clusters"] == "Granule immature")[0]
terminal_states[ixs] = "GI"
ok = cre.kernels.StationaryOTKernel(
adata_large,
terminal_states=pd.Series(terminal_states).astype("category"),
g=np.ones((adata_large.n_obs,), dtype=np.float64),
)
ok = ok.compute_transition_matrix(1, 0.001)
if connectivity_kernel is not None:
ck = connectivity_kernel(adata_large).compute_transition_matrix()
combined_kernel = 0.9 * ok + 0.1 * ck
combined_kernel.compute_transition_matrix()
else:
combined_kernel = ok
expected_error = (
r"<StationaryOTKernel> is not a kNN based kernel. The embedding projection "
r"only works for kNN based kernels."
)
with pytest.raises(AttributeError, match=expected_error):
combined_kernel.compute_projection()
class TestWOTKernel:
def test_no_connectivities(self, adata_large: AnnData):
del adata_large.obsp["connectivities"]
ok = cre.kernels.WOTKernel(
adata_large, time_key="age(days)"
).compute_transition_matrix()
assert ok._conn is None
np.testing.assert_allclose(ok.transition_matrix.sum(1), 1.0)
def test_invalid_solver_kwargs(self, adata_large: AnnData):
ok = cre.kernels.WOTKernel(adata_large, time_key="age(days)")
with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
ok.compute_transition_matrix(foo="bar")
def test_inversion_updates_adata(self, adata_large: AnnData):
key = "age(days)"
ok = cre.kernels.WOTKernel(adata_large, time_key=key)
assert is_categorical_dtype(adata_large.obs[key])
assert adata_large.obs[key].cat.ordered
np.testing.assert_array_equal(ok.experimental_time, adata_large.obs[key])
orig_time = ok.experimental_time
ok = ~ok
inverted_time = ok.experimental_time
assert | is_categorical_dtype(adata_large.obs[key]) | pandas.core.dtypes.common.is_categorical_dtype |
import time
import pandas as pd
from nltk import collections
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from text_cleaning import get_cleaned_text
def get_data_frame_stats(data_frame):
df_toxic = data_frame.drop(['text'], axis=1)
counts = []
categories = list(df_toxic.columns.values)
for i in categories:
counts.append((i, df_toxic[i].sum()))
df_stats = pd.DataFrame(counts, columns=['category', 'number_of_tweets'])
return df_stats
# topic_ids = ['World', "Business", "Sports", "SciTech"]
# with open('../dataset_news.json') as json_file:
# data = json.load(json_file)
#
# df_input = {'text': [], 'World': [], "Business": [], "Sports": [], "SciTech": []}
#
# for new in data:
# df_input['text'].append(new['content'])
#
# if len(new['annotation']['label']) > 1:
# print(new['content'])
# for topic_id in topic_ids:
# if topic_id == new['annotation']['label'][0]:
# df_input[topic_id].append(1)
# else:
# df_input[topic_id].append(0)
topic_ids = ['traffic', "sport", "work", "culture", "events", "politics"]
df = | pd.read_csv('../dataset.csv', delimiter=';') | pandas.read_csv |
import numpy as np
import glob
import pandas as pd
from datetime import datetime
indir = '/glade/work/lgaudet/research/data/'
timeFormat = '%Y-%m-%d %H:%M:%S UTC'
parseTime = lambda x: datetime.strptime(x, timeFormat)
df = pd.concat([pd.read_csv(f,parse_dates=['time'],date_parser=parseTime) for f in sorted(glob.glob(indir+'2017*.csv'))])
df.set_index(['station','time'], inplace = True)
mtime = pd.to_datetime(df.index.levels[1].values)
STN = df.index.levels[0].values.tolist()
nstn = len(STN)
ntime = len(mtime)
RH = np.zeros((nstn,ntime))
PRECIP_INC = np.zeros((nstn,ntime))
PRECIP_LOC = np.zeros((nstn,ntime))
PRECIP_MAX_INT = np.zeros((nstn,ntime))
TEMP_2M = np.zeros((nstn,ntime))
TEMP_9M = np.zeros((nstn,ntime))
PRESSURE = np.zeros((nstn,ntime))
WIND_DIRECTION = np.zeros((nstn,ntime))
MAX_WIND_SPEED = np.zeros((nstn,ntime))
AVG_WIND_SPEED = np.zeros((nstn,ntime))
for ii, stn in enumerate(STN):
RH[ii,:] = df.loc[stn,'relative_humidity [percent]'].values
PRECIP_INC[ii,:] = df.loc[stn,'precip_incremental [mm]'].values
PRECIP_LOC[ii,:] = df.loc[stn,'precip_local [mm]'].values
PRECIP_MAX_INT[ii,:] = df.loc[stn,'precip_max_intensity [mm/min]'].values
TEMP_2M[ii,:] = df.loc[stn,'temp_2m [degC]'].values
TEMP_9M[ii,:] = df.loc[stn,'temp_9m [degC]'].values
PRESSURE[ii,:] = df.loc[stn,'station_pressure [mbar]'].values
WIND_DIRECTION[ii,:] = df.loc[stn,'wind_direction_prop [degrees]'].values
MAX_WIND_SPEED[ii,:] = df.loc[stn,'max_wind_speed_prop [m/s]'].values
AVG_WIND_SPEED[ii,:] = df.loc[stn,'avg_wind_speed_prop [m/s]'].values
prcp_evol = np.zeros((nstn,ntime))
prcp_save = 0.
for tt in range(0,ntime):
prcp_evol[:,tt] = prcp_save + PRECIP_INC[:,tt]
prcp_save = prcp_evol[:,tt]
df = pd.read_csv(indir+'nysm.csv',index_col=0)
mstn = df.index.values.tolist()
mlat = df['lat [degrees]'].values
mlon = df['lon [degrees]'].values
def resample(var,times1,varname):
df = pd.DataFrame(times1,columns=['date'])
df['%s'%(varname)] = var
df['datetime'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
| assert_frame_equal(eta0, eta1) | pandas.testing.assert_frame_equal |
from cmath import nan
from sqlite3 import DatabaseError
import pandas as pd
import numpy as np
import json
def load_from_csv(path):
dt = pd.read_csv(path, sep=';', dtype={'matricule': object})
return dt.set_index('matricule')
def fix_matricule(matricule):
if matricule.startswith('195'):
return '19' + matricule[3:]
return matricule
def load_from_claco_csv(path):
df = pd.read_csv(path, delimiter=';')
df['matricule'] = df['username'].str.split('@', expand=True)[0]
df['name'] = df['firstname'] + " " + df['lastname']
df['grade'] = df['score'] / df['total_score_on']
df = df[['matricule', 'name', 'grade']]
df['matricule'] = df['matricule'].map(fix_matricule, na_action='ignore')
df = df.dropna(subset=['matricule'])
df = df.set_index('matricule')
return df
def capwords(S):
return ' '.join([w.capitalize() for w in S.split(' ')])
def save(df, path):
df.to_json(path, indent=4, force_ascii=False)
def combine(**kwargs):
res = pd.DataFrame()
for df in kwargs.values():
res = res.combine_first(df[['name']])
for name, df in kwargs.items():
res[name] = df['grade']
res[name] = res[name].fillna(0.0)
return res
def to_plus_ecam_csv(df: pd.DataFrame, activity_code, path=None):
if path is None:
path = activity_code + '.csv'
if 'status' in df:
df = pd.DataFrame(df[['grade', 'status']])
else:
df = pd.DataFrame(df[['grade']])
df['status'] = np.nan
df['stat'] = df['status'].map(to_plus_ecam_stat)
df['cote'] = df['grade']
df['ae'] = activity_code
df = pd.DataFrame(df[['ae', 'cote', 'stat']])
df.to_csv(path, sep=';', encoding='utf8', index_label='matricule')
def to_plus_ecam_stat(status):
if status == 'présent':
return None
if status == 'absent':
return 'a'
if status == 'malade':
return 'm'
return status
def from_auto_correction(path):
with open(path, encoding='utf8') as file:
students = json.load(file)['students']
if 'check' in students[0]:
grades = {student['student']['matricule']: student['check']['grade'] for student in students}
else:
grades = {student['student']['matricule']: student['grade'] for student in students}
names = {student['student']['matricule']: student['student']['name'] for student in students}
grades = pd.Series(grades)
names = | pd.Series(names) | pandas.Series |
#!/usr/bin/env python
"""
Parsing GO Accession from a table file produced by InterProScan and mapping to GOSlim.
(c) <NAME> 2018 / MIT Licence
kinomoto[AT]sakura[DOT]idv[DOT]tw
"""
from __future__ import print_function
from os import path
import sys
import pandas as pd
from goatools.obo_parser import GODag
from goatools.mapslim import mapslim
from joblib import Parallel, delayed
import optparse
p = optparse.OptionParser("%prog [options] <eggnog_diamond_file> <go_obo_file>")
p.add_option("-o", "--out", dest="output_filename", help="Directory to store " "the output file [default: GO_term_annotation.txt]", action="store", type="string", default="GO_term_annotation.txt")
p.add_option("-g", "--goslim", dest="goslim_obo_file", action="store",
help="The .obo file for the most current GO Slim terms "
"[default: Null]", type="string", default=None)
p.add_option("-O", "--goslim_out", dest="goslim_output_filename", action="store", help="Directory to store the output file [default: " "GOSlim_annotation.txt]", type="string", default="GOSlim_annotation.txt")
p.add_option("-t", "--goslim_type", dest="goslim_type", action="store", type="string", default="direct", help="One of `direct` or `all`. Defines "
"whether the output should contain all GOSlim terms (all "
"ancestors) or only direct GOSlim terms (only direct "
"ancestors) [default: direct]")
p.add_option("-s", "--sort", dest="is_sort", action="store_true", default=False, help="Sort the output table [default: False]")
opts, args = p.parse_args()
# check for correct number of arguments
if len(args) != 2:
p.print_help()
sys.exit(1)
interpro_file = args[0]
assert path.exists(interpro_file), "file %s not found!" % interpro_file
obo_file = args[1]
assert path.exists(obo_file), "file %s not found!" % obo_file
# check that --goslim is set
USE_SLIM = False
if (opts.goslim_obo_file is not None):
assert path.exists(opts.goslim_obo_file), "file %s not found!" % opts.goslim_obo_file
USE_SLIM = True
# check that slim_out is either "direct" or "all" and set the corresponding flag
if opts.goslim_type.lower() == "direct":
ONLY_DIRECT = True
elif opts.goslim_type.lower() == "all":
ONLY_DIRECT = False
else:
p.print_help()
sys.exit(1)
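# Hypothetical illustration (assumed goatools call pattern, not original code)
# of how ONLY_DIRECT is typically applied once the GO and GOSlim DAGs are
# loaded below:
#   direct_anc, all_anc = mapslim(go_acc, go, goslim_dag)
#   slim_terms = direct_anc if ONLY_DIRECT else all_anc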
# load InterProScan_tsv_file
interpro_table = pd.read_csv(interpro_file, sep='\t',skiprows=3,skipfooter=3,engine='python')
#interpro_go = interpro_table[['#query_name', 'GO_terms']]
all_protein=list(interpro_table['#query_name'])
gos=list(interpro_table['GO_terms'])
# load obo files
go = GODag(obo_file, load_obsolete=True)
output_hd = ['Protein Accession', 'GO Category', 'GO Accession', 'GO Description', 'GO Level']
output_table = | pd.DataFrame(columns=output_hd) | pandas.DataFrame |
"""
Functions to clean up neighborhood data
and feed into interactive charts
"""
import numpy as np
import pandas as pd
from datetime import date, timedelta
S3_FILE_PATH = "s3://public-health-dashboard/jhu_covid19/"
NEIGHBORHOOD_URL = f"{S3_FILE_PATH}la-county-neighborhood-time-series.parquet"
CROSSWALK_URL = f"{S3_FILE_PATH}la_neighborhoods_population_crosswalk.parquet"
NEIGHBORHOOD_APPENDED_URL = f"{S3_FILE_PATH}la-county-neighborhood-testing-appended.parquet"
def clean_data():
df = pd.read_parquet(NEIGHBORHOOD_URL)
crosswalk = | pd.read_parquet(CROSSWALK_URL) | pandas.read_parquet |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal( | concat((df1, df2), ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
import sys, os, time
sys.dont_write_bytecode = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from datetime import datetime, timedelta
import tensorflow as tf
import numpy as np
import pandas as pd
import gym
import retro
import retro_contest.local
from policy import Policy
from actor import Actor, process_state
# NOTE: Env_Controller is instantiated below but was not imported in the
# original file; the module name used here is an assumption.
from env_controller import Env_Controller
def main():
#train/test vars
recover=True
train_steps = 10000 * 12 #batches per hour * hours
train_time = 0 #seconds
test_render = False
test_episodes = 3
print_interval = 10000 #FIXME saves on this interval also
n_actors = 1
gym_envs = False
#output an int action if true, else one hot action
act_int = True if gym_envs else False
#init env
#def get_env(seed=42):
# if gym_envs:
# env_name = ['CartPole-v0', 'Acrobot-v1'][0]
# env = gym.make(env_name)
# env.seed(seed)
# return env
# else:
# env_name = 'SonicTheHedgehog-Genesis'
# env_state = 'GreenHillZone.Act1'
# env = retro_contest.local.make(game=env_name, state=env_state)
# env.seed(seed)
# return env
#get some info for the policy, init the emulator wrapper
env = Env_Controller('./sonic-train.csv',
init=('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1'))
state = env.reset()
n_actions = env.action_space.n
state_shape = process_state(state).shape
#init the session
with tf.Session() as sess:
#global learner
learner_name = 'learner_global'
learner_policy = Policy(state_shape, n_actions, learner_name,
act_int=act_int, recover=True, sess=sess)
actors = []
for i in range(n_actors):
#FIXME: for now just stay synchronous
#only one emulator allowed per thread
#env = get_env(42 + i)
actor_policy = Policy(state_shape, n_actions,
'actor_%s' % i, act_int=act_int, sess=sess,
pull_scope=learner_name)
actor = Actor(env, actor_policy, learner_policy)
actors.append(actor)
#FIXME: new thread for each actor
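        # Hypothetical sketch of the threaded variant hinted at above -- one
        # daemon thread per actor feeding the shared learner (assumes the
        # learner's train() is safe to call concurrently; not original code):
        #   import threading
        #   def _actor_loop(actor):
        #       while True:
        #           learner_policy.train(actor.run())
        #   for actor in actors:
        #       threading.Thread(target=_actor_loop, args=(actor,),
        #                        daemon=True).start()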
end_time = datetime.now() + timedelta(seconds=train_time)
print('[*] training for %ss with %s actors (ending at %s)' % (
train_time, len(actors), '{:%H:%M:%S}'.format(end_time)))
start_time = time.time()
start_step = learner_policy.get_step()
#while(train_time > (time.time() - start_time)):
while(True):
for actor in actors:
batch = actor.run()
learner_policy.train(batch)
step = learner_policy.get_step()
if step % print_interval == 0:
learner_policy.save()
print('[*] learner step: %s' % step)
info = actor.test(episodes=3, render=False)
print(pd.DataFrame.from_dict(data=info
).describe().loc[['min', 'max', 'mean', 'std']])
#FIXME: debug limit train steps
if (step - start_step) >= train_steps:
break
steps_per_s = float(learner_policy.get_step() - start_step) / float(
time.time() - start_time)
steps_per_min = round(steps_per_s * 60, 3)
steps_per_sec = round(steps_per_s, 3)
print('[+] done training: %s steps/min (%s /s)' % (steps_per_min,
steps_per_sec))
learner_policy.save()
#print('learner at: %s' % learner_policy.get_step())
#print('actor at: %s' % actor_policy.get_step())
#test with clean actor, clean environment
print('[*] testing for %s episodes per state' % test_episodes)
#env = get_env(429)
env.keep_env = True
actor_policy = Policy(state_shape, n_actions,
'actor_test', act_int=act_int, sess=sess)
actor = Actor(env, actor_policy, learner_policy)
all_stats = {g: {k:[] for k in env.game_states[g]}
for g in env.game_states.keys()}
for game in env.game_states.keys():
for state in env.game_states[game]:
print('[*] testing %s (%s)' % (game, state))
actor.env.switch_env(game, state)
info = actor.test(episodes=3,
render=test_render)
#extract some basic stats from raw results
all_stats[game][state] = pd.DataFrame.from_dict(data=info
).describe().loc[['min', 'max', 'mean', 'std']]
#change column names to include state name
all_stats[game][state].columns = ['%s_%s' % (state, x)
for x in all_stats[game][state].columns.values]
#print(all_stats[game][state], '\n')
#output stats FIXME: write dfs_concat, calc_* to disk
cols = {'rewards': [], 'steps': []}
for game in all_stats:
print(game, (79-len(game))*'*')
dfs = [all_stats[game][state] for state in all_stats[game]]
dfs_concat = | pd.concat(dfs, axis=1) | pandas.concat |
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
from scipy import integrate
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"data": DataType.Series, "speed": DataType.Any},
outputs={"result": DataType.Series},
)
def main(*, data, speed):
"""entrypoint function for this component
Usage example:
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:10": 3.0,
... "2019-08-01T15:20:11": 22.0,
... "2019-08-01T15:20:14": 18.0,
... "2019-08-01T15:20:16": 2.0,
... "2019-08-01T15:20:18": 7.0,
... "2019-08-01T15:20:22": 12.0,
... "2019-08-01T15:20:24": 15.0,
... "2019-08-01T15:20:26": 18.0,
... }
... ),
... speed = 5
... )["result"]
0.0 3.0
5.0 22.0
20.0 18.0
30.0 2.0
40.0 7.0
60.0 12.0
70.0 15.0
80.0 18.0
dtype: float64
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:10": 3.0,
... "2019-08-01T15:20:11": 22.0,
... "2019-08-01T15:20:14": 18.0,
... "2019-08-01T15:20:16": 2.0,
... "2019-08-01T15:20:18": 7.0,
... "2019-08-01T15:20:22": 12.0,
... "2019-08-01T15:20:24": 15.0,
... "2019-08-01T15:20:26": 18.0,
... }
... ),
... speed = pd.Series(
... {
... "2019-08-01T15:20:10": 1.0,
... "2019-08-01T15:20:11": 3.0,
... "2019-08-01T15:20:14": 4.0,
... "2019-08-01T15:20:16": 2.0,
... "2019-08-01T15:20:18": 0.0,
... "2019-08-01T15:20:22": 2.0,
... "2019-08-01T15:20:24": 4.0,
... "2019-08-01T15:20:26": 5.0,
... }
... )
... )["result"]
0.0 3.0
2.0 22.0
12.5 18.0
18.5 2.0
20.5 7.0
24.5 12.0
30.5 15.0
39.5 18.0
dtype: float64
"""
# ***** DO NOT EDIT LINES ABOVE *****
data_sort = data.copy()
try:
data_sort.index = pd.to_datetime(data_sort.index)
except (ValueError, TypeError):
raise TypeError("indices of data must be datetime")
if isinstance(speed, (int, float, bool)):
data_sort = data_sort.sort_index()
time_norm = (data_sort.index - data_sort.index[0]).total_seconds()
length = | pd.Series(speed * time_norm, index=data_sort.index) | pandas.Series |
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_columns(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"])
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_rows(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, row_filter="salary > 150000")
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_dict_limit(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"], limit=10)
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == 10
def test_read_to_dict_sample(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, sample=10)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
def test_read_to_dict_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_dict("/tmp")
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir))
backend.write_replace("data.parquet", sample_dataframe)
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data.parquet")
assert_frame_equal(dataframe_after, sample_dataframe)
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df_multifile(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), rows_per_file=1000)
backend.write_replace("data", sample_dataframe)
assert sum(1 for _ in (tempdir / "data").glob("*")) == 5, "There should be 5 files"
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
assert_frame_equal(dataframe_after, sample_dataframe)
@pytest.mark.parametrize("old_content", [False, True])
def test_write_replace_df_partitioned(sample_dataframe, tmp_path_factory, old_content):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
if old_content:
(tempdir / "data").mkdir()
(tempdir / "data" / "old.parquet").open("w").close()
backend = dframeio.ParquetBackend(str(tempdir), partitions=["gender"])
backend.write_replace("data", sample_dataframe)
created_partitions = {f.name for f in (tempdir / "data").glob("*=*")}
assert created_partitions == {"gender=", "gender=Female", "gender=Male"}
if old_content:
assert not (tempdir / "data" / "old.parquet").exists()
backend2 = dframeio.ParquetBackend(str(tempdir))
dataframe_after = backend2.read_to_pandas("data")
# It is o.k. to get the partition keys back as categoricals, because
# that's more efficient. For comparison we make the column string again.
dataframe_after = dataframe_after.assign(gender=dataframe_after["gender"].astype(str))
assert_frame_equal(
dataframe_after,
sample_dataframe,
check_like=True,
)
@pytest.mark.parametrize("partitions", [[5], ["foobar"]])
def test_write_replace_df_invalid_partitions(tmp_path_factory, partitions):
"""Write the dataframe, read it again and check identity"""
tempdir = tmp_path_factory.mktemp("test_write_replace_df")
backend = dframeio.ParquetBackend(str(tempdir), partitions=partitions)
with pytest.raises(ValueError):
backend.write_replace("data.parquet", | pd.DataFrame() | pandas.DataFrame |
import pandas as pd # version 1.0.1
# in pandas 1.1.4 the dates for INTESA and BMG don't work after the merge into "final"
from datetime import datetime
# TODO find repetitions and replace them with functions
# for example Santander and CITI files import and adjustment
# or date and amount formatting
pd.options.display.float_format = '{:,.2f}'.format
# don't hide columns
pd.set_option('display.expand_frame_repr', False)
# import list of bank accounts into data frame
balances = pd.read_csv('list_of_accounts.csv', sep=';')
balances = balances.set_index('Account')
print('table with the list of all active bank accounts\n')
print(balances)
print('\nlist of columns\n')
print(balances.columns)
print()
# import bits of information from ING
ing = pd.read_csv('ING_transakcje_zamkniecie.csv', sep=';', encoding='ANSI',
usecols=['rachunek ING "NRB" (26 znaków)', 'saldo końcowe',
'waluta operacji', 'data wyciągu'])
print('\nNOTE: the ING file sometimes contains errors and the columns get misaligned. '
'They have to be fixed in Excel first.\n')
print('\nING data before adjustments\n')
print(ing)
ing = ing.rename(columns={'rachunek ING "NRB" (26 znaków)': "Account",
"saldo końcowe": "saldo",
'data wyciągu': 'data',
'waluta operacji': 'Currency'})
ing = ing.set_index('Account')
# amount format adjusted
# empty cells need to be removed before the next steps to avoid errors
ing = ing.dropna(subset=['saldo'])
ing['saldo'] = ing['saldo'].apply(lambda x: x.replace(',', '.'))
ing['saldo'] = pd.to_numeric(ing['saldo'])
# date format adjusted
ing['data'] = pd.to_datetime(ing['data'], format='%y-%m-%d')
# sorting is necessary to catch the newest values
ing.sort_values(by=['data'], inplace=True, ascending=False)
print()
# index has to be removed for a while to delete the duplicates
ing = ing.reset_index().drop_duplicates(subset='Account',
keep='first').set_index('Account')
print('\nING data with duplicated index entries removed\n', ing, '\n')
# import bits of information from CITI bank
citifilename = 'CITI_salda_zamkniecie.csv'
colnames = ['Account', 'klient', 'saldo', 'Currency', 'data',
'nazwa_rach', 'nazawa_od', 'oddzial']
citi = pd.read_csv(citifilename, names=colnames, skiprows=1,
parse_dates=True, dayfirst=True)
citi = citi.drop(['klient', 'nazwa_rach', 'nazawa_od', 'oddzial'], axis=1)
# date format adjusted
citidtm = lambda x: datetime.strptime(str(x), "%d/%m/%Y") # 02/08/2019
citi['data'] = citi['data'].apply(citidtm)
citi['data'] = pd.to_datetime(citi['data'])
citi['Account'] = citi['Account'].apply(lambda x: x.replace(' ', ''))
citi = citi.set_index('Account')
print('\nchecking what is loaded from CITI\n', citi, '\n')
# import bits of information from Santander bank
# "skiprows" need to be updated if we close or open some bank accounts
santanderfilename = 'Santander_salda_zamkniecie.csv'
san = pd.read_csv(santanderfilename, skiprows=[0, 1, 17, 18, 19],
usecols=['Data', 'Numer rachunku', 'Saldo', 'Unnamed: 8'],
parse_dates=True, sep=';', encoding='ANSI', )
santandervatfilename = 'Santander_VAT_salda_zamkniecie.csv'
sanvat = pd.read_csv(santandervatfilename, skiprows=[0, 1, 6, 7, 8],
usecols=['Data', 'Numer rachunku', 'Saldo', 'Unnamed: 8'],
parse_dates=True, sep=';', encoding='ANSI', )
san_tot = pd.concat([san,sanvat])
san_tot = san_tot.rename(columns={'Numer rachunku': "Account",
"Saldo": "saldo",
'Data': 'data',
'Unnamed: 8': 'Currency'})
san_tot['saldo'] = san_tot['saldo'].apply(lambda x: x.replace(' ', ''))
san_tot['saldo'] = san_tot['saldo'].apply(lambda x: x.replace(',', '.'))
san_tot['saldo'] = pd.to_numeric(san_tot['saldo'])
san_tot['Account'] = san_tot['Account'].apply(lambda x: x.replace(' ', ''))
san_tot = san_tot.set_index('Account')
san_tot['data'] = pd.to_datetime(san_tot['data'], format='%Y-%m-%d')
# In the Santander file the date is present only in the first row,
# so it has to be forward-filled into the following rows
san_tot['data'] = san_tot['data'].fillna(method="ffill")
print()
print('checking what we have from Santander\n', san_tot, '\n')
# import bits of information from Santander bank
bmgfilename = 'BMG_salda_zamkniecie.csv'
bmg = pd.read_csv(bmgfilename, skiprows=range(0, 15),
usecols=['Account number', 'Currency',
'Closing', 'Closing book balance'],
parse_dates=True, sep=';', encoding='ANSI', )
bmg = bmg.rename(
columns={'Account number': "Account", "Closing book balance": "saldo",
'Closing': 'data'})
bmg = bmg.set_index('Account')
bmg['data'] = pd.to_datetime(bmg['data'],
format='%Y-%m-%d')
print('\nchecking what is loaded from BMG\n\n', bmg, '\n\n')
# import bits of information from INTESA bank
intesafilename = 'INTESA_salda_zamkniecie.csv'
intesa = pd.read_csv(intesafilename, parse_dates=True, sep=';', encoding='ANSI')
intesa = intesa.set_index('Account')
intesa['data'] = pd.to_datetime(intesa['data'], format='%Y-%m-%d')
print('\nchecking what is loaded from INTESA\n\n', intesa, '\n\n')
# merge all tables
print('\nmerged tables\n')
final = balances.merge(ing[['data', 'saldo']], on='Account', how='outer')
final = final.fillna(citi)
final = final.fillna(san_tot)
final = final.fillna(bmg)
final = final.fillna(intesa)
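# Note (added for clarity): the chain of fillna(...) calls above acts as a
# cascading lookup - any account still missing after the ING merge is filled
# from CITI, then Santander, then BMG, then INTESA, aligned on the Account index.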
# date format corrected
final['data'] = pd.to_datetime(final['data'].dt.strftime('%Y/%m/%d'))
print(final)
# Add deposits from CITI
citidepofilename = 'CITI_depozyty_zamkniecie.csv'
colnames = ['Account', 'klient', 'depozyt', 'Currency', 'opis', 'beneficjent',
'data', 'data_waluty', 'bzdet_1', 'bzdet_2']
citidep = pd.read_csv(citidepofilename, names=colnames, skiprows=1,
parse_dates=True, dayfirst=True)
citidep = citidep.drop(
['klient', 'Currency', 'opis', 'beneficjent', 'data_waluty', 'bzdet_1',
'bzdet_2'], axis=1)
citidtm = lambda x: datetime.strptime(str(x), "%d/%m/%Y")
citidep['data'] = citidep['data'].apply(citidtm)
citidep['data'] = pd.to_datetime(citidep['data'])
import pandas as pd
import os
import sys
import numpy as np
import argparse
import librosa
import soundfile as sf
def shift_pitch(data, sampling_rate, pitch_factor):
# negative pitch factor makes the voice sound lower
# positive pitch factor makes the voice sound higher
return librosa.effects.pitch_shift(data, sr=sampling_rate, n_steps=pitch_factor)
# stretching the sound
def stretch(data, rate=1):
return librosa.effects.time_stretch(data, rate=rate)
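# Illustrative usage of the two augmentations above (a sketch, not part of the
# original script; the file name and factors are made up):
#   y, sr = librosa.load("example.wav")   # hypothetical input file
#   lower = shift_pitch(y, sr, -2)        # two semitones lower
#   slower = stretch(y, rate=0.9)         # ~10% slower, pitch unchanged
# Negative pitch factors lower the voice, positive ones raise it; a stretch
# rate below 1 slows the audio down, above 1 speeds it up.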
def create_augment_wav_files(df, pitch_factors, data_dir, output_dir, spk_id):
augment_wav_output_dir = output_dir
new_list = []
for idx, row in df.iterrows():
wav_file_path = os.path.join(data_dir, row["音檔"])
utt_id = row["音檔"].split("/")[-1].split(".")[0]
row["講者"] = spk_id
# add original row to new dataframe
new_list.append(row)
# Load the audio as a waveform `y`
# Store the sampling rate as `sr`
y, sr = librosa.load(wav_file_path)
for n_step in pitch_factors:
# string to int
n_step = int(n_step)
aug_utt_id = f"{utt_id}_shift{abs(n_step)}"
aug_spk_id = f"{spk_id}_shift{abs(n_step)}"
out_wav_file_path = os.path.join(augment_wav_output_dir, aug_utt_id+".wav")
shifted_y = shift_pitch(y, sr, n_step)
# Write out audio as 24bit PCM WAV
sf.write(out_wav_file_path, shifted_y, sr, subtype='PCM_24')
# new row
new_row = row.copy()
new_row["音檔"] = os.path.join("augmented", aug_utt_id+".wav")
new_row["講者"] = aug_spk_id
# add row to new dataframe
new_list.append(new_row)
new_df = pd.DataFrame(new_list)
return new_df
def main(args):
# downloads/0.2.1/augmented
augment_wav_output_dir = os.path.join(args.data_dir, "augmented")
if not os.path.isdir(augment_wav_output_dir):
print(f"Create directory:{augment_wav_output_dir}")
os.makedirs(augment_wav_output_dir)
df = pd.read_csv(os.path.join(args.data_dir, "SuiSiann.csv"))
# suisiann only has one speaker
spk_id = "spk001"
new_list = []
for idx, row in df.iterrows():
wav_file_path = os.path.join(args.data_dir, row["音檔"])
utt_id = row["音檔"].split("/")[-1].split(".")[0]
row["講者"] = spk_id
# add original row to new dataframe
new_list.append(row)
# Load the audio as a waveform `y`
# Store the sampling rate as `sr`
y, sr = librosa.load(wav_file_path)
for n_step in args.pitch_factors:
# string to int
n_step = int(n_step)
aug_utt_id = f"{utt_id}_shift{abs(n_step)}"
aug_spk_id = f"{spk_id}_shift{abs(n_step)}"
out_wav_file_path = os.path.join(augment_wav_output_dir, aug_utt_id+".wav")
shifted_y = shift_pitch(y, sr, n_step)
# Write out audio as 24bit PCM WAV
sf.write(out_wav_file_path, shifted_y, sr, subtype='PCM_24')
# new row
new_row = row.copy()
new_row["音檔"] = os.path.join("augmented", aug_utt_id+".wav")
new_row["講者"] = aug_spk_id
# add row to new dataframe
new_list.append(new_row)
# save to new csv file
new_df = pd.DataFrame(new_list)
#!/usr/bin/env python3
import argparse
import os
import sys
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import pandas as pd
from brainsmash.workbench.geo import volume
from brainsmash.mapgen.eval import sampled_fit
from brainsmash.mapgen.sampled import Sampled
LAST_SES = 10 # 10
ATLAS_LIST = ['Mutsaerts2015-hem', 'Yeo2011-7', 'Subcortical'] # 'Mutsaerts2015-sub', 'Mutsaerts2015', 'Schaefer2018-100',
SET_DPI = 100
FIGSIZE = (18, 10)
COLOURS = ['#1f77b4ff', '#2ca02cff', '#d62728ff', '#ff7f0eff', '#ff33ccff']
ATLAS_FILE = {'Mutsaerts2015': 'ATTbasedFlowTerritories_resamp_2.5mm',
'Mutsaerts2015-hem': 'ATTbasedFlowTerritories_resamp_2.5mm_hem',
'Mutsaerts2015-sub': 'ATTbasedFlowTerritories_resamp_2.5mm_sub_ceb',
'Subcortical': 'subcortical_cerebellum_2.5mm',
'Yeo2011-7': 'Yeo2011-7_2.5mm',
'Schaefer2018-100': 'Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2.5mm'}
ATLAS_DICT = {'Mutsaerts2015': 'Mutsaerts (vascular)',
'Mutsaerts2015-hem': 'Mutsaerts (vascular)',
'Mutsaerts2015-sub': 'Mutsaerts (vascular + subcortical)',
'Subcortical': 'Subcortical + cerebellum',
'Yeo2011-7': 'Yeo (functional)',
'Schaefer2018-100': 'Schaefer (functional)'}
ATLAS_FOLDER = os.path.join('CVR_reliability', 'Atlas_comparison')
LAST_SES += 1
DISTMAP = '/home/nemo/Scrivania/Test_workbench/CVR_reliability/Atlas_comparison/mmdist/distmat.npy'
INDEX = '/home/nemo/Scrivania/Test_workbench/CVR_reliability/Atlas_comparison/mmdist/index.npy'
SURROGATES_PR = '/home/nemo/Scrivania/Test_workbench/CVR_reliability/Atlas_comparison/surrogates_'
#########
# Utils #
#########
def _get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-in', '--input-file',
dest='data_fname',
type=str,
help='The map you want to scramble',
required=True)
parser.add_argument('-type', '--input-type',
dest='data_content',
type=str,
help='The type of data represented in the map you want '
'to scramble',
default='')
parser.add_argument('-wdr', '--workdir',
dest='wdr',
type=str,
help='Workdir.',
default='/data')
parser.add_argument('-sdr', '--scriptdir',
dest='scriptdir',
type=str,
help='Script directory.',
default='/scripts')
parser.add_argument('-ow', '--overwrite',
dest='overwrite',
action='store_true',
help='Overwrite previously computed distances.',
default=False)
parser.add_argument('-nm', '--num-null-maps',
dest='null_maps',
type=int,
help='Number of surrogate maps to generate. '
'Default is 1000.',
default=1000)
parser.add_argument('-pn', '--plotname',
dest='plot_name',
type=str,
help='Plot name. Default: plots/plot.',
default='')
parser.add_argument('-en', '--exportname',
dest='export_name',
type=str,
help='Name of nifti quantile export. Default: empty.',
default='')
parser.add_argument('-nj', '--jobnumber',
dest='n_jobs',
type=int,
help='Number of jobs to use to parallelise computation. Default: 1.',
default=1)
# Workflows
parser.add_argument('-ga', '--genatlas',
dest='genatlas',
action='store_true',
help='Generate atlas dictionary.',
default=False)
parser.add_argument('-cd', '--compdist',
dest='compdist',
action='store_true',
help='Compute distance and create memory-mapped file.',
default=False)
parser.add_argument('-ev', '--evalvars',
dest='evalvars',
action='store_true',
help='Evaluate variograms.',
default=False)
parser.add_argument('-gs', '--gensurr',
dest='gensurr',
action='store_true',
help='Generate surrogates and plots.',
default=False)
parser.add_argument('-pa', '--parseavg',
dest='parseavg',
action='store_true',
help='parse averages.',
default=False)
parser.add_argument('-pp', '--plotparc',
dest='plotparc',
action='store_true',
help='Generate plots.',
default=False)
parser.add_argument('-eq', '--exportrank',
dest='exportqnt',
action='store_true',
help='Generate rank niftis.',
default=False)
return parser
def export_file(wdr, fname, ex_object):
ex_file = os.path.join(wdr, ATLAS_FOLDER, fname)
os.makedirs(os.path.join(wdr, ATLAS_FOLDER), exist_ok=True)
np.savez_compressed(ex_file, ex_object)
def check_file(wdr, fname):
in_file = os.path.join(wdr, ATLAS_FOLDER, fname)
return os.path.isfile(in_file)
def load_file(wdr, fname):
in_file = os.path.join(wdr, ATLAS_FOLDER, fname)
read_in = np.load(in_file, allow_pickle=True)['arr_0']
if read_in.shape == ():
return read_in[..., np.newaxis][0]
else:
return read_in
def load_and_mask_nifti(data_fname, atlases):
data_img = nib.load(f'{data_fname}.nii.gz')
data = data_img.get_fdata()
if len(data.shape) == 5:
data = data[:, :, :, 0, 0]
elif len(data.shape) == 4:
data = data[:, :, :, 0]
elif len(data.shape) > 5:
raise Exception('Something is wrong with the nifti dimensions')
return data[atlases['intersect'] > 0]
#############
# Workflows #
#############
def generate_atlas_dictionary(wdr, scriptdir, overwrite=False):
# Check that you really need to do this
if overwrite is True or check_file(wdr, 'atlases.npz') is False:
# Create data dictionary
atlases = dict.fromkeys(ATLAS_LIST)
print('Read and intersect atlases')
# Read atlases
for atlas in ATLAS_LIST:
atlas_img = nib.load(os.path.join(scriptdir, '90.template',
f'{ATLAS_FILE[atlas]}.nii.gz'))
atlases[atlas] = atlas_img.get_fdata()
# Create intersection of atlases
atlases['intersect'] = atlases[ATLAS_LIST[0]].copy()
for atlas in ATLAS_LIST[1:]:
atlases['intersect'] = atlases['intersect'] + atlases[atlas]
# Export atlases
export_file(wdr, 'atlases', atlases)
else:
print(f'Found existing atlases dictionary in {wdr}, '
'loading instead of generating.')
atlases = load_file(wdr, 'atlases.npz')
return atlases
def compute_distances(wdr, atlases, overwrite=False):
# Check that you really need to do this
# distmap = os.path.join('mmdist', 'distmap.npy')
distmap = DISTMAP
# if overwrite is True or check_file(wdr, distmap) is False:
if overwrite is True or os.path.isfile(distmap) is False:
coord_dir = os.path.join(wdr, ATLAS_FOLDER, 'mmdist')
# Create folders
os.makedirs(coord_dir, exist_ok=True)
print('Computing volume distance')
# Get position of the voxels in the atlas intersection
coordinates = np.asarray(np.where(atlases['intersect'] > 0)).transpose()
dist_fname = volume(coordinates, coord_dir)
else:
# distmap = os.path.join(wdr, ATLAS_FOLDER, distmap)
# index = os.path.join(wdr, ATLAS_FOLDER, 'mmdist', 'index.npy')
index = INDEX
print('Distance memory mapped file already exists. Skip computation!')
dist_fname = {'D': distmap, 'index': index}
return dist_fname
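# Note (added for clarity): compute_distances either builds the memory-mapped
# voxel-to-voxel distance matrix with brainsmash.workbench.geo.volume, or, if
# the files already exist, simply returns their paths. The returned dict has
# the form expected by brainsmash's Sampled / sampled_fit, e.g.
# {'D': '.../distmat.npy', 'index': '.../index.npy'}.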
def evaluate_variograms(data_fname, atlases, dist_fname, wdr, **kwargs):
# Read data and feed surrogate maps
data_masked = load_and_mask_nifti(data_fname, atlases)
print(f'Evaluating variogram for {data_fname}')
sampled_fit(x=data_masked, D=dist_fname['D'], index=dist_fname['index'],
nsurr=50, **kwargs)
ex_file = os.path.join(wdr, ATLAS_FOLDER, f'{data_fname}_variogram.png')
plt.savefig(ex_file)
plt.close('all')
def generate_surrogates(data_fname, atlases, dist_fname, n_jobs, null_maps, wdr, overwrite=False):
data_masked = load_and_mask_nifti(data_fname, atlases)
# Check that data_fname doesn't contain folders.
data_fname = os.path.basename(data_fname)
surrogate_fname = f'surrogates_{data_fname}'
# if overwrite is True or os.path.isfile(f'{surrogate_fname}.npz') is False:
if overwrite is True or check_file(wdr, f'{surrogate_fname}.npz') is False:
# Read data and feed surrogate maps
print(f'Start surrogates for {data_fname}')
gen = Sampled(x=data_masked, D=dist_fname['D'],
index=dist_fname['index'], seed=42, n_jobs=n_jobs)
surrogate_maps = gen(n=null_maps)
# Export atlases
export_file(wdr, surrogate_fname, surrogate_maps)
print('Resample surrogates')
sorted_map = np.sort(data_masked)
ii = np.argsort(surrogate_maps)
surrogate_resamp = sorted_map[ii]
export_file(wdr, f'{surrogate_fname}_resamp', surrogate_resamp)
else:
print(f'Surrogates found at {surrogate_fname}_resamp.npz. Loading.')
surrogate_resamp = load_file(wdr, f'{surrogate_fname}_resamp.npz')
return surrogate_resamp, data_masked
def parse_averages(wdr, data_fname, null_maps, atlases, surrogate_maps, data_masked, overwrite=False):
# Compute averages and store them in pandas dataframes
# Then compute rank and store them in other pd.DataFrame
# if overwrite is True or os.path.isfile(f'{surrogate_fname}.npz') is False:
if overwrite is True or check_file(wdr, f'{data_fname}_relvar_no_resamp.npz') is False:
# Setup pandas df
print('Computing averages')
df_dict = dict.fromkeys(ATLAS_LIST)
rank_dict = dict.fromkeys(ATLAS_LIST)
for atlas in ATLAS_LIST:
# Mask atlas to match data_masked
atlas_masked = atlases[atlas][atlases['intersect'] > 0]
# Find unique values (labels) and remove zero
unique, _ = np.unique(atlas_masked, return_counts=True)
unique = unique[unique > 0]
# Initialise dataframe and dictionary for series
df_dict[atlas] = pd.DataFrame(index=unique)
import numpy as np
import pandas as pd
import os
# http://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
# Load .csv file
path = 'german/german_final.csv'
data = pd.read_csv(path, header=None)
print(data)
# One-hot-encoding of categorical attributes
# https://stackoverflow.com/questions/37292872/how-can-i-one-hot-encode-in-python
def encode_and_bind(original_dataframe, feature_to_encode):
dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
res = pd.concat([original_dataframe, dummies], axis=1)
res = res.drop([feature_to_encode], axis=1)
return(res)
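# Illustrative example (not from the original script): for a hypothetical
# column 0 holding the categories A11/A12, encode_and_bind(data, 0) drops
# column 0 and appends the pd.get_dummies indicator columns "0_A11" and
# "0_A12", leaving every other column untouched.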
attributes_to_encode = [0,2,3,5,6,9,11,13,14,16,18,19]
for attribute in attributes_to_encode:
data = encode_and_bind(data, attribute)
# Group classes (i.e. [A91, A93, A94] as male (0), [A92, A95] as female (1))
data[8] = data[8].map({'A91':0, 'A92':1, 'A93':0, 'A94':0, 'A95':1})
# To increase readability, map a good risk value (1) to 0 and a bad risk value (2) to 1
data[20] = data[20].map({1: 0, 2:1})
print('Mapped Dataset:\n')
print(data)
# Check NaN rows and drop them
data.dropna(inplace=True)
# Shuffle Dataset
#data_shuffled = data.sample(frac=1, random_state=0)
# Create advantaged and disadvantaged group labels: males (A91/A93/A94) were mapped to 0, females (A92/A95) to 1
group_label = data[8].to_numpy()
print(f'group_label shape: {group_label.shape}\n')
print(f'group_label: {group_label}\n')
# Standardize
data_normalized=(data-data.mean())/data.std()
print(f'{data_normalized}\n')
# Restore the original (unstandardized) label column
data_normalized[20] = data[20]
# Move label column to last column
label = data_normalized.pop(20)
data_normalized = pd.concat([data_normalized, label], axis=1)
# IPython log file
get_ipython().run_line_magic('logstart', '')
import pandas as pd
from pandas import Series,DataFrame
obj = Series(['c','a','d','a','a','b','b','c','c'])
obj
uniques = obj.unique()
uniques = obj.unique()
uniques
obj.value_counts
obj.value_counts()
obj.values()
obj.values
pd.value_counts(obj.values, sort=False)
# authors_name = '<NAME>'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '<EMAIL>'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
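# Illustrative example (not from the original script): for the hypothetical
# modalities ["RGB", "Depth"], list_combinations_generator returns
# [["RGB"], ["Depth"], ["RGB", "Depth"]] - every non-empty subset, ordered by
# combination length.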
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
# Since, length of modalities in each combination is different. Hence, if length of modalities is
# greater than 1, then the imported skeleton point information for other modalities will be merged to
# the skeleton point information for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
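# Note (added for clarity): precision, recall and f1 use average='weighted'
# (per-class scores weighted by class support) and are restricted to the
# labels that were actually predicted; every score is returned as a percentage
# rounded to three decimals, so perfect predictions would give 100.0 for each key.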
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
# For gradient_boosting_classifier, the hyperparameters tuned are loss, n_estimators, criterion, and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
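# Illustrative sketch (not from the original script) of how such a grid is
# expanded with sklearn's ParameterGrid, which is imported above:
#   for combo in ParameterGrid(retrieve_hyperparameters('support_vector_classifier')):
#       print(combo)   # {'kernel': 'linear'}, {'kernel': 'poly'}, {'kernel': 'rbf'}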
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple contains the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
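# Note (added for clarity): the function above performs simple majority voting -
# each frame of a video gets its own prediction, and the label predicted most
# often becomes the prediction for the whole video. For example, hypothetical
# frame predictions [3, 3, 7, 3] would yield action 3 for that video.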
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predict video based action labels for training and validation skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the model currently used for extracting skeleton model.
model_names: List of ML classifier model names which will used creating the objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
import traceback
import numpy as np
from skimage import exposure
import cv2
import tifffile
import os
from glob2 import glob
import pandas as pd
import mat4py
import datetime
import json
import matplotlib.pyplot as plt
import hashlib
# from napari_akseg._utils_imagej import read_imagej_file
from skimage import data
from skimage.registration import phase_cross_correlation
from skimage.registration._phase_cross_correlation import _upsampled_dft
from scipy.ndimage import fourier_shift
import scipy
# from napari_akseg._utils_cellpose import export_cellpose
# from napari_akseg._utils_oufti import export_oufti
# from napari_akseg._utils_imagej import export_imagej
# from napari_akseg._utils_json import import_coco_json, export_coco_json
import pickle
import xmltodict
import warnings
from astropy.io import fits
def read_xml(paths):
files = {}
for path in paths:
with open(path) as fd:
dat = xmltodict.parse(fd.read())["OME"]
for i in range(len(dat["Image"])):
img = dat["Image"][i]
objective_id = int(img["ObjectiveSettings"]["@ID"].split(":")[-1])
objective_dat = dat["Instrument"]["Objective"][objective_id]
objective_mag = float(objective_dat["@NominalMagnification"])
objective_na = float(objective_dat["@LensNA"])
pixel_size = float(img["Pixels"]["@PhysicalSizeX"])
position = i
microscope = "ScanR"
light_source = "LED"
channel_dict = {}
for j in range(len(img["Pixels"]["Channel"])):
channel_data = img["Pixels"]["Channel"][j]
channel_dict[j] = dict(modality = channel_data["@IlluminationType"],
channel = channel_data["@Name"],
mode = channel_data["@AcquisitionMode"])
primary_channel = ""
for j in range(len(img["Pixels"]["TiffData"])):
num_channels = img["Pixels"]["@SizeC"]
num_zstack = img["Pixels"]["@SizeZ"]
tiff_data = img["Pixels"]["TiffData"][j]
file_name = tiff_data["UUID"]["@FileName"]
file_path = os.path.abspath(path.replace(os.path.basename(path),file_name))
try:
plane_data = img["Pixels"]["Plane"][j]
channel_dat = channel_dict[int(plane_data["@TheC"])]
modality = channel_dat["modality"]
channel = channel_dat["channel"]
exposure_time = plane_data["@ExposureTime"]
posX = float(plane_data["@PositionX"])
posY = float(plane_data["@PositionY"])
posZ = float(plane_data["@PositionZ"])
except:
modality = None
channel = None
exposure_time = None
posX = None
posY = None
posZ = None
files[file_path] = dict(file_name = file_name,
position = position,
microscope = microscope,
light_source = light_source,
channel = channel,
modality = modality,
pixel_size = pixel_size,
objective_magnification = objective_mag,
objective_na = objective_na,
posX = posX,
posY = posY,
posZ = posZ)
return files
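# Note (added for clarity): read_xml returns a dictionary keyed by the absolute
# .tif path; each value holds the acquisition metadata parsed from the ScanR
# OME-XML, roughly (hypothetical values):
#   {"C:\\data\\image.tif": {"file_name": "image.tif", "position": 0,
#                            "microscope": "ScanR", "channel": "DAPI",
#                            "pixel_size": 0.065, ...}}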
def read_scanr_directory(self, path):
if isinstance(path, list) == False:
path = [path]
if len(path) == 1:
path = os.path.abspath(path[0])
if os.path.isfile(path) == True:
file_paths = [path]
else:
file_paths = glob(path + "*\**\*.tif", recursive=True)
else:
file_paths = path
scanR_meta_files = [path.replace(os.path.basename(path),"") for path in file_paths]
scanR_meta_files = np.unique(scanR_meta_files).tolist()
scanR_meta_files = [glob(path + "*.ome.xml")[0] for path in scanR_meta_files if len(glob(path + "*.ome.xml")) > 0]
file_info = read_xml(scanR_meta_files)
files = []
for path in file_paths:
try:
file = file_info[path]
file["path"] = path
folder = path.split("\\")[-3]
parent_folder = path.split("\\")[-4]
file["folder"] = folder
file["parent_folder"] = parent_folder
files.append(file)
except:
pass
files = pd.DataFrame(files)
import glob
import os
import pandas as pd
from sklearn.preprocessing import StandardScaler
from main.src.python.config import data_path
from main.src.python.config import config_path
from main.src.python.download.index_file import IndexFile
class ParallelReader:
def __init__(self, start, end, read=True, reduce=False):
self.df_dict = {}
self.stacked_df_dict = {}
self.adjusted_df_dict = {}
self.data = None
if read:
self.read(start, end)
self.stack()
self.adjust()
if reduce:
self.reduce()
self.concat()
@staticmethod
def all_instruments():
with open(os.path.join(config_path, "instruments.txt")) as f:
content = f.readlines()
return [x.strip() for x in content]
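# Note (added for clarity): instruments.txt is assumed to contain one
# instrument name per line, e.g. (hypothetical content):
#   EUR_USD
#   GBP_USD
# all_instruments() strips the whitespace and returns the names as a list.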
def read(self, start, end):
print("Reading data frames")
instruments = ParallelReader.all_instruments()
for instrument in instruments:
paths = glob.glob("{}/{}/*.pickle".format(data_path, instrument))
for path in paths:
file = IndexFile(path)
if file.needed_between(start, end):
self.df_dict.setdefault(instrument, []).append(file.read_between(start, end))
def stack(self):
print("Stacking read data frames")
for instrument in self.df_dict.keys():
frames = self.df_dict[instrument]
concatenated_frames = pd.concat(frames)
removed_dupes = concatenated_frames[~concatenated_frames.index.duplicated(keep="first")]
self.stacked_df_dict[instrument] = removed_dupes
def adjust(self):
print("Adjusting stacked data frames")
frames = [x for x in self.stacked_df_dict.values()]
concatenated_frames = pd.concat(frames)
from imblearn import under_sampling
from qiime2.plugin import (Str, Int)
import biom
from q2_feature_engineering._tada.logger import LOG
from qiime2 import NumericMetadataColumn
import numpy as np
import pandas as pd
import qiime2
import tempfile
import shutil
dispatcher = {'RandomUnderSampler': under_sampling.RandomUnderSampler}
def _sort_metada(targets_metadata, biom_table):
targets = targets_metadata.to_dataframe()
# filter features and targest so samples match
index = set(targets.index)
index = [ix for ix in biom_table.ids('sample') if ix in index]
targets = targets.loc[index]
feature_data = biom_table.filter(index, inplace=False)
return targets, feature_data
def _read_inputs(biom_table: biom.Table, meta_data: NumericMetadataColumn = None):
if meta_data:
meta, biom_table = _sort_metada(meta_data, biom_table)
y = meta.iloc[:, 0]
samples = meta.index
else:
samples = biom_table.ids('sample')
y = pd.DataFrame(data=np.asarray(np.ones((len(samples), 1))).ravel(), index=samples)
_table = biom_table.sort_order(axis='sample', order=samples)
if np.sum(samples != _table.ids('sample')) > 0:
raise ValueError("The samples IDs in meta data and biom table are not the same! The difference is:",
set(samples) - set(_table.ids('sample')), "Please double check.")
return _table, y
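# Note (added for clarity): _read_inputs aligns the biom table with the
# optional metadata column and returns the table with its samples sorted in
# the same order as the target vector y; when no metadata is given, y is a
# dummy vector of ones so the resampling API still receives labels.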
def synthetic_under_sampling(table: biom.Table, metadata: NumericMetadataColumn,
concatenate_meta_fp: Str, method: Str = 'RandomUnderSampler',
voting: Str = 'auto', n_jobs: Int = 1,
sampling_strategy: Str = 'auto',
random_state: Int = 42, output_log_fp: Str = None) -> biom.Table:
log_fp = tempfile.mktemp()
print("The log file will be writen into", log_fp)
if log_fp:
logger_ins = LOG(log_fp=log_fp).get_logger('synthetic_under_sampling')
logger_ins.info("The parameters used for under-sampling are")
logger_ins.info('voting (will be used with ClusterCentroids only):', voting)
logger_ins.info('Sampling method:', method)
logger_ins.info('Output log file path:', log_fp)
logger_ins.info('sampling_strategy:', sampling_strategy)
logger_ins.info('n_jobs:', n_jobs)
logger_ins.info('random_state:', random_state)
cls = dispatcher[method]
if method != 'RandomUnderSampler':
table.norm(inplace=True)
if log_fp:
logger_ins.info("The input table is normalized before using it for oversampling")
sorted_table, sorted_metadata = _read_inputs(table, meta_data=metadata)
matrix_data = sorted_table.matrix_data.transpose().todense()
if method not in dispatcher:
raise ValueError(
'The optional methods for over sampling are', dispatcher.keys(), "instead it received", method
)
if method == 'RandomUnderSampler':
under_sampling_cls = cls(sampling_strategy=sampling_strategy, random_state=random_state, replacement=False)
else:
raise NotImplementedError("Method", method, "is not implemented yet")
X_resampled, y_resampled = under_sampling_cls.fit_resample(matrix_data, sorted_metadata)
if log_fp:
logger_ins.info("The under-sampling finished successfully!")
logger_ins.info("Overall, the size of data is", len(X_resampled))
if method == 'RandomUnderSampler':
dummy_samples_ids = under_sampling_cls.sample_indices_
dummy_samples = []
orig_samples = sorted_table.ids('sample')
for sample_id in dummy_samples_ids:
dummy_samples.append(orig_samples[sample_id])
else:
raise NotImplementedError("Method", method, "is not implemented yet")
under_sampling_dummy = sorted_table.filter(ids_to_keep=dummy_samples, inplace=False)
under_sampling_dummy = under_sampling_dummy.sort_order(order=dummy_samples, axis='sample')
if method == "RandomUnderSampler" and np.sum(under_sampling_dummy.matrix_data.transpose()-X_resampled) != 0:
raise ValueError("The undersampling changed the matrix data")
undersampled_table = biom.Table(X_resampled.transpose(), observation_ids=sorted_table.ids('observation'),
sample_ids=dummy_samples)
undersampled_metadata = pd.DataFrame(index=dummy_samples, data=y_resampled)
#! /usr/bin/env python
# coding=utf-8
import os
import pandas as pd
import urllib
import xml.etree.ElementTree as ET
import io
import itertools as IT
# Copyright © 2016 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
class Scraper:
"""
Scraper for parlament.ch
scraper.get(table_name): get the table, write it in csv file, return a pandas.data_frame
"""
def __init__(self, time_out=10, language='FR'):
self.tables = {'party': 'Party',
'person': 'Person',
'member_council': 'MemberCouncil',
'council': 'Council'}
self.url_base = "https://ws.parlament.ch/odata.svc/"
self.url_count = "$count"
self.url_lang_filter = "$filter=Language%20eq%20'" + language + "'"
self.folder = "data"
self.time_out = time_out
self.limit_api = 1000
def get(self, table_name):
"""
Load the table_name from parlament.ch
Write a csv file in self.folder / table_name
:return (pandas.data_frame): table
"""
table_size = self.count(table_name)
if table_name == 'BusinessRole':
df = self._inner_get_business_role(table_name)
elif table_name == 'BusinessStatus':
df = self._inner_get_big_table_skip(table_name)
elif table_size > 10000:
df = self._inner_get_big_table_ids(table_name)
elif table_size > 900:
df = self._inner_get_big_table_skip(table_name)
else:
df = self._inner_get_small_table(table_name)
self._inner_write_file(df, table_name)
return df
def count(self, table_name):
"""
Count request for parlament.ch server
:param table_name:
:return: number of entries in table_name
"""
url = self.url_base + table_name + "/$count?$filter=Language%20eq%20'FR'"
with urllib.request.urlopen(url) as response:
n = response.read()
# get the number from the bytes
n = int(str(n).split("'")[1])
return n
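# Illustrative usage (not from the original script; the returned number is
# hypothetical):
#   scraper = Scraper()
#   scraper.count('Person')   # -> e.g. 3500 French-language entries
# The $count endpoint returns bytes such as b'3500'; the parsing above
# extracts the digits and converts them to an int.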
def _inner_get_and_parse(self, url):
"""
Send GET request to parlament.ch and parse the return XML file to a pandas.data_frame
:param url: (str) GET url request
:return: (pandas.data_frame) parsed XML
"""
print("GET:", url)
with urllib.request.urlopen(url) as url:
s = url.read()
# root = ET.fromstring(s)
root = self._inner_error_handling_xmlfromstring(s)
dict_ = {}
base = "{http://www.w3.org/2005/Atom}"
# base = self.base_url
for child in root.iter(base + 'entry'):
for children in child.iter(base + 'content'):
for properties in children:
for subject in properties:
# print(subject.text)
s = subject.tag.split('}')
if s[1] in dict_:
dict_[s[1]].append(subject.text)
else:
dict_[s[1]] = [subject.text]
data = pd.DataFrame(dict_)
return data
def _inner_error_handling_xmlfromstring(self, content):
""" Print XML if error while parsing (mainly due to server API timeout)"""
try:
tree = ET.fromstring(content)
except ET.ParseError as err:
lineno, column = err.position
line = next(IT.islice(io.BytesIO(content), lineno))
caret = '{:=>{}}'.format('^', column)
err.msg = '{}\n{}\n{}'.format(err, line, caret)
raise
return tree
def _inner_write_file(self, table, table_name):
""" Write table in csv file inside self.folder / table_name"""
self._inner_check_folder()
table.to_csv(self.folder + '/' + table_name + '.csv')
def _inner_get_big_table_skip(self, table_name):
"""
Loop URL requests over the table in steps of 1000 rows and load data until the end is reached.
Times out after self.time_out iterations.
:param table_name: Name of the desired table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
# loop parameters
limit_api = self.limit_api
data_frames = []
i = 0
top = 1000
skip = 0
while True:
url = base + table_name + '?' + "$top=" + str(top) + \
'&' + language + \
'&' + "$skip=" + str(skip)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
if df.shape == (0, 0):
break
# stop after self.time_out iterations to avoid the Swiss police knocking at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", top)
break
data_frames.append(df)
# top += limit_api
skip += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
def _inner_get_big_table_ids(self, table_name):
"""
"skip" odata attribute leads to time out the parlament.ch server. Here we use id's to get directly intervals of
items.
Less safe than "skip version, because could stop if a big ID interval is not used (normally not the case)
Loop URL request on table by step of 1000 id's and load data until reaches the end
Time Out after 10 iterations
:param table_name: Name of the wished table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
id_from = "ID%20ge%20"
id_to = "%20and%20ID%20lt%20"
# loop parameters
limit_api = self.limit_api
data_frames = []
id_ = self._inner_get_smaller_id(table_name)
i = 0
n_downloaded = 0
expected_size = self.count(table_name)
while True:
url = base + table_name + '?' + language + '%20and%20' + id_from + str(id_) + id_to + str(id_ + limit_api)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
# if df.shape == (0, 0):
# break
# add number of elements downloaded
n_downloaded += df.shape[0]
# stop when downloaded the whole table
if n_downloaded >= expected_size:
break
# stop after self.time_out iterations to avoid the Swiss police knocking at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", id_)
break
data_frames.append(df)
id_ += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
import pandas as pd
import sparse
import numpy as np
class AnnotationData:
"""
Contains all the segmentation and assignment data
WARNING: self.assignments['Clusternames'] will contain neurite ids (as strings) rather than names
"""
# Todo: if we can preserve segments instead of merging them when two segs are one same neuron, that would help
# (make possible) the classification
# TODO: what happens to features when neurons/segs are reassigned? features go rotten because the segment key is unchanged
def __init__(self, stem_savefile, frame_shape: tuple = (512, 512, 35)): # Todo: is it right to have a default value here?
"""
Initialize the class for segments and assignments
:param stem_savefile: The stem name for the files in which to save assignments and segments
:param frame_shape: the shape of the numpy array of any frame of the video
"""
self._normal_seg_file = stem_savefile + "_segmented.csv"
self._coarse_seg_file = stem_savefile + "_highthresh_segmented.csv"
self.assignment_file = stem_savefile + "_assignment.csv"
try:
self._normal_data_frame = pd.read_csv(self._normal_seg_file)
except FileNotFoundError:
self._normal_data_frame = pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
try:
self._coarse_data_frame = pd.read_csv(self._coarse_seg_file)
except FileNotFoundError:
self._coarse_data_frame = pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
import csv
import os
import sys
import re
import shutil
from urllib.request import urlopen
os.system(f"{sys.executable} -m pip install -U pytd==1.0.0")
def convert_directory_to_csv(directory, polarity, out_file_path):
with open(out_file_path, "a") as csvfile:
writer = csv.writer(csvfile)
for file_path in os.listdir(directory):
with open(os.path.join(directory, file_path), "r") as f:
sentence = f.read()
sentiment = re.match(r"\d+_(\d+)\.txt", file_path).group(1)
writer.writerow([sentence, sentiment, str(polarity)])
def convert_dataset(directory):
out_path = os.path.join("resources", "{}.csv".format(directory))
with open(out_path, "w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["sentence", "sentiment", "polarity"])
convert_directory_to_csv(
os.path.join("resources", "aclImdb", directory, "pos"), 1, out_path
)
convert_directory_to_csv(
os.path.join("resources", "aclImdb", directory, "neg"), 0, out_path
)
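# Illustrative usage (not from the original script): assuming the extracted
# aclImdb folder sits under resources/, the calls below would write
# resources/train.csv and resources/test.csv with the columns
# sentence, sentiment, polarity.
#   convert_dataset("train")
#   convert_dataset("test")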
def load_directory_data(directory):
import pandas as pd
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with open(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match(r"\d+_(\d+)\.txt", file_path).group(1))
return pd.DataFrame.from_dict(data)
def load_dataset(directory):
import pandas as pd
pos_df = load_directory_data(os.path.join(directory, "pos"))
neg_df = load_directory_data(os.path.join(directory, "neg"))
pos_df["polarity"] = 1
neg_df["polarity"] = 0
return pd.concat([pos_df, neg_df])
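# Illustrative usage (not from the original script; the path is an assumption):
#   train_df = load_dataset(os.path.join("resources", "aclImdb", "train"))
# The result concatenates positive reviews (polarity=1) and negative reviews
# (polarity=0) into a single DataFrame.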
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from zvt import init_log, zvt_env
from zvt.api.quote import get_stock_factor_schema
from zvt.contract import IntervalLevel
from zvt.contract.api import df_to_db
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.recorders.joinquant.common import to_jq_trading_level, to_jq_entity_id
from zvt.domain import Stock,StockFactorCommon
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, now_pd_timestamp, TIME_FORMAT_DAY, TIME_FORMAT_ISO8601
try:
from jqdatasdk import auth, logout, get_factor_values
except:
pass
class JqChinaStockFactorRecorder(FixedCycleDataRecorder):
entity_provider = 'joinquant'
entity_schema = Stock
data_schema = StockFactorCommon
# Data comes from JoinQuant (jq)
provider = 'joinquant'
def __init__(self,
exchanges=['sh', 'sz'],
schema=None,
entity_ids=None,
codes=None,
batch_size=10,
force_update=True,
sleeping_time=0,
default_size=2000,
real_time=False,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None,
level=IntervalLevel.LEVEL_1WEEK,
kdata_use_begin_time=False,
close_hour=15,
close_minute=0,
one_day_trading_minutes=4 * 60,
) -> None:
level = IntervalLevel(level)
self.data_schema = get_stock_factor_schema(schema)
self.jq_trading_level = to_jq_trading_level(level)
super().__init__('stock', exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
auth(zvt_env['jq_username'], zvt_env['jq_password'])
def on_finish(self):
super().on_finish()
logout()
def record(self, entity, start, end, size, timestamps):
now_date = to_time_str(now_pd_timestamp())
jq_entity_di = to_jq_entity_id(entity)
if size > 1000:
start_end_size = self.evaluate_start_end_size_timestamps(entity)
size = 1000
bdate= pd.bdate_range(start=start_end_size[0], periods=size)
self.start_timestamp = bdate[0]
self.end_timestamp = bdate[-1] if bdate[-1] <= now_pd_timestamp() else now_pd_timestamp()
if not self.end_timestamp:
factor_data = get_factor_values(securities=[jq_entity_di],
factors=self.data_schema.important_cols(),
end_date=now_date,
count=size)
else:
end_timestamp = to_time_str(self.end_timestamp)
if self.start_timestamp:
start_timestamp = to_time_str(self.start_timestamp)
else:
bdate_list = pd.bdate_range(end=end_timestamp, periods=size)
import operator
import os
from collections import defaultdict
from typing import Any, Dict, List
import pandas as pd
from tqdm import tqdm
class Dataset(object):
"""
Object for a data source that exists in the form of a list:
[
(source, target, timestamp),
(source, target, timestamp),
...
]
Initialize using this `data`, along with an optional `node_mapping`.
The `node_mapping` will be used to calculate the total number of nodes.
Otherwise, the number of active nodes (nodes found in `data` as sources
or targets) will be used. Note that this can affect the computation
of TieDecay PageRank values downstream.
"""
def __init__(self, data: List, node_mapping: Dict[int, Any] = None):
"""
Args:
data (list): list of timestamped edges (source, target, timestamp)
node_mapping (dict): optional mapping of node ids to node names, or other metadata
"""
self.adj_list = data
self.node_mapping = node_mapping
self._load_data()
return
@property
def nodes(self):
if self.node_mapping is not None:
self._nodes = set(self.node_mapping.keys())
else:
self._nodes = self.active_nodes
return self._nodes
@property
def num_nodes(self):
if self.node_mapping is not None:
self._num_nodes = len(set(self.node_mapping.keys()))
else:
self._num_nodes = len(self.active_nodes)
return self._num_nodes
def _load_data(self):
"""
Load the data into computationally-friendly format
"""
self.num_interactions = len(self.adj_list)
self.sort_interactions()
self.sources = [x[0] for x in self.adj_list]
self.targets = [x[1] for x in self.adj_list]
self.timestamps = [x[2] for x in self.adj_list]
self.active_nodes = set(self.sources + self.targets)
self.num_active_nodes = len(self.active_nodes)
# Convert the adjacency list to a dictionary with timestamps as keys
print("Converting to dictionary...")
self.adj_dict = defaultdict(list)
for i in tqdm(self.adj_list):
            t = pd.to_datetime(i[2])
import pandas as pd
import yfinance as yf
import config
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
from get_data_11_26 import get_data
from trading import get_trading_records
from visualization import plot_monthly_heatmap, plot_yearly_diff_comparison, plot_trading_behavior
def run(train_model=True, show_comp=True, show_behav=False, file_name=None):
if train_model:
# train model and get trading results
dax_trading_records = pd.DataFrame()
for ticker in config.SYMBOLS:
# for ticker in ["LIN.DE"]:
print(ticker)
# get all data
df = get_data(ticker, config.START, config.END)
            # trade and collect the trading records
ticker_trading_records = get_trading_records(ticker, df)
dax_trading_records = dax_trading_records.append(ticker_trading_records)
dax_trading_records.to_excel(config.saving_path_trading_records+file_name)
else:
        dax_trading_records = pd.read_excel(config.saving_path_trading_records+file_name)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import baostock as bs
import pandas as pd
# Log in to the system
lg = bs.login()
# Print the login response info
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
# Fetch index K-line (candlestick) data (composite, size, level-1 industry,
# level-2 industry, strategy, growth, value and theme indices)
# Composite indices, e.g. sh.000001 上证指数, sz.399106 深证综指, etc.
# Size indices, e.g. sh.000016 上证50, sh.000300 沪深300, sh.000905 中证500, sz.399001 深证成指, etc.
# Level-1 industry indices, e.g. sh.000037 上证医药, sz.399433 国证交运, etc.
# Level-2 industry indices, e.g. sh.000952 300地产, sz.399951 300银行, etc.
# Strategy indices, e.g. sh.000050 50等权, sh.000982 500等权, etc.
# Growth indices, e.g. sz.399376 小盘成长, etc.
# Value indices, e.g. sh.000029 180价值, etc.
# Theme indices, e.g. sh.000015 红利指数, sh.000063 上证周期, etc.
# For detailed field parameters, see the "historical quotation field parameters" section
rs = bs.query_history_k_data_plus("sh.000905",
"date,code,open,high,low,close,preclose,volume,amount,pctChg",
start_date='1990-01-01', end_date='2019-08-01', frequency="d")
print('query_history_k_data_plus respond error_code:' + rs.error_code)
print('query_history_k_data_plus respond error_msg:' + rs.error_msg)
# 打印结果集
data_list = []
while (rs.error_code == '0') & rs.next():
# Fetch records one at a time and collect them together
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: Eastmoney Data Center - New Stock Data
IPO subscription returns (打新收益率):
http://data.eastmoney.com/xg/xg/dxsyl.html
IPO subscription and lottery result lookup (新股申购与中签查询):
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription returns - total page count
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: total number of pages
    :rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
def stock_dxsyl_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription returns
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: IPO subscription return data for the specified market
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_numeric(big_df["首日涨幅"])
big_df["打新收益"] = pd.to_numeric(big_df["打新收益"])
return big_df
def stock_xgsglb_em(symbol: str = "京市A股") -> pd.DataFrame:
"""
    IPO subscription and lottery result lookup (新股申购与中签查询)
http://data.eastmoney.com/xg/xg/default_2.html
:param symbol: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
:type symbol: str
    :return: IPO subscription and lottery result data
:rtype: pandas.DataFrame
"""
market_map = {
"全部股票": """(APPLY_DATE>'2010-01-01')""",
"沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
"科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
"深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
"创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
}
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
if symbol == "京市A股":
params = {
'sortColumns': 'APPLY_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'columns': 'ALL',
'reportName': 'RPT_NEEQ_ISSUEINFO_LIST',
'quoteColumns': 'f14~01~SECURITY_CODE~SECURITY_NAME_ABBR',
'source': 'NEEQSELECT',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, 1+int(total_page)), leave=False):
params.update({
'pageNumber': page
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
'序号',
'-',
'代码',
'-',
'简称',
'申购代码',
'发行总数',
'-',
'发行价格',
'发行市盈率',
'申购日',
'发行结果公告日',
'上市日',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'网上申购缴款日',
'网上申购退款日',
'-',
'网上获配比例',
'最新价',
'首日收盘价',
'网下有效申购倍数',
'每百股获利',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'序号',
'代码',
'简称',
'申购代码',
'发行总数',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'发行价格',
'最新价',
'首日收盘价',
'申购日',
'网上申购缴款日',
'网上申购退款日',
'上市日',
'发行结果公告日',
'发行市盈率',
'网上获配比例',
'网下有效申购倍数',
'每百股获利',
]]
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行数量'] = pd.to_numeric(big_df['网上发行数量'])
big_df['顶格申购所需资金'] = pd.to_numeric(big_df['顶格申购所需资金'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
big_df['网上获配比例'] = pd.to_numeric(big_df['网上获配比例'])
big_df['网下有效申购倍数'] = pd.to_numeric(big_df['网下有效申购倍数'])
big_df['每百股获利'] = pd.to_numeric(big_df['每百股获利'])
big_df['申购日'] = pd.to_datetime(big_df['申购日']).dt.date
big_df['网上申购缴款日'] = pd.to_datetime(big_df['网上申购缴款日']).dt.date
big_df['网上申购退款日'] = pd.to_datetime(big_df['网上申购退款日']).dt.date
big_df['上市日'] = pd.to_datetime(big_df['上市日']).dt.date
big_df['发行结果公告日'] = pd.to_datetime(big_df['发行结果公告日']).dt.date
return big_df
else:
params = {
'sortColumns': 'APPLY_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPTA_APP_IPOAPPLY',
'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
'filter': market_map[symbol],
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"股票代码",
"股票简称",
"_",
"申购代码",
"_",
"_",
"_",
"发行总数",
"网上发行",
"_",
"顶格申购需配市值",
"_",
"申购上限",
"_",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"中签率",
"询价累计报价倍数",
"_",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"行业市盈率",
"_",
"_",
"_",
]
big_df = big_df[
[
"股票代码",
"股票简称",
"申购代码",
"发行总数",
"网上发行",
"顶格申购需配市值",
"申购上限",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"行业市盈率",
"中签率",
"询价累计报价倍数",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
]
]
big_df['申购日期'] = pd.to_datetime(big_df['申购日期']).dt.date
big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日']).dt.date
big_df['中签缴款日期'] = pd.to_datetime(big_df['中签缴款日期']).dt.date
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行'] = pd.to_numeric(big_df['网上发行'])
big_df['顶格申购需配市值'] = pd.to_numeric(big_df['顶格申购需配市值'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
big_df['行业市盈率'] = pd.to_numeric(big_df['行业市盈率'])
big_df['中签率'] = pd.to_numeric(big_df['中签率'])
big_df['询价累计报价倍数'] = pd.to_numeric(big_df['询价累计报价倍数'])
big_df['配售对象报价家数'] = pd.to_numeric(big_df['配售对象报价家数'])
big_df['涨幅'] = pd.to_numeric(big_df['涨幅'])
        big_df['每中一签获利'] = pd.to_numeric(big_df['每中一签获利'])
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-d matrix with shape (2, 3) to use as input; `empty` makes
        # sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
            DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
import json
import copy
import click
import itertools
from collections import ChainMap
import logging
import pandas as pd
from twarc import ensure_flattened
log = logging.getLogger("twarc")
DEFAULT_TWEET_COLUMNS = """id
conversation_id
referenced_tweets.replied_to.id
referenced_tweets.retweeted.id
referenced_tweets.quoted.id
author_id
in_reply_to_user_id
retweeted_user_id
quoted_user_id
created_at
text
lang
source
public_metrics.like_count
public_metrics.quote_count
public_metrics.reply_count
public_metrics.retweet_count
reply_settings
possibly_sensitive
withheld.scope
withheld.copyright
withheld.country_codes
entities.annotations
entities.cashtags
entities.hashtags
entities.mentions
entities.urls
context_annotations
attachments.media
attachments.media_keys
attachments.poll.duration_minutes
attachments.poll.end_datetime
attachments.poll.id
attachments.poll.options
attachments.poll.voting_status
attachments.poll_ids
author.id
author.created_at
author.username
author.name
author.description
author.entities.description.cashtags
author.entities.description.hashtags
author.entities.description.mentions
author.entities.description.urls
author.entities.url.urls
author.location
author.pinned_tweet_id
author.profile_image_url
author.protected
author.public_metrics.followers_count
author.public_metrics.following_count
author.public_metrics.listed_count
author.public_metrics.tweet_count
author.url
author.verified
author.withheld.scope
author.withheld.copyright
author.withheld.country_codes
geo.coordinates.coordinates
geo.coordinates.type
geo.country
geo.country_code
geo.full_name
geo.geo.bbox
geo.geo.type
geo.id
geo.name
geo.place_id
geo.place_type
__twarc.retrieved_at
__twarc.url
__twarc.version
""".split(
"\n"
)
DEFAULT_USER_COLUMNS = """id
created_at
username
name
description
entities.description.cashtags
entities.description.hashtags
entities.description.mentions
entities.description.urls
entities.url.urls
location
pinned_tweet_id
profile_image_url
protected
public_metrics.followers_count
public_metrics.following_count
public_metrics.listed_count
public_metrics.tweet_count
url
verified
withheld.scope
withheld.copyright
withheld.country_codes
__twarc.retrieved_at
__twarc.url
__twarc.version
""".split(
"\n"
)
DEFAULT_COMPLIANCE_COLUMNS = """id
action
created_at
redacted_at
reason
""".split(
"\n"
)
DEFAULT_COUNTS_COLUMNS = """start
end
tweet_count
__twarc.retrieved_at
__twarc.url
__twarc.version
""".split(
"\n"
)
class DataFrameConverter:
"""
Convert a set of JSON Objects into a Pandas DataFrame object.
You can call this directly on a small set of objects, but memory is quickly consumed for larger datasets.
This class can accept individual tweets or whole response objects.
Args:
objects (iterable): JSON Objects to convert. Can be users, tweets, or other API objects.
input_data_type (str): data type: `tweets` or `users` or `compliance` or `counts`
Returns:
DataFrame: The objects provided as a Pandas DataFrame.
"""
def __init__(
self,
input_data_type="tweets",
json_encode_all=False,
json_encode_text=False,
json_encode_lists=True,
inline_referenced_tweets=False,
merge_retweets=True,
allow_duplicates=False,
extra_input_columns="",
output_columns=None,
dataset_ids=None,
counts=None,
):
self.json_encode_all = json_encode_all
self.json_encode_text = json_encode_text
self.json_encode_lists = json_encode_lists
self.inline_referenced_tweets = inline_referenced_tweets
self.merge_retweets = merge_retweets
self.allow_duplicates = allow_duplicates
self.input_data_type = input_data_type
self.columns = list()
if input_data_type == "tweets":
self.columns.extend(
x for x in DEFAULT_TWEET_COLUMNS if x not in self.columns
)
if input_data_type == "users":
self.columns.extend(
x for x in DEFAULT_USER_COLUMNS if x not in self.columns
)
if input_data_type == "compliance":
self.columns.extend(
x for x in DEFAULT_COMPLIANCE_COLUMNS if x not in self.columns
)
if input_data_type == "counts":
self.columns.extend(
x for x in DEFAULT_COUNTS_COLUMNS if x not in self.columns
)
if extra_input_columns:
self.columns.extend(
x for x in extra_input_columns.split(",") if x not in self.columns
)
self.output_columns = (
output_columns.split(",") if output_columns else self.columns
)
self.dataset_ids = dataset_ids if dataset_ids else set()
self.counts = (
counts
if counts
else {
"lines": 0,
"tweets": 0,
"referenced_tweets": 0,
"retweets": 0,
"quotes": 0,
"replies": 0,
"unavailable": 0,
"non_objects": 0,
"parse_errors": 0,
"duplicates": 0,
"rows": 0,
"input_columns": len(self.columns),
"output_columns": len(self.output_columns),
}
)
def _flatten_objects(self, objects):
"""
Generate flattened tweets from a batch of parsed lines.
"""
for o in objects:
for item in ensure_flattened(o):
yield item
def _inline_referenced_tweets(self, tweet):
"""
(Optional) Insert referenced tweets into the main CSV as new rows
"""
if "referenced_tweets" in tweet and self.inline_referenced_tweets:
for referenced_tweet in tweet["referenced_tweets"]:
# extract the referenced tweet as a new row
self.counts["referenced_tweets"] += 1
# inherit __twarc metadata from parent tweet
referenced_tweet["__twarc"] = (
tweet["__twarc"] if "__twarc" in tweet else None
)
# write tweet as new row if referenced tweet exists (has more than the 3 default fields):
if len(referenced_tweet.keys()) > 3:
yield self._format_tweet(referenced_tweet)
else:
self.counts["unavailable"] += 1
yield self._format_tweet(tweet)
def _format_tweet(self, tweet):
"""
Make the tweet objects easier to deal with, removing extra info and changing the structure.
"""
# Make a copy of the original flattened tweet
tweet = copy.deepcopy(tweet)
# Deal with pinned tweets for user datasets, `tweet` here is actually a user:
# remove the tweet from a user dataset, pinned_tweet_id remains:
tweet.pop("pinned_tweet", None)
# Remove in_reply_to_user, in_reply_to_user_id remains:
tweet.pop("in_reply_to_user", None)
if "referenced_tweets" in tweet:
# Count Replies:
replies = [
t for t in tweet["referenced_tweets"] if t["type"] == "replied_to"
]
reply_tweet = replies[-1] if replies else None
if "in_reply_to_user_id" in tweet or reply_tweet:
self.counts["replies"] += 1
# Extract Retweet only
rts = [t for t in tweet["referenced_tweets"] if t["type"] == "retweeted"]
retweeted_tweet = rts[-1] if rts else None
if retweeted_tweet and "author_id" in retweeted_tweet:
self.counts["retweets"] += 1
tweet["retweeted_user_id"] = retweeted_tweet["author_id"]
# Extract Quoted tweet
qts = [t for t in tweet["referenced_tweets"] if t["type"] == "quoted"]
quoted_tweet = qts[-1] if qts else None
if quoted_tweet and "author_id" in quoted_tweet:
self.counts["quotes"] += 1
tweet["quoted_user_id"] = quoted_tweet["author_id"]
# Process Retweets:
# If it's a native retweet, replace the "RT @user Text" with the original text, metrics, and entities, but keep the Author.
if retweeted_tweet and self.merge_retweets:
# A retweet inherits everything from retweeted tweet.
tweet["text"] = retweeted_tweet.pop("text", None)
tweet["entities"] = retweeted_tweet.pop("entities", None)
tweet["attachments"] = retweeted_tweet.pop("attachments", None)
tweet["context_annotations"] = retweeted_tweet.pop(
"context_annotations", None
)
tweet["public_metrics"] = retweeted_tweet.pop("public_metrics", None)
# reconstruct referenced_tweets object
referenced_tweets = [
{r["type"]: {"id": r["id"]}} for r in tweet["referenced_tweets"]
]
# leave behind references, but not the full tweets
# ChainMap flattens list into properties
tweet["referenced_tweets"] = dict(ChainMap(*referenced_tweets))
else:
tweet["referenced_tweets"] = {}
# Remove `type` left over from referenced tweets
tweet.pop("type", None)
# Remove empty objects
if "attachments" in tweet and not tweet["attachments"]:
tweet.pop("attachments", None)
if "entities" in tweet and not tweet["entities"]:
tweet.pop("entities", None)
if "public_metrics" in tweet and not tweet["public_metrics"]:
tweet.pop("public_metrics", None)
if "pinned_tweet" in tweet and not tweet["pinned_tweet"]:
tweet.pop("pinned_tweet", None)
return tweet
def _process_tweets(self, tweets):
"""
Count, deduplicate objects before adding them to the dataframe.
"""
for tweet in tweets:
if "id" in tweet:
tweet_id = tweet["id"]
self.counts["tweets"] += 1
if tweet_id in self.dataset_ids:
self.counts["duplicates"] += 1
if self.allow_duplicates:
yield tweet
else:
if tweet_id not in self.dataset_ids:
yield tweet
self.dataset_ids.add(tweet_id)
elif self.input_data_type == "counts":
self.counts["tweets"] += 1
yield tweet
else:
# non tweet objects are usually streaming API errors etc.
self.counts["non_objects"] += 1
def _process_dataframe(self, _df):
"""
Apply additional preprocessing to the DataFrame contents.
"""
# (Optional) json encode all
if self.json_encode_all:
_df = _df.applymap(json.dumps, na_action="ignore")
else:
# (Optional) text escape for any text fields
if self.json_encode_text:
_df = _df.applymap(
lambda x: json.dumps(x) if type(x) is str else x,
na_action="ignore",
)
else:
# Mandatory newline escape to prevent breaking csv format:
_df = _df.applymap(
lambda x: x.replace("\r", "").replace("\n", r"\n")
if type(x) is str
else x,
na_action="ignore",
)
# (Optional) json for lists
if self.json_encode_lists:
_df = _df.applymap(
lambda x: json.dumps(x) if pd.api.types.is_list_like(x) else x,
na_action="ignore",
)
return _df
def process(self, objects):
"""
Process the objects into a pandas dataframe.
"""
tweet_batch = itertools.chain.from_iterable(
self._process_tweets(self._inline_referenced_tweets(tweet))
for tweet in self._flatten_objects(objects)
)
_df = pd.json_normalize(list(tweet_batch))
# Check for mismatched columns
diff = set(_df.columns) - set(self.columns)
if len(diff) > 0:
click.echo(
click.style(
f"💔 ERROR: {len(diff)} Unexpected items in data! \n"
"Are you sure you specified the correct --input-data-type?\n"
"If the object type is correct, add extra columns with:"
f"\n--extra-input-columns \"{','.join(diff)}\"\nSkipping entire batch of {len(_df)} tweets!",
fg="red",
),
err=True,
)
log.error(
f"CSV Unexpected Data: \"{','.join(diff)}\". Expected {len(self.columns)} columns, got {len(_df.columns)}. Skipping entire batch of {len(_df)} tweets!"
)
self.counts["parse_errors"] += len(_df)
            return pd.DataFrame(columns=self.columns)
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-02
"""
This script cleans the census dataset for a given year and saves them to
the file_path provided. This script takes the census year and the csv file
containing the census data as arguments.
Usage: src/02_clean_wrangle/05_clean_census.py --census_file=<census_file> \
--year=<year> \
--file_path=<file_path>
Options:
--census_file=<census_file> csv file containing census data,
including file path.
--year=<year> census year.
--file_path=<file_path> Path to the exported files folder.
"""
from docopt import docopt
import pandas as pd
import os
import re
import warnings
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore")
opt = docopt(__doc__)
def create_subgroup_dict(df, year):
# separate dataframe by 'Variables' containing regex expressions:
if year == 2001:
re1 = ['total.*by', 'population.*by', 'common-law couples',
'^Male', '^Female', 'total - male', 'total - female']
elif year == 2006:
re1 = [r'total.*by', r'population.*by', r'common-law couples',\
r'^Male[s\s,]', r'^Female[s\s,]', r'total - mobility',\
r'Average number of children']
elif year == 2011:
df.drop(index=201, inplace=True)
re1 = ['total.*by', 'population.*by', 'common-law couples',
'males', 'Total population excluding institutional residents',
'Total.*in private households']
elif year == 2016:
re1 = ['^total', 'population.*by', 'males']
subgroup = list(df[df.Variable.str.contains('|'.join(re1),
flags=re.IGNORECASE)].index)
subgroup.append(len(df.Variable)+1)
subgroup = subgroup[1:]
# create census dictionary of sub datasets
# initialize variables for the lookup dictionary
start = 0
census_dict = {}
for s in subgroup:
sub_df = df.loc[start:s-1]
# transpose dataframe and rename column
sub_df = sub_df.set_index('Variable').T.reset_index()
sub_df = sub_df.rename(columns={'index': 'LocalArea'})
# check for duplicates and store dataframes into the dictionary
if df.Variable[start] in census_dict:
start = s
else:
census_dict[df.Variable[start]] = sub_df
start = s
return census_dict
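# Sketch of the structure produced above (keys are the 'Variable' labels that open
# each block; values are transposed sub-frames with a 'LocalArea' column), e.g.:
#   census_dict['Total population by age groups']  # -> one row per LocalArea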
###########################################################################
# HELPER FUNCTIONS
###########################################################################
def clean_age(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years', '30 to 34 years',
'35 to 39 years', '40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years', '60 to 64 years',
'65 to 69 years', '70 to 74 years', '75 to 79 years',
'80 to 84 years', '85 to 89 years', '90 to 94 years',
'95 to 99 years', '100 years and over']
male = census_dict['Male']
female = census_dict['Female']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
total = merged.groupby('LocalArea').sum()
total['Type'] = 'total'
total.reset_index(inplace=True)
merged = pd.concat([merged, total])
else:
if year == 2006:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years', '30 to 34 years',
'35 to 39 years', '40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years', '60 to 64 years',
'65 to 69 years', '70 to 74 years', '75 to 79 years',
'80 to 84 years', '85 to 89 years', '90 to 94 years',
'95 to 99 years', '100 years and over', 'Median Age']
total = census_dict['Male & Female, Total']
male = census_dict['Male, Total']
female = census_dict['Female, Total']
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total', '0 to 4 years',
'5 to 9 years', '10 to 14 years', '15 to 19 years',
'15 years', '16 years', '17 years', '18 years',
'19 years', '20 to 24 years', '25 to 29 years',
'30 to 34 years', '35 to 39 years', '40 to 44 years',
'45 to 49 years', '50 to 54 years', '55 to 59 years',
'60 to 64 years', '65 to 69 years', '70 to 74 years',
'75 to 79 years', '80 to 84 years',
'85 years and over', 'Median age',
'% of the population aged 15 and over']
total = census_dict['Total population by age groups']
male = census_dict['Males, total']
female = census_dict['Females, total']
elif year == 2016:
col_names = ['LocalArea', 'Type', 'Total', '0 to 14 years',
'0 to 4 years', '5 to 9 years', '10 to 14 years',
'15 to 64 years', '15 to 19 years',
'20 to 24 years', '25 to 29 years',
'30 to 34 years', '35 to 39 years',
'40 to 44 years', '45 to 49 years',
'50 to 54 years', '55 to 59 years',
'60 to 64 years', '65 years and over',
'65 to 69 years', '70 to 74 years',
'75 to 79 years', '80 to 84 years',
'85 years and over', '85 to 89 years',
'90 to 94 years', '95 to 99 years',
'100 years and over']
total = census_dict['Total - Age groups and average age of the population - 100% data']
male = census_dict['Total - Age groups and average age of males - 100% data']
female = census_dict['Total - Age groups and average age of females - 100% data']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
total.insert(1, 'Type', 'total')
total.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['population by age and sex'] = merged
merged.to_csv(file_path + '/population_age_sex.csv')
return census_dict
###############################################################################
def clean_marital_status(census_dict, year, file_path):
if year in [2001, 2006]:
col_names = ['LocalArea', 'Total population 15 years and over',
'Single (never legally married)', 'Married',
'Separated', 'Divorced', 'Widowed', 'total x',
'Not living common law', 'Living common law']
cols_ord = ['LocalArea', 'Total population 15 years and over',
'Married or living with a or common-law partner',
'Married', 'Living common law',
'Not living with a married spouse or common-law partner',
'Single (never legally married)', 'Separated',
'Divorced', 'Widowed']
df1 = census_dict['Total population 15 years and over by legal marital status']
df2 = census_dict['Total population 15 years and over by common-law status']
merged = pd.merge(df1, df2, on=['LocalArea'])
merged.set_axis(col_names, axis=1, inplace=True)
merged['Married or living with a or common-law partner'] = merged['Married'] + merged['Living common law']
merged['Not living with a married spouse or common-law partner'] = merged['Total population 15 years and over'] - merged['Married or living with a or common-law partner']
merged = merged[cols_ord]
else:
if year == 2011:
total = census_dict['Total population 15 years and over by marital status']
male = census_dict['Males 15 years and over by marital status']
female = census_dict['Females 15 years and over by marital status']
elif year == 2016:
total = census_dict['Total - Marital status for the population aged 15 years and over - 100% data']
male = census_dict['Total - Marital status for males aged 15 years and over - 100% data']
female = census_dict['Total - Marital status for females aged 15 years and over - 100% data']
col_names = ['LocalArea', 'Type',
'Total population 15 years and over',
'Married or living with a or common-law partner',
'Married', 'Living common law',
'Not living with a married spouse or common-law partner',
'Single (never legally married)', 'Separated',
'Divorced', 'Widowed']
female.insert(1, 'Type', 'female')
female.set_axis(col_names, axis=1, inplace=True)
male.insert(1, 'Type', 'male')
male.set_axis(col_names, axis=1, inplace=True)
total.insert(1, 'Type', 'total')
total.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([female, male, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['marital status'] = merged
merged.to_csv(file_path + '/marital_status.csv')
return census_dict
###############################################################################
def clean_couple_fam_structure(census_dict, year, file_path):
col_names = ['LocalArea', 'Type', 'Total', 'Without children at home',
'With children at home', '1 child', '2 children',
'3 or more children']
if year == 2016:
total = census_dict['Total - Couple census families in private households - 100% data']
total.insert(1, 'Type', 'total couples')
total.set_axis(col_names, axis=1, inplace=True)
census_dict['couples - family structure'] = total
total.to_csv(file_path + '/couples_family_structure.csv')
else:
if year in [2011, 2006]:
married = census_dict['Total couple families by family structure and number of children']
married = married[['LocalArea', 'Married couples',
'Without children at home',
'With children at home', '1 child',
'2 children', '3 or more children']]
common_law = census_dict['Common-law couples']
elif year == 2001:
married = census_dict['Total couple families by family structure']
married = married[['LocalArea', 'Married couples',
'Without children at home',
'With children at home', '1 child',
'2 children', '3 or more children']]
common_law = census_dict['Common-law couples']
married.insert(1, 'Type', 'married couples')
married.set_axis(col_names, axis=1, inplace=True)
common_law.insert(1, 'Type', 'common-law couples')
common_law.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([married, common_law])
total = merged.groupby('LocalArea').sum()
total['Type'] = 'total couples'
total.reset_index(inplace=True)
merged = pd.concat([merged, total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['couples - family structure'] = merged
merged.to_csv(file_path + '/couples_family_structure.csv')
return census_dict
###############################################################################
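# Detailed language: each source table keeps its first data column renamed to
# 'Total' and gets a 'Type' label (mother tongue / language most often spoken at
# home / other language spoken at home, split by sex where available) so the
# year-specific tables can be stacked into one long frame.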
def clean_language_detailed(census_dict, year, file_path):
if year == 2006:
mt_total = census_dict['Total population by mother tongue']
home_total = census_dict['Total population by language spoken most often at home']
home_total = home_total.iloc[:, 0:104].copy()
work_total = census_dict['Total population 15 years and over who worked since January 1, 2005 by language used most often at work']
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_total.rename(columns={home_total.columns[1]: 'Total'},
inplace=True)
home_total.insert(1, 'Type',
'language most often spoken at home - total')
work_total.rename(columns={work_total.columns[1]: 'Total'},
inplace=True)
work_total.insert(1, 'Type',
'language most often spoken at work - total')
merged = pd.concat([mt_total, home_total, work_total])
elif year == 2001:
mt_total = census_dict['Total population by mother tongue']
home_total = census_dict['Total population by home language']
home_total = home_total.groupby(home_total.columns, axis=1).sum()
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_total.rename(columns={'Total population by home language': 'Total'}, inplace=True)
home_total.insert(1, 'Type', 'language most often spoken at home - total')
merged = pd.concat([mt_total, home_total])
else:
if year == 2011:
mt_total = census_dict['Detailed mother tongue - Total population excluding institutional residents']
mt_male = census_dict['Detailed mother tongue - Males excluding institutional residents']
mt_female = census_dict['Detailed mother tongue - Females excluding institutional residents']
home_total = census_dict['Detailed language spoken most often at home - Total population excluding institutional residents']
home_male = census_dict['Detailed language spoken most often at home - Males excluding institutional residents']
home_female = census_dict['Detailed language spoken most often at home - Females excluding institutional residents']
home2_total = census_dict['Detailed other language spoken regularly at home - Total population excluding institutional residents']
home2_male = census_dict['Detailed other language spoken regularly at home - Males excluding institutional residents']
home2_female = census_dict['Detailed other language spoken regularly at home - Females excluding institutional residents']
elif year == 2016:
mt_total = census_dict['Total - Mother tongue for the total population excluding institutional residents - 100% data']
mt_male = census_dict['Total - Mother tongue for males excluding institutional residents - 100% data']
mt_female = census_dict['Total - Mother tongue for females excluding institutional residents - 100% data']
home_total = census_dict['Total - Language spoken most often at home for the total population excluding institutional residents - 100% data']
home_male = census_dict['Total - Language spoken most often at home for males excluding institutional residents - 100% data']
home_female = census_dict['Total - Language spoken most often at home for females excluding institutional residents - 100% data']
home2_total = census_dict['Total - Other language(s) spoken regularly at home for the total population excluding institutional residents - 100% data']
home2_male = census_dict['Total - Other language(s) spoken regularly at home for males excluding institutional residents - 100% data']
home2_female = census_dict['Total - Other language(s) spoken regularly at home for females excluding institutional residents - 100% data']
mt_female.rename(columns={mt_female.columns[1]: 'Total'}, inplace=True)
mt_female.insert(1, 'Type', 'mother tongue - female')
mt_male.rename(columns={mt_male.columns[1]: 'Total'}, inplace=True)
mt_male.insert(1, 'Type', 'mother tongue - male')
mt_total.rename(columns={mt_total.columns[1]: 'Total'}, inplace=True)
mt_total.insert(1, 'Type', 'mother tongue - total')
home_female.rename(columns={home_female.columns[1]: 'Total'}, inplace=True)
home_female.insert(1, 'Type', 'language most often spoken at home - female')
home_male.rename(columns={home_male.columns[1]: 'Total'}, inplace=True)
home_male.insert(1, 'Type', 'language most often spoken at home - male')
home_total.rename(columns={home_total.columns[1]: 'Total'}, inplace=True)
home_total.insert(1, 'Type', 'language most often spoken at home - total')
home2_female.rename(columns={home2_female.columns[1]: 'Total'}, inplace=True)
home2_female.insert(1, 'Type', 'other language spoken at home - female')
home2_male.rename(columns={home2_male.columns[1]: 'Total'}, inplace=True)
home2_male.insert(1, 'Type', 'other language spoken at home - male')
home2_total.rename(columns={home2_total.columns[1]: 'Total'}, inplace=True)
home2_total.insert(1, 'Type', 'other language spoken at home - total')
merged = pd.concat([mt_female, mt_male, mt_total,
home_female, home_male, home_total,
home2_female, home2_male, home2_total])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['detailed language'] = merged
merged.to_csv(file_path + '/detailed_language.csv')
return census_dict
###############################################################################
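# Official language: knowledge of official languages and first official language
# spoken are stacked into one frame, distinguished by the 'Type' column.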
def clean_official_language(census_dict, year, file_path):
col_names = ['LocalArea', 'Type', 'Total', 'English', 'French',
'English and French', 'Neither English nor French']
if year == 2016:
known = census_dict['Total - Knowledge of official languages for the total population excluding institutional residents - 100% data']
first = census_dict['Total - First official language spoken for the total population excluding institutional residents - 100% data']
elif year == 2011:
known = census_dict['Knowledge of official languages - Total population excluding institutional residents']
first = census_dict['First official language spoken - Total population excluding institutional residents']
elif year in [2001, 2006]:
known = census_dict['Total population by knowledge of official languages']
first = census_dict['Total population by first official language spoken']
known.insert(1, 'Type', 'knowledge of official languages')
known.set_axis(col_names, axis=1, inplace=True)
first.insert(1, 'Type', 'first official language spoken')
first.set_axis(col_names, axis=1, inplace=True)
merged = pd.concat([known, first])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['official language'] = merged
merged.to_csv(file_path + '/official_language.csv')
return census_dict
###############################################################################
def clean_structural_dwelling_type(census_dict, year, file_path):
if year == 2006:
col_names = ['LocalArea', 'Total', 'Single-detached house',
'Semi-detached house', 'Row house', 'Apartment, duplex',
'Apartment, building that has five or more storeys']
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
elif year in [2001, 2011, 2016]:
col_names = ['LocalArea', 'Total', 'Single-detached house',
'Semi-detached house', 'Row house',
'Apartment, detached duplex',
'Apartment, building that has five or more storeys',
'Apartment, building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']
if year == 2001:
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
df = df.iloc[:, 0:10].copy()
elif year == 2011:
df = census_dict['Total number of occupied private dwellings by structural type of dwelling']
df = df[['LocalArea',
'Total number of occupied private dwellings by structural type of dwelling',
'Single-detached house', 'Semi-detached house',
'Row house', 'Apartment, duplex',
'Apartment, building that has five or more storeys',
'Apartment, building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']].copy()
elif year == 2016:
df = census_dict['Total - Occupied private dwellings by structural type of dwelling - 100% data']
df = df[['LocalArea',
'Total - Occupied private dwellings by structural type of dwelling - 100% data',
'Single-detached house', 'Semi-detached house',
'Row house', 'Apartment or flat in a duplex',
'Apartment in a building that has five or more storeys',
'Apartment in a building that has fewer than five storeys',
'Other single-attached house', 'Movable dwelling']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['structural dwelling type'] = df
df.to_csv(file_path + '/structural_dwelling_type.csv')
return census_dict
###############################################################################
def clean_household_size(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 to 5 persons', '6 or more persons',
'Average household size']
df = census_dict['Total number of private households by household size']
elif year in [2006, 2011]:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 to 5 persons', '6 or more persons',
'Number of persons in private households',
'Average household size']
df = census_dict['Total number of private households by household size']
elif year == 2016:
col_names = ['LocalArea', 'Total households', '1 person', '2 persons',
'3 persons', '4 persons', '5 or more persons',
'Number of persons in private households',
'Average household size']
df = census_dict['Total - Private households by household size - 100% data']
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['household size'] = df
df.to_csv(file_path + '/household_size.csv')
return census_dict
###############################################################################
def clean_lone_parent(census_dict, year, file_path):
col_names = ['LocalArea', 'Total lone-parent families', 'Female parent',
'Male parent', '1 child', '2 children', '3 or more children']
if year == 2016:
df1 = census_dict["Total lone-parent families by sex of parent"]
df2 = census_dict["Total - Lone-parent census families in private households - 100% data"]
df = pd.concat([df1, df2], axis=1)
df = df.groupby(df.columns, axis=1).first()
df = df[['LocalArea', 'Total lone-parent families by sex of parent',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
elif year == 2011:
df = census_dict['Total lone-parent families by sex of parent and number of children']
df = df.groupby(df.columns, axis=1).sum()
df = df[['LocalArea',
'Total lone-parent families by sex of parent and number of children',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
elif year == 2006:
df1 = census_dict['Total lone-parent families by sex of parent and number of children']
df2 = census_dict['Female parent']
df2 = df2.iloc[:, 1:5].copy()
df3 = census_dict['Male parent']
df3 = df3.iloc[:, 1:5].copy()
df = pd.concat([df1, df2, df3], axis=1)
df = df.groupby(df.columns, axis=1).sum()
df = df[['LocalArea',
'Total lone-parent families by sex of parent and number of children',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
elif year == 2001:
df1 = census_dict['Total lone-parent families by sex of parent']
df2 = census_dict['Female parent']
df2 = df2.iloc[:, 1:5].copy()
df3 = census_dict['Male parent']
df3 = df3.iloc[:, 1:5].copy()
df = pd.concat([df1, df2, df3], axis=1)
df = df.groupby(df.columns, axis=1).sum()
df = df[['LocalArea', 'Total lone-parent families by sex of parent',
'Female parent', 'Male parent', '1 child', '2 children',
'3 or more children']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['lone_parent'] = df
df.to_csv(file_path + '/lone_parent.csv')
return census_dict
###############################################################################
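# Age at immigration: the 2011 figures come from the pre-processed NHS extract on
# disk rather than from census_dict, and 2001 uses coarser age brackets than the
# later years.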
def clean_immigration_age(census_dict, year, file_path):
if year in [2006, 2016]:
col_names = ['LocalArea', 'Total immigrant population',
'Under 5 years', '5 to 14 years', '15 to 24 years',
'25 to 44 years', '45 years and over']
if year == 2006:
df = census_dict['Total immigrant population by age at immigration']
elif year == 2016:
df = census_dict['Total - Age at immigration for the immigrant population in private households - 25% sample data']
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total immigrant population',
'Under 5 years', '5 to 14 years', '15 to 24 years',
'25 to 44 years', '45 years and over']
df = pd.read_csv('data/processed/nhs/Age at immigration.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total immigrant population in private households by age at immigration',
'1_Under 5 years', '2_5 to 14 years', '3_15 to 24 years',
'4_25 to 44 years', '5_45 years and over']].copy()
elif year == 2001:
col_names = ['LocalArea', 'Total immigrant population',
'Under 5 years', '5 to 19 years', '20 years and over']
df = census_dict['Total immigrant population by age at immigration']
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['immigration_age'] = df
df.to_csv(file_path + '/immigration_age.csv')
return census_dict
###############################################################################
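# Period of immigration: the period brackets differ by census year, so the output
# columns are year-specific; 2011 again relies on the pre-processed NHS extract.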
def clean_immigration_period(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Total immigrant population', 'Before 1961',
'1961 to 1970', '1971 to 1980', '1981 to 1990',
'1991 to 1995', '1996 to 2001']
df = census_dict['Total immigrant population by period of immigration']
elif year == 2006:
col_names = ['LocalArea', 'Total immigrant population', 'Before 1961',
'1961 to 1970', '1971 to 1980', '1981 to 1990',
'1991 to 2000', '1991 to 1995', '1996 to 2000',
'2001 to 2006']
df = census_dict['Total immigrant population by period of immigration']
elif year == 2016:
col_names = ['LocalArea', 'Total population', 'Non-immigrants',
'Non-permanent residents', 'Immigrants',
'Before 1981', '1981 to 1990', '1991 to 2000',
'2001 to 2010', '2001 to 2005', '2006 to 2010',
'2011 to 2016']
df = census_dict['Total - Immigrant status and period of immigration for the population in private households - 25% sample data']
df = df[['LocalArea',
'Total - Immigrant status and period of immigration for the population in private households - 25% sample data',
'Non-immigrants', 'Non-permanent residents', 'Immigrants',
'Before 1981', '1981 to 1990', '1991 to 2000', '2001 to 2010',
'2001 to 2005', '2006 to 2010', '2011 to 2016']].copy()
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total population',
'Non-immigrants', 'Non-permanent residents', 'Immigrants',
'Before 1971', '1971 to 1980', '1981 to 1990',
'1991 to 2000', '2001 to 2005', '2006 to 2011']
df = pd.read_csv('data/processed/nhs/Immigrant status and period of immigration.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total population in private households by immigrant status and period of immigration',
'1_Non-immigrants', '10_Non-permanent residents',
'2_Immigrants', '3_Before 1971', '4_1971 to 1980',
'5_1981 to 1990', '6_1991 to 2000', '8_2001 to 2005',
'9_2006 to 2011']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['immigration_period'] = df
df.to_csv(file_path + '/immigration_period.csv')
return census_dict
###############################################################################
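# Visible minority: the 2011 NHS extract uses numerically prefixed column names
# (e.g. '14_Not a visible minority') that are mapped back onto the harmonized
# column set used for the other years.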
def clean_visible_minority(census_dict, year, file_path):
col_names = ['LocalArea', 'Total population', 'Not a visible minority',
'Total visible minority population', 'Arab', 'Black',
'Chinese', 'Filipino', 'Japanese', 'Korean',
'Latin American', 'West Asian', 'South Asian',
'Southeast Asian', 'Multiple visible minorities',
'Other visible minority']
if year == 2001:
df = census_dict['Total population by visible minority groups']
df = df[['LocalArea', 'Total population by visible minority groups',
'All others', 'Total visible minority population',
'Arab', 'Black', 'Chinese', 'Filipino', 'Japanese',
'Korean', 'Latin American', 'West Asian', 'South Asian',
'Southeast Asian', 'Multiple visible minorities',
'Visible minority, n.i.e.']].copy()
elif year == 2006:
df = census_dict['Total population by visible minority groups']
df = df[['LocalArea', 'Total population by visible minority groups',
'Not a visible minority', 'Total visible minority population',
'Arab', 'Black', 'Chinese', 'Filipino', 'Japanese', 'Korean',
'Latin American', 'West Asian', 'South Asian',
'Southeast Asian', 'Multiple visible minority',
'Visible minority, n.i.e.']]
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total population',
'Not a visible minority',
'Total visible minority population',
'Arab', 'Black', 'Chinese', 'Filipino', 'Japanese',
'Korean', 'Latin American', 'West Asian', 'South Asian',
'Southeast Asian', 'Multiple visible minorities',
'Other visible minority']
df = pd.read_csv('data/processed/nhs/Visible minority population.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total population in private households by visible minority',
'14_Not a visible minority',
'1_Total visible minority population',
'7_Arab', '4_Black', '3_Chinese', '5_Filipino', '11_Japanese',
'10_Korean', '6_Latin American', '9_West Asian',
'2_South Asian', '8_Southeast Asian',
'13_Multiple visible minorities',
'12_Visible minority, n.i.e.']].copy()
elif year == 2016:
df = census_dict['Total visible minority population']
df = df[['LocalArea', 'Total visible minority population',
'Not a visible minority', 'Total visible minority population',
'Arab', 'Black', 'Chinese', 'Filipino', 'Japanese', 'Korean',
'Latin American', 'West Asian', 'South Asian',
'Southeast Asian', 'Multiple visible minorities',
'Visible minority, n.i.e.']]
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['visible_minority'] = df
df.to_csv(file_path + '/visible_minority.csv')
return census_dict
###############################################################################
def clean_birth_place(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Total population', 'Non-immigrants',
'Born in province of residence',
'Born outside province of residence',
'Non-permanent residents', 'Immigrants', 'United Kingdom',
"China", 'Italy', 'India', 'United States', 'Hong Kong',
'Philippines', 'Poland', 'Germany', 'Portugal',
'Viet Nam', 'Jamaica', 'Netherlands', 'Guyana', 'Greece',
'South Korea', 'France', 'Lebanon', 'Taiwan',
'Yugoslavia', 'Haiti', 'Ukraine', 'Croatia', 'Mexico',
'Egypt', 'South Africa', 'Ireland', 'Morocco', 'Austria',
'Switzerland', 'Other places of birth']
df1 = census_dict['Total population by immigrant status and place of birth']
df1 = df1.iloc[:, 1:5].copy()
df2 = census_dict['Total immigrants by selected places of birth']
df = pd.concat([df1, df2], axis=1)
df = df[['LocalArea',
'Total population by immigrant status and place of birth',
'Non-immigrant population', 'Born in province of residence',
'Born outside province of residence',
'Non-permanent residents',
'Total immigrants by selected places of birth',
'United Kingdom', "China, People's Republic of",
'Italy', 'India', 'United States',
'Hong Kong, Special Administrative Region', 'Philippines',
'Poland', 'Germany', 'Portugal', 'Viet Nam', 'Jamaica',
'Netherlands', 'Guyana', 'Greece', 'Korea, South', 'France',
'Lebanon', 'Taiwan', 'Yugoslavia', 'Haiti', 'Ukraine',
'Croatia', 'Mexico', 'Egypt', 'South Africa, Republic of',
'Ireland, Republic of (EIRE)', 'Morocco', 'Austria',
'Switzerland', 'All other places of birth']].copy()
elif year == 2006:
col_names = ['LocalArea', 'Total population', 'Non-immigrants',
'Born in province of residence',
'Born outside province of residence',
'Non-permanent residents', 'Immigrants', 'United States',
'Central America', 'Caribbean and Bermuda',
'South America', 'Europe', 'Western Europe',
'Eastern Europe', 'Southern Europe',
'Italy', 'Other Southern Europe', 'Northern Europe',
'United Kingdom', 'Other Northern Europe', 'Africa',
'Western Africa', 'Eastern Africa', 'Northern Africa',
'Central Africa', 'Southern Africa',
'Asia and the Middle East',
'West Central Asia and the Middle East', 'Eastern Asia',
'China', 'Hong Kong', 'Other Eastern Asia',
'Southeast Asia', 'Philippines', 'Other Southeast Asia',
'Southern Asia', 'India', 'Other Southern Asia',
'Oceania and other']
df = census_dict['Total population by immigrant status and place of birth']
df = df[['LocalArea',
'Total population by immigrant status and place of birth',
'Non-immigrants', 'Born in province of residence',
'Born outside province of residence',
'Non-permanent residents', 'Immigrants',
'United States of America', 'Central America',
'Caribbean and Bermuda', 'South America', 'Europe',
'Western Europe', 'Eastern Europe', 'Southern Europe',
'Italy', 'Other Southern Europe', 'Northern Europe',
'United Kingdom', 'Other Northern Europe', 'Africa',
'Western Africa', 'Eastern Africa', 'Northern Africa',
'Central Africa', 'Southern Africa',
'Asia and the Middle East',
'West Central Asia and the Middle East', 'Eastern Asia',
"China, People's Republic of",
'Hong Kong, Special Administrative Region',
'Other Eastern Asia', 'Southeast Asia', 'Philippines',
'Other Southeast Asia', 'Southern Asia', 'India',
'Other Southern Asia', 'Oceania and other']]
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total population', 'Non-immigrants',
'Born in province of residence',
'Born outside province of residence',
'Non-permanent residents', 'Immigrants',
'Afghanistan', 'Africa', 'Algeria', 'Americas', 'Asia',
'Bangladesh', 'Bosnia and Herzegovina', 'Chile', 'China',
'Colombia', 'Croatia', 'Egypt', 'El Salvador', 'Ethiopia',
'Europe', 'Fiji', 'France', 'Germany', 'Greece', 'Guyana',
'Haiti', 'Hong Kong', 'Hungary', 'India', 'Iran', 'Iraq',
'Ireland', 'Italy', 'Jamaica', 'Japan', 'Kenya',
'South Korea', 'Lebanon', 'Mexico', 'Morocco',
'Netherlands', 'Nigeria', 'Pakistan', 'Peru',
'Philippines', 'Poland', 'Portugal', 'Romania', 'Russia',
'Serbia', 'South Africa', 'Sri Lanka', 'Taiwan',
'Trinidad and Tobago', 'Turkey', 'Ukraine',
'United Kingdom', 'United States', 'Viet Nam',
'Oceania and other', 'Other Africa', 'Other Americas',
'Other Asia', 'Other Europe', 'Other places of birth']
df = pd.read_csv('data/processed/nhs/Immigrant status and selected places of birth.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total population in private households by immigrant status and selected places of birth',
'1_Non-immigrants', '2_Born in province of residence',
'3_Born outside province of residence',
'65_Non-permanent residents', '4_Immigrants',
'58_Afghanistan', '35_Africa', '37_Algeria', '5_Americas',
'44_Asia', '57_Bangladesh', '31_Bosnia and Herzegovina',
'15_Chile', '46_China', '12_Colombia', '29_Croatia',
'38_Egypt', '13_El Salvador', '41_Ethiopia', '17_Europe',
'63_Fiji', '24_France', '20_Germany', '27_Greece', '8_Guyana',
'9_Haiti', '48_Hong Kong Special Administrative Region',
'30_Hungary', '45_India', '52_Iran', '56_Iraq',
'33_Ireland, Republic of', '19_Italy', '7_Jamaica',
'59_Japan', '42_Kenya', '53_Korea, South', '54_Lebanon',
'10_Mexico', '36_Morocco', '23_Netherlands', '40_Nigeria',
'50_Pakistan', '14_Peru', '47_Philippines', '21_Poland',
'22_Portugal', '25_Romania', '26_Russian Federation',
'32_Serbia', '39_South Africa, Republic of', '51_Sri Lanka',
'55_Taiwan', '11_Trinidad and Tobago', '60_Turkey',
'28_Ukraine', '18_United Kingdom', '6_United States',
'49_Viet Nam', '62_Oceania and other',
'43_Other places of birth in Africa',
'16_Other places of birth in Americas',
'61_Other places of birth in Asia',
'34_Other places of birth in Europe',
'64_Other places of birth']].copy()
elif year == 2016:
col_names = ['LocalArea', 'Total population', 'Non-immigrants',
'Non-permanent residents', 'Immigrants', 'Americas',
'Brazil', 'Colombia', 'El Salvador', 'Guyana', 'Haiti',
'Jamaica', 'Mexico', 'Peru', 'Trinidad and Tobago',
'United States', 'Other Americas', 'Europe',
'Bosnia and Herzegovina', 'Croatia', 'France', 'Germany',
'Greece', 'Hungary', 'Ireland', 'Italy', 'Netherlands',
'Poland', 'Portugal', 'Romania', 'Russia', 'Serbia',
'Ukraine', 'United Kingdom', 'Other Europe', 'Africa',
'Algeria', 'Egypt', 'Ethiopia', 'Kenya', 'Morocco',
'Nigeria', 'Somalia', 'South Africa', 'Other Africa',
'Asia', 'Afghanistan', 'Bangladesh', 'China', 'Hong Kong',
'India', 'Iran', 'Iraq', 'Japan', 'South Korea',
'Lebanon', 'Pakistan', 'Philippines', 'Sri Lanka',
'Syria', 'Taiwan', 'Viet Nam', 'Other Asia',
'Oceania and other places of birth']
df1 = census_dict['Total - Immigrant status and period of immigration for the population in private households - 25% sample data']
df1 = df1[['Total - Immigrant status and period of immigration for the population in private households - 25% sample data',
'Non-immigrants', 'Non-permanent residents']].copy()
df2 = census_dict['Total - Selected places of birth for the immigrant population in private households - 25% sample data']
df = pd.concat([df1, df2], axis=1)
df = df[['LocalArea',
'Total - Immigrant status and period of immigration for the population in private households - 25% sample data',
'Non-immigrants', 'Non-permanent residents',
'Total - Selected places of birth for the immigrant population in private households - 25% sample data',
'Americas', 'Brazil', 'Colombia', 'El Salvador', 'Guyana',
'Haiti', 'Jamaica', 'Mexico', 'Peru', 'Trinidad and Tobago',
'United States', 'Other places of birth in Americas',
'Europe', 'Bosnia and Herzegovina', 'Croatia', 'France',
'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy',
'Netherlands', 'Poland', 'Portugal', 'Romania',
'Russian Federation', 'Serbia', 'Ukraine', 'United Kingdom',
'Other places of birth in Europe', 'Africa', 'Algeria',
'Egypt', 'Ethiopia', 'Kenya', 'Morocco', 'Nigeria', 'Somalia',
'South Africa, Republic of',
'Other places of birth in Africa', 'Asia', 'Afghanistan',
'Bangladesh', 'China', 'Hong Kong', 'India', 'Iran', 'Iraq',
'Japan', 'Korea, South', 'Lebanon', 'Pakistan', 'Philippines',
'Sri Lanka', 'Syria', 'Taiwan', 'Viet Nam',
'Other places of birth in Asia',
'Oceania and other places of birth']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['immigration_birth_place'] = df
df.to_csv(file_path + '/immigration_birth_place.csv')
return census_dict
###############################################################################
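# Shelter tenure: for 2011 the owner and tenant household counts are taken from
# the NHS shelter-cost extract, so that year's output has a 'Type' column but no
# 'Band housing' column.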
def clean_shelter_tenure(census_dict, year, file_path):
col_names = ['LocalArea', 'Total number of dwellings', 'Owned', 'Rented',
'Band housing']
if year == 2001:
df = census_dict['Total number of occupied private dwellings by tenure']
elif year == 2006:
df = census_dict['Total number of occupied private dwellings by housing tenure']
elif year == 2011:
col_names = ['LocalArea', 'Type', 'Total number of dwellings',
'Owned', 'Rented']
df = pd.read_csv('data/processed/nhs/Shelter costs.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total number of owner and tenant households with household total income greater than zero, in non-farm, non-reserve private dwellings by shelter-cost-to-income ratio',
'4_Number of owner households in non-farm, non-reserve private dwellings',
'11_Number of tenant households in non-farm, non-reserve private dwellings']].copy()
elif year == 2016:
df = census_dict['Total - Private households by tenure - 25% sample data']
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['shelter_tenure'] = df
df.to_csv(file_path + '/shelter_tenure.csv')
return census_dict
###############################################################################
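# Education: combines the highest certificate/diploma/degree tables with the major
# field of study tables; in 2001 and 2006 the male and female field-of-study tables
# are summed per LocalArea before being joined to the schooling columns.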
def clean_education(census_dict, year, file_path):
if year == 2001:
col_names = ['LocalArea', 'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Agriculture, natural resources and conservation',
'Engineering and applied sciences',
'Applied science technologies and trades',
'Health and related fields',
'Mathematics, computer and information sciences', 'No specialization',
'Total population with postsecondary qualifications',
'Total population 20 years and over',
'population 20 years and over - Less than grade 9',
'population 20 years and over - Grades 9 to 13',
'population 20 years and over - Without High school diploma or equivalent',
'population 20 years and over - High school diploma or equivalent',
'population 20 years and over - Apprenticeship or trades certificate or diploma',
'population 20 years and over - College',
'population 20 years and over - College without certificate or diploma',
'population 20 years and over - College, CEGEP or other non-university certificate or diploma',
'population 20 years and over - University',
'population 20 years and over - University without degree',
'population 20 years and over - University without certificate or diploma',
'population 20 years and over - University with certificate or diploma',
"population 20 years and over - University certificate, diploma or degree at bachelor level or above"]
df1 = census_dict['Total population of females with postsecondary qualifications by major field of study']
df2 = census_dict['Total population of males with postsecondary qualifications by major field of study']
df3 = census_dict['Total population 20 years and over by highest level of schooling']
df3 = df3.iloc[:, 1:20].copy()
df4 = pd.concat([df1, df2])
df4 = df4.groupby('LocalArea').sum()
df4['Total population with postsecondary qualifications'] = df4['Total population of females with postsecondary qualifications by major field of study']+ df4['Total population of males with postsecondary qualifications by major field of study']
df4.reset_index(inplace=True)
df = pd.concat([df4, df3], axis=1)
df.drop(columns=['Total population of females with postsecondary qualifications by major field of study',
'Total population of males with postsecondary qualifications by major field of study'], inplace=True)
elif year == 2006:
col_names = ['LocalArea', 'Total population aged 15 years and over',
'population aged 15 years and over - No certificate, diploma or degree',
'population aged 15 years and over - Certificate, diploma or degree',
'population aged 15 years and over - High school certificate or equivalent',
'population aged 15 years and over - Apprenticeship or trades certificate or diploma',
'population aged 15 years and over - College, CEGEP or other non-university certificate or diploma',
'population aged 15 years and over - University certificate, diploma or degree',
'population aged 15 years and over - University certificate or diploma below bachelor level',
'population aged 15 years and over - University certificate or degree', "Bachelor's degree",
'population aged 15 years and over - University certificate or diploma above bachelor level',
'population aged 15 years and over - Degree in medicine, dentistry, veterinary medicine or optometry',
"population aged 15 years and over - Master's degree",
'population aged 15 years and over - Earned doctorate',
'Total population 25 to 64 years with postsecondary qualifications',
'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Physical and life sciences and technologies',
'Mathematics, computer and information sciences',
'Architecture, engineering, and related technologies',
'Agriculture, natural resources and conservation',
'Health, parks, recreation and fitness',
'Personal, protective and transportation services',
'Other fields of study']
df1 = census_dict['Total male population 25 to 64 years with postsecondary qualifications by major field of study - Classification of Instructional Programs, 2000']
df2 = census_dict['Total female population 25 to 64 years with postsecondary qualifications by major field of study - Classification of Instructional Programs, 2000']
df = pd.concat([df1, df2])
df = df.groupby('LocalArea').sum()
df['Total population 25 to 64 years with postsecondary qualifications'] = df['Total male population 25 to 64 years with postsecondary qualifications by major field of study - Classification of Instructional Programs, 2000']+df['Total female population 25 to 64 years with postsecondary qualifications by major field of study - Classification of Instructional Programs, 2000']
df.reset_index(inplace=True)
df3 = census_dict['Total population 15 to 24 years by highest certificate, diploma or degree']
df4 = census_dict['Total population 25 to 64 years by highest certificate, diploma or degree']
df5 = pd.concat([df3, df4])
df5 = df5.groupby('LocalArea').sum()
df5['Total population 15 years and over'] = df5['Total population 15 to 24 years by highest certificate, diploma or degree'] + df5['Total population 25 to 64 years by highest certificate, diploma or degree']
df5.reset_index(inplace=True)
df5 = df5.iloc[:, 1:20].copy()
df = pd.concat([df, df5], axis=1)
df = df[['LocalArea', 'Total population 15 years and over',
'No certificate, diploma or degree',
'Certificate, diploma or degree',
'High school certificate or equivalent',
'Apprenticeship or trades certificate or diploma',
'College, CEGEP or other non-university certificate or diploma',
'University certificate, diploma or degree',
'University certificate or diploma below bachelor level',
'University certificate or degree', "Bachelor's degree",
'University certificate or diploma above bachelor level',
'Degree in medicine, dentistry, veterinary medicine or optometry',
"Master's degree", 'Earned doctorate',
'Total population 25 to 64 years with postsecondary qualifications',
'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Physical and life sciences and technologies',
'Mathematics, computer and information sciences',
'Architecture, engineering, and related technologies',
'Agriculture, natural resources and conservation',
'Health, parks, recreation and fitness',
'Personal, protective and transportation services',
'Other fields of study']].copy()
elif year == 2011:
col_names = ['LocalArea', 'Type',
'Total population aged 15 years and over',
'population aged 15 years and over - No certificate, diploma or degree',
'population aged 15 years and over - High school diploma or equivalent',
'population aged 15 years and over - Postsecondary certificate, diploma or degree',
'population aged 15 years and over - Apprenticeship or trades certificate or diploma',
'population aged 15 years and over - College, CEGEP or other non-university certificate or diploma',
'population aged 15 years and over - University certificate or diploma below bachelor level',
'population aged 15 years and over - University certificate, diploma or degree at bachelor level or above',
"population aged 15 years and over - Bachelor's degree",
'population aged 15 years and over - University certificate, diploma or degree above bachelor level',
'Total population aged 25 to 64 years',
'population aged 25 to 64 years - No certificate, diploma or degree',
'population aged 25 to 64 years - High school diploma or equivalent',
'population aged 25 to 64 years - Postsecondary certificate, diploma or degree',
'population aged 25 to 64 years - Apprenticeship or trades certificate or diploma',
'population aged 25 to 64 years - College, CEGEP or other non-university certificate or diploma',
'population aged 25 to 64 years - University certificate or diploma below bachelor level',
'population aged 25 to 64 years - University certificate, diploma or degree at bachelor level or above',
"population aged 25 to 64 years - Bachelor's degree",
'population aged 25 to 64 years - University certificate, diploma or degree above bachelor level',
'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Physical and life sciences and technologies',
'Mathematics, computer and information sciences',
'Architecture, engineering, and related technologies',
'Agriculture, natural resources and conservation',
'Health and related fields',
'Personal, protective and transportation services',
'Other fields of study',
'population aged 15 years and over - No postsecondary certificate, diploma or degree',
'population aged 15 years and over - With postsecondary certificate, diploma or degree']
df = pd.read_csv('data/processed/nhs/Education.csv', index_col=0)
df = df[['LocalArea', 'Type',
'0_Total population aged 15 years and over by highest certificate, diploma or degree',
'1_No certificate, diploma or degree',
'2_High school diploma or equivalent',
'3_Postsecondary certificate, diploma or degree',
'4_Apprenticeship or trades certificate or diploma',
'5_College, CEGEP or other non-university certificate or diploma',
'6_University certificate or diploma below bachelor level',
'7_University certificate, diploma or degree at bachelor level or above',
"8_Bachelor's degree",
'9_University certificate, diploma or degree above bachelor level',
'10_Total population aged 25 to 64 years by highest certificate, diploma or degree',
'11_No certificate, diploma or degree',
'12_High school diploma or equivalent',
'13_Postsecondary certificate, diploma or degree',
'14_Apprenticeship or trades certificate or diploma',
'15_College, CEGEP or other non-university certificate or diploma',
'16_University certificate or diploma below bachelor level',
'17_University certificate, diploma or degree at bachelor level or above',
"18_Bachelor's degree",
'19_University certificate, diploma or degree above bachelor level',
'22_Education',
'23_Visual and performing arts, and communications technologies',
'24_Humanities', '25_Social and behavioural sciences and law',
'26_Business, management and public administration',
'27_Physical and life sciences and technologies',
'28_Mathematics, computer and information sciences',
'29_Architecture, engineering, and related technologies',
'30_Agriculture, natural resources and conservation',
'31_Health and related fields',
'32_Personal, protective and transportation services',
'33_Other fields of study',
'35_No postsecondary certificate, diploma or degree',
'36_With postsecondary certificate, diploma or degree']].copy()
elif year == 2016:
col_names = ['LocalArea', 'Total population aged 15 years and over',
'population aged 15 years and over - No certificate, diploma or degree',
'population aged 15 years and over - High school diploma or equivalent',
'population aged 15 years and over - Postsecondary certificate, diploma or degree',
'population aged 15 years and over - Apprenticeship or trades certificate or diploma',
'population aged 15 years and over - Trades certificate or diploma',
'population aged 15 years and over - Certificate of Apprenticeship or Certificate of Qualification',
'population aged 15 years and over - College, CEGEP or other non-university certificate or diploma',
'population aged 15 years and over - University certificate or diploma below bachelor level',
'population aged 15 years and over - University certificate, diploma or degree at bachelor level or above',
"population aged 15 years and over - Bachelor's degree",
'population aged 15 years and over - University certificate or diploma above bachelor level',
'population aged 15 years and over - Degree in medicine, dentistry, veterinary medicine or optometry',
"population aged 15 years and over - Master's degree",
'population aged 15 years and over - Earned doctorate',
'population aged 15 years and over - No postsecondary certificate, diploma or degree',
'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Physical and life sciences and technologies',
'Mathematics, computer and information sciences',
'Architecture, engineering, and related technologies',
'Agriculture, natural resources and conservation',
'Personal, protective and transportation services',
'Other fields of study']
df1 = census_dict['Total - Highest certificate, diploma or degree for the population aged 15 years and over in private households - 25% sample data']
df2 = census_dict['Total - Major field of study - Classification of Instructional Programs (CIP) 2016 for the population aged 15 years and over in private households - 25% sample data']
df2 = df2.iloc[:, 1:70].copy()
df = pd.concat([df1, df2], axis=1)
df = df[['LocalArea',
'Total - Highest certificate, diploma or degree for the population aged 15 years and over in private households - 25% sample data',
'No certificate, diploma or degree',
'Secondary (high) school diploma or equivalency certificate',
'Postsecondary certificate, diploma or degree',
'Apprenticeship or trades certificate or diploma',
'Trades certificate or diploma other than Certificate of Apprenticeship or Certificate of Qualification',
'Certificate of Apprenticeship or Certificate of Qualification',
'College, CEGEP or other non-university certificate or diploma',
'University certificate or diploma below bachelor level',
'University certificate, diploma or degree at bachelor level or above',
"Bachelor's degree",
'University certificate or diploma above bachelor level',
'Degree in medicine, dentistry, veterinary medicine or optometry',
"Master's degree", 'Earned doctorate',
'No postsecondary certificate, diploma or degree',
'Education',
'Visual and performing arts, and communications technologies',
'Humanities', 'Social and behavioural sciences and law',
'Business, management and public administration',
'Physical and life sciences and technologies',
'Mathematics, computer and information sciences',
'Architecture, engineering, and related technologies',
'Agriculture, natural resources and conservation',
'Personal, protective and transportation services',
'Other']].copy()
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['education'] = df
df.to_csv(file_path + '/education.csv')
return census_dict
###############################################################################
def clean_household_type(census_dict, year, file_path):
col_names = ['LocalArea',
'Total number of private households by household type',
'One-family households', 'Multiple-family households',
'Non-family households']
if year == 2001:
df = census_dict['Total number of private households by household type']
df = df[col_names]
elif year == 2006:
df = census_dict['Total number of private households by household type']
df = df[col_names]
elif year == 2011:
col_names = ['LocalArea',
'Total number of private households by household type',
'One-family only households', 'Couple family households',
'Other family households']
df = census_dict['Total number of private households by household type']
df = df.iloc[:, 0:9]
df = df[col_names]
elif year == 2016:
col_names = ['LocalArea',
'Total - Private households by household type - 100% data',
'One-census-family households',
'Multiple-census-family households',
'Non-census-family households']
df = census_dict['Total - Private households by household type - 100% data']
df = df[col_names]
df.set_axis(col_names, axis=1, inplace=True)
df.sort_values(by=['LocalArea'], inplace=True)
census_dict['household_type'] = df
df.to_csv(file_path + '/household_type.csv')
return census_dict
###############################################################################
def clean_citizenship(census_dict, year, file_path):
col_names = ['LocalArea', 'Canadian citizens', 'Not Canadian citizens']
if year == 2001:
col_names = ['LocalArea', 'Canadian Citizenship',
'Citizenship other than Canadian']
df = census_dict['Total population by citizenship']
df = df[col_names]
elif year == 2006:
df = census_dict['Total population by citizenship']
df = df[col_names]
elif year == 2011:
df = | pd.read_csv('data/processed/nhs/Citizenship.csv', index_col=0) | pandas.read_csv |
import re
import csv
from collections import Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
pattern = re.compile(r"(\d+) (.+), (.+), CA (\d+), USA")
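# Expected address format: "<building no> <street>, <city>, CA <zipcode>, USA";
# the four capture groups are building number, street, city and ZIP code.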
class Employee(object):
def __init__(self,segments):
# address
matched = pattern.match(segments[0])
if matched is None:
raise Exception('format not supported')
self.building_no = int( matched.group(1) )
self.street = matched.group(2).lower()
self.city = matched.group(3)
self.zipcode = int( matched.group(4) )
# employee-id
self.id = int(segments[1])
employees = []
invalid_employees = []
address_file = "Employee_Addresses.csv"
with open(address_file,"rt") as inf:
reader = csv.reader(inf)
for segments in reader:
try:
employees.append(Employee(segments))
except:
invalid_employees.append(segments[0])
################
streets_counter = Counter((e.street for e in employees))
streets_counts = | pd.Series(streets_counter) | pandas.Series |
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
import json
import re
from datetime import datetime
import numpy as np
comm = re.compile("<!--|-->")
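# basketball-reference.com serves many of its tables inside HTML comments, so the
# comment markers are stripped (see the re.sub calls below) before parsing.
# Typical usage, assuming a valid team code and season, e.g. Team("TOR", 2020).per_game().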
class Team: #change team player object
def __init__(self, team, year, player=None):
self.year = year
self.team = team
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(re.sub("<!--|-->","",self.team_stat),"html.parser")
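    # Team summary from the 'team_misc' table: the first data row holds the team's
    # values and the second the league rank for each stat; four_factor=True returns
    # the offensive and defensive four-factor slices instead of the full frame.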
def team_sum(self, four_factor = False):
summary_container = self.soup.find("table",id="team_misc")
summary_table = summary_container.find("tbody")
team_sum_row = summary_table.find_all("tr")
dict_league_rank = {row['data-stat']:row.get_text() for row in team_sum_row[1]}
dict_team_sum = {row['data-stat']:row.get_text() for row in team_sum_row[0]}
del dict_team_sum['player'], dict_league_rank['player']
df_team = pd.DataFrame(data = [dict_team_sum, dict_league_rank],index = ['TEAM','LEAGUE']).T
for column in df_team.columns:
try:
df_team[column] = pd.to_numeric(df_team[column])
except:
pass
if four_factor:
off_stats = df_team.loc[['tov_pct',
'pace', 'orb_pct', 'efg_pct', 'ft_rate']]
off_stats.columns = ['Team','OFF']
# off_stats['Team'] = off_stats['Team'].apply(lambda x: float(x))
def_stats = df_team.loc[['opp_tov_pct',
'pace', 'drb_pct', 'opp_efg_pct', 'opp_ft_rate']]
def_stats.columns = ['Team','DEF']
# def_stats['Team'] = def_stats['Team'].apply(lambda x: float(x))
return off_stats, def_stats
return df_team
def roster(self, player = None):
roster_containter = self.soup.find("tbody")
roster_vals = roster_containter.find_all('tr')
data_list = []
for row in range(len(roster_vals)):
table_data = roster_vals[row].find_all("td")
data_list.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_roster = pd.DataFrame(data=data_list)
if player:
return df_roster[df_roster['player'].str.contains(player)].T
return df_roster
def injury_report(self,roster_update=False):
injury_table = self.soup.find("table",id="injury")
inj_body = injury_table.find("tbody")
inj_data = inj_body.find_all("tr")
df_injury = pd.DataFrame({
"player": [inj_data[data].find("th").get_text()
for data in range(len(inj_data))],
"team": [inj_data[data].find_all("td")[0].get_text() for data in range(len(inj_data))],
"date": [inj_data[data].find_all("td")[1].get_text() for data in range(len(inj_data))],
"description": [inj_data[data].find_all("td")[2].get_text() for data in range(len(inj_data))]
})
if roster_update == True:
updated = df_injury['description'].apply(lambda x: 0 if 'OUT' in x.upper().split(' ') else 1)
df_injury.description = updated
return df_injury
return df_injury
def per_game(self,player = None):
per_game_table = self.soup.find("table", id="per_game")
table_body = per_game_table.find("tbody")
table_row = table_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']
:table_data[data_row].get_text() for data_row in range(len(table_data))})
df_per_game = pd.DataFrame(data=data_row)
for column in df_per_game.columns:
try:
df_per_game[column] = pd.to_numeric(df_per_game[column])
except:
pass
if player:
return df_per_game[df_per_game['player'].str.contains(player)].T
return df_per_game
def totals(self, player = None):
totals_table = self.soup.find("table", id="totals")
totals_body = totals_table.find("tbody")
table_row = totals_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_totals = pd.DataFrame(data=data_row)
for column in df_totals.columns:
try:
df_totals[column] = pd.to_numeric(df_totals[column])
except:
pass
if player:
return df_totals[df_totals['player'].str.contains(player)].T
return df_totals
def per_minute(self, player = None):
six_table = self.soup.find("table", id="per_minute")
six_body = six_table.find("tbody")
table_row = six_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_minutes = pd.DataFrame(data=data_row)
for column in df_minutes.columns:
try:
df_minutes[column] = pd.to_numeric(df_minutes[column])
except:
pass
if player:
return df_minutes[df_minutes['player'].str.contains(player)].T
return df_minutes
def per_poss(self, player = None):
poss_table = self.soup.find("table", id="per_poss")
poss_body = poss_table.find("tbody")
table_row = poss_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_poss = pd.DataFrame(data=data_row)
for column in df_poss.columns:
try:
df_poss[column] = pd.to_numeric(df_poss[column])
except:
pass
if player:
return df_poss[df_poss['player'].str.contains(player)].T
return df_poss
def advanced(self, player = None):
poss_table = self.soup.find("table", id="advanced")
poss_body = poss_table.find("tbody")
table_row = poss_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_poss = pd.DataFrame(data=data_row)
for column in df_poss.columns:
try:
df_poss[column] = pd.to_numeric(df_poss[column])
except:
pass
if player:
return df_poss[df_poss['player'].str.contains(player)].T
return df_poss
def shooting(self, player = None):
shooting_table = self.soup.find("table", id="shooting")
shooting_body = shooting_table.find("tbody")
table_row = shooting_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_shooting = pd.DataFrame(data=data_row)
for column in df_shooting.columns:
try:
df_shooting[column] = pd.to_numeric(df_shooting[column])
except:
pass
if player:
return df_shooting[df_shooting['player'].str.contains(player)].T
return df_shooting
def pbp(self, player = None):
pbp_table = self.soup.find("table", id="pbp")
pbp_body = pbp_table.find("tbody")
table_row = pbp_body.find_all("tr")
data_row = []
for row in range(len(table_row)):
table_data = table_row[row].find_all("td")
data_row.append({table_data[data_row]['data-stat']: table_data[data_row].get_text()
for data_row in range(len(table_data))})
df_pbp = pd.DataFrame(data=data_row)
for column in df_pbp.columns:
try:
df_pbp[column] = pd.to_numeric(df_pbp[column])
except:
pass
if player:
return df_pbp[df_pbp['player'].str.contains(player)].T
return df_pbp
    def salaries(self, player=None):
salaries_table = self.soup.find("table", id="salaries2")
salaries_body = salaries_table.find_all("tr")
sal_dict = {salaries_body[row].find("td",class_='left').get_text():salaries_body[row].find("td",class_='right').get_text()
for row in range(1,len(salaries_body))}
df_sal = pd.DataFrame(sal_dict, index=[0]).T
for column in df_sal.columns:
try:
df_sal[column] = pd.to_numeric(df_sal[column])
except:
pass
if player:
return df_sal[df_sal.index.str.contains(player)].T
return df_sal
def leader(self):
leader_container = self.soup.find("div",id="div_leaderboard")
leader_table = leader_container.find_all("div")
data_dict = {leader_table[row].find("caption",class_="poptip").get_text():leader_table[row].find("td",class_="single").get_text()
for row in range(len(leader_table))}
df_ranks = pd.DataFrame(data_dict,index=[0]).T
return df_ranks
def splits(self,type_split=None,split_row = None):
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}/splits".format(self.team, self.year)).text
self.soup = BeautifulSoup(
re.sub("<!--|-->", "", self.team_stat), "html.parser")
splits_table = self.soup.find("table", id="team_splits")
tr_tags = splits_table.find_all("tr")
new_dict = []
for row in range(len(tr_tags)):
try:
label = tr_tags[row].find("td",class_='left').get_text()
data = tr_tags[row].find_all('td',class_='right')
values = {data[line]['data-stat']:data[line].get_text() for line in range(len(data))}
new_dict.append({label:values})
except:
pass
df = pd.DataFrame({key:dic[key] for dic in new_dict for key in dic})
for column in df.columns:
try:
df[column] = pd.to_numeric(df[column])
except:
pass
if type_split and split_row:
return df[type_split].loc[split_row]
elif type_split:
return df[type_split]
else:
return df
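    # Season schedule from the 'games' table: parses game dates, adds a point
    # spread column (pts - opp_pts) and drops rows without a parsed date.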
def schedule(self,date=None,game=None):
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}_games.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(
re.sub("<!--|-->", "", self.team_stat), "html.parser")
results_table = self.soup.find("table",id="games")
results_body = results_table.find("tbody")
results_row = results_body.find_all("tr")
new_dict = []
for num in range(len(results_row)):
try:
td_tags = results_row[num].find_all("td")
values = {td_tags[data]['data-stat']:td_tags[data].get_text() for data in range(len(td_tags))}
new_dict.append(values)
except:
pass
df = pd.DataFrame(data=new_dict)
df['date_game'] = pd.to_datetime(df['date_game'],format='%a, %b %d, %Y')
df[['opp_pts','pts']] = df[['opp_pts','pts']].apply(pd.to_numeric)
df['spread'] = df['pts'] - df['opp_pts']
df.drop(labels='box_score_text',inplace=True,axis=1)
df = df[pd.notnull(df['date_game'])]
if game:
return df[df['opp_name'].str.contains(game)]
if date:
return df[df.date_game==date]
else:
return df
def game_log(self,game=None):
#https://www.basketball-reference.com/teams/TOR/2020/gamelog/
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}/gamelog".format(self.team, self.year)).text
self.soup = BeautifulSoup(
re.sub("<!--|-->", "", self.team_stat), "html.parser")
game_table = self.soup.find('table',id='tgl_basic')
table_body = game_table.find('tbody')
tr_table = table_body.find_all('tr')
new_dict = []
        for row in range(len(tr_table)):
            try:
                td_tags = tr_table[row].find_all('td')
                values = {td_tags[data]['data-stat']: td_tags[data].get_text() for data in range(len(td_tags))}
                new_dict.append(values)
            except Exception:
                # skip header/separator rows that carry no data cells
                pass
df = pd.DataFrame(data=new_dict)
for column in df.columns:
try:
df[column] = pd.to_numeric(df[column])
except:
pass
df = df[pd.notnull(df['date_game'])]
df['date_game'] = pd.to_datetime(df['date_game'],format='%Y-%m-%d')
if game:
return df[df['opp_id'].str.contains(game)]
return df
def lineup(self):
#https://www.basketball-reference.com/teams/TOR/2020/lineups/
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}/lineups".format(self.team, self.year)).text
self.soup = BeautifulSoup(
re.sub("<!--|-->", "", self.team_stat), "html.parser")
lineup_table = self.soup.find_all("table")
table_5man = []
table_4man = []
table_2man = []
for table in lineup_table:
lineup_body = table.find("tbody")
tr_lineup = lineup_body.find_all("tr")
for row in range(len(tr_lineup)):
td_tags = tr_lineup[row].find_all("td")
values = {td_tags[data]['data-stat']:td_tags[data].get_text() for data in range(len(td_tags))}
if table['id'] == 'lineups_5-man_':
table_5man.append(values)
elif table['id'] == 'lineups_3-man_':
table_4man.append(values)
else:
table_2man.append(values)
df_5man = pd.DataFrame(data=table_5man)
df_4man = pd.DataFrame(data=table_4man)
df_2man = pd.DataFrame(data=table_2man)
return df_5man, df_4man, df_2man
def starting_lineup(self):
#https://www.basketball-reference.com/teams/TOR/2020_start.html
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}_start.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(
re.sub("<!--|-->", "", self.team_stat), "html.parser")
tables = self.soup.find_all('table')
table_starting = []
table_summary = []
for table in tables:
lineup_body = table.find("tbody")
tr_lineup = lineup_body.find_all("tr")
for row in range(len(tr_lineup)):
td_tags = tr_lineup[row].find_all("td")
values = {td_tags[data]['data-stat']:td_tags[data].get_text() for data in range(len(td_tags))}
if table['id'] == 'starting_lineups_po0':
table_starting.append(values)
else:
table_summary.append(values)
df_starting = | pd.DataFrame(data=table_starting) | pandas.DataFrame |
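# --- Illustrative sketch (not part of the scraper above; the HTML snippet is made up) ---
# The methods above keep repeating one pattern: turn rows of <td data-stat="...">
# cells into a DataFrame, then coerce whatever columns parse as numbers.
# A minimal, self-contained version of that pattern:
import pandas as pd
from bs4 import BeautifulSoup

html = """
<table><tbody>
  <tr><td data-stat="opp_name">Boston</td><td data-stat="pts">112</td><td data-stat="opp_pts">106</td></tr>
  <tr><td data-stat="opp_name">Miami</td><td data-stat="pts">99</td><td data-stat="opp_pts">101</td></tr>
</tbody></table>
"""
rows = BeautifulSoup(html, "html.parser").find_all("tr")
records = [{td["data-stat"]: td.get_text() for td in tr.find_all("td")} for tr in rows]
df = pd.DataFrame(records)
for column in df.columns:
    try:
        df[column] = pd.to_numeric(df[column])  # numeric columns become int64/float64
    except (ValueError, TypeError):
        pass  # leave text columns (e.g. opp_name) as strings
print(df.dtypes)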
import logging
import shutil
import time
import pandas as pd
import requests
from requests_futures.sessions import FuturesSession
class Namara:
def __init__(self, api_key, debug=False, host='https://api.namara.io', api_version='v0'):
self.api_key = api_key
self.debug = debug
self.host = host
self.api_version = api_version
self.base_path = '{0}/{1}'.format(self.host, self.api_version)
        self.headers = {'Content-Type': 'application/json', 'X-API-Key': api_key}
        # Session used to poll the export endpoint below
        self.__session = FuturesSession()
def export(self, dataset_id, organization_id, options=None, output_format='url', output_file=None):
url = self.get_url(f'/data_sets/{dataset_id}/data/export?geometry_format=wkt&organization_id={organization_id}')
while True:
response = self.__session.get(url, params=options, headers=self.headers).result().json()
if self.debug:
                logging.debug('Response Object: %s', response)
if 'message' in response and response['message'] == 'Exported':
if output_format == 'url':
return response['url']
elif output_format == 'dataframe':
list_of_chunks = []
for chunk in pd.read_csv(response['url'], chunksize=100):
list_of_chunks.append(chunk)
return | pd.concat(list_of_chunks) | pandas.concat |
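# --- Illustrative sketch (separate from the Namara client above; StringIO stands in
# for the export URL, which is an assumption here) ---
# The 'dataframe' branch above streams a large CSV export in chunks and stitches
# them back together with pd.concat. The same pattern on an in-memory file:
import io
import pandas as pd

csv_data = io.StringIO("a,b\n" + "\n".join("{0},{1}".format(i, i * 2) for i in range(250)))
chunks = [chunk for chunk in pd.read_csv(csv_data, chunksize=100)]
df = pd.concat(chunks, ignore_index=True)
print(len(df))  # 250 rows reassembled from chunks of 100, 100 and 50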
import logging
from os.path import splitext
import pandas as pd
import json
from six import string_types
from traits.api import Dict, Instance, Str
from .base_report_element import BaseReportElement
logger = logging.getLogger(__name__)
class PlotReportElement(BaseReportElement):
"""
"""
#: Type of element to create
element_type = Str("plot")
#: Plot description, following the Vega-Lite specifications
plot_desc = Dict
#: Data to be plotted, loaded into a DataFrame
source_data = Instance(pd.DataFrame)
def __init__(self, **traits):
if isinstance(traits.get("plot_desc", {}), string_types):
            traits["plot_desc"] = json.loads(traits["plot_desc"])
super(PlotReportElement, self).__init__(**traits)
if self.source_data is None:
data_info = self.plot_desc.pop("data", {})
if "url" in data_info:
data_url = data_info["url"]
if splitext(data_url)[1] == ".h5":
self.source_data = pd.read_hdf(data_url)
if splitext(data_url)[1] == ".csv":
self.source_data = pd.read_csv(data_url)
elif "values" in data_info:
self.source_data = | pd.DataFrame(data_info["values"]) | pandas.DataFrame |
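# --- Illustrative sketch (separate from PlotReportElement above; the file extensions
# and inline records are made up) ---
# A Vega-Lite style "data" block either points at a file ("url") or carries inline
# records ("values"); the element above normalizes both into a DataFrame. A
# stripped-down version of that branch:
import pandas as pd

def load_plot_data(data_info):
    if "url" in data_info:
        url = data_info["url"]
        if url.endswith(".csv"):
            return pd.read_csv(url)
        if url.endswith(".h5"):
            return pd.read_hdf(url)
        raise ValueError("unsupported data url: {}".format(url))
    return pd.DataFrame(data_info.get("values", []))

print(load_plot_data({"values": [{"x": 1, "y": 2}, {"x": 3, "y": 4}]}))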
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
__copyright__ = "Copyright (c) 2017, Intel Research and Development Ireland Ltd."
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
from analytics_engine.heuristics.beans.infograph import InfoGraphNode
from analytics_engine import common
LOG = common.LOG
class SnapUtils(object):
@staticmethod
def annotate_machine_pu_util(internal_graph, node):
source = InfoGraphNode.get_machine_name_of_pu(node)
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_compute_utilization(machine)
if 'intel/use/compute/utilization' not in machine_util.columns:
sum_util = None
cpu_metric = 'intel/procfs/cpu/utilization_percentage'
pu_util_df = InfoGraphNode.get_compute_utilization(node)
if cpu_metric in pu_util_df.columns:
pu_util = pu_util_df[cpu_metric]
pu_util = pu_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = pu_util
InfoGraphNode.set_compute_utilization(machine, machine_util)
else:
                LOG.info('CPU utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
else:
LOG.debug('Found use for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_disk_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_disk_utilization(machine)
if 'intel/use/disk/utilization' not in machine_util.columns:
disk_metric = 'intel/procfs/disk/utilization_percentage'
disk_util_df = InfoGraphNode.get_disk_utilization(node)
if disk_metric in disk_util_df.columns:
disk_util = disk_util_df[disk_metric]
disk_util = disk_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = disk_util
InfoGraphNode.set_disk_utilization(machine, machine_util)
else:
                LOG.info('Disk utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
        else:
            LOG.debug('Found disk use for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def annotate_machine_network_util(internal_graph, node):
source = InfoGraphNode.get_attributes(node)['allocation']
machine = InfoGraphNode.get_node(internal_graph, source)
machine_util = InfoGraphNode.get_network_utilization(machine)
if 'intel/use/network/utilization' not in machine_util.columns:
net_metric = 'intel/psutil/net/utilization_percentage'
net_util_df = InfoGraphNode.get_network_utilization(node)
if net_metric in net_util_df.columns:
net_util = net_util_df[net_metric]
net_util = net_util.fillna(0)
machine_util[InfoGraphNode.get_attributes(node)['name']] = net_util
InfoGraphNode.set_network_utilization(machine, machine_util)
else:
                LOG.info('Network utilization not found for node {}'.format(InfoGraphNode.get_name(node)))
        else:
            LOG.debug('Found network use for node {}'.format(InfoGraphNode.get_name(node)))
@staticmethod
def utilization(internal_graph, node, telemetry):
# machine usage
telemetry_data = telemetry.get_data(node)
if 'intel/use/compute/utilization' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/compute/utilization'],
columns=['intel/use/compute/utilization']))
# pu usage
if 'intel/procfs/cpu/utilization_percentage' in telemetry_data:
InfoGraphNode.set_compute_utilization(node,
pandas.DataFrame(
telemetry_data['intel/procfs/cpu/utilization_percentage'],
columns=['intel/procfs/cpu/utilization_percentage']))
if 'intel/use/memory/utilization' in telemetry_data:
InfoGraphNode.set_memory_utilization(node, pandas.DataFrame(telemetry_data['intel/use/memory/utilization']))
if 'intel/use/disk/utilization' in telemetry_data:
InfoGraphNode.set_disk_utilization(node, pandas.DataFrame(telemetry_data['intel/use/disk/utilization']))
if 'intel/use/network/utilization' in telemetry_data:
InfoGraphNode.set_network_utilization(node,
pandas.DataFrame(telemetry_data['intel/use/network/utilization']))
# supporting not available /use/ metrics
if 'intel/procfs/meminfo/mem_total' in telemetry_data and 'intel/procfs/meminfo/mem_used' in telemetry_data:
# LOG.info('Found memory utilization procfs')
mem_used = telemetry_data['intel/procfs/meminfo/mem_used'].fillna(0)
mem_total = telemetry_data['intel/procfs/meminfo/mem_total'].fillna(0)
mem_util = mem_used * 100 / mem_total
mem_util.name = 'intel/procfs/memory/utilization_percentage'
InfoGraphNode.set_memory_utilization(node, | pandas.DataFrame(mem_util) | pandas.DataFrame |
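# --- Illustrative sketch (separate from SnapUtils above; the sample values are made up) ---
# The derived-metric branch above computes a memory utilization percentage from
# 'mem_used' and 'mem_total' samples and stores it as a one-column DataFrame named
# after the metric. The same arithmetic on toy telemetry:
import numpy as np
import pandas as pd

mem_used = pd.Series([512.0, np.nan, 768.0, 896.0]).fillna(0)
mem_total = pd.Series([1024.0, 1024.0, 1024.0, 1024.0]).fillna(0)
mem_util = mem_used * 100 / mem_total
mem_util.name = 'memory/utilization_percentage'
utilization_df = pd.DataFrame(mem_util)  # single column keyed by the series name
print(utilization_df)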
# demographics_etl.py
#######
# This class provides capabilities to extract, transform,
# and load data from student, staff, and school geographic
# data files that it downloads from the web.
######
import pandas as pd
import numpy as np
import os
import datetime
import urllib
import shutil
import logging
import pyodbc
import pypyodbc
import sqlalchemy as sa
import keyring
import yaml
import pprint as pp
import time
class DemographicsETL():
def __init__(self,config_file_path,log_folder_name):
"""
Initialize ETL process by preparing logging,
reading in configuration file, and
creating a data files folder, if it does not yet
exist.
"""
pd.options.mode.chained_assignment = None
self.setup_logging(folder_name=log_folder_name)
config_map = self.load_configuration(config_file_path)
self.create_folder(folder_name=self.datafiles_folder)
def load_configuration(self,file_path):
"""
Load data from the configuration file from the specified path
into instance variables.
Keyword arguments:
file_path - the path to the configuration file from
the current folder.
"""
try:
logging.info("Using configuration file {}".format(file_path))
file = open(file_path)
            config_map = yaml.safe_load(file)
file.close()
# Set instance variables with settings from configuration file.
self.datafiles_folder = config_map['Data Folder Name']
self.database_name = config_map['Database']['Name']
self.database_driver = config_map['Database']['Driver']
self.database_server = config_map['Database']['Server']
self.database_username = config_map['Database']['Username']
self.database_schema = config_map['Database']['Schema']
self.student_demographics_url = config_map['Student Demographics URL']
self.staff_demographics_url = config_map['Staff Demographics URL']
self.school_geography_url = config_map['School Geography URL']
self.source_staff_table = config_map['Source Staff Table Name']
self.staging_student_table = config_map['Staging Student Table Name']
self.staging_staff_table = config_map['Staging Staff Table Name']
self.staging_school_geography_table = config_map['Staging School Geography Table Name']
self.school_district_ids = config_map['School District IDs']
self.n_less_than_10 = config_map['Replacement for n<10']
self.more_than_95 = config_map['Replacement for >95%']
except IOError:
logging.error("Unable to read configuration from file. Exiting program.")
exit(1)
except KeyError as key:
logging.error("Key missing from configuration from file: {}. Exiting program.".format(key))
exit(1)
except:
logging.error("Unknown configuration file error. Exiting program.")
exit(1)
logging.info('Configuration has been loaded.')
return config_map
def setup_logging(self,folder_name):
"""
Create a folder to store the log, if one does not yet exist,
then initialize the logger for logging to both the console
and the file in the log folder.
Keyword arguments:
folder_name - the name of the folder for storing the log file
"""
# Create folder to store log file if folder does not exist already
self.create_folder(folder_name)
# Configure logger with more verbose format to write to log file
log_file_path = folder_name+'/'+'demographics_etl.log'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d-%Y %H:%M:%S',
filename=log_file_path,
filemode='w')
# Create the logger
console = logging.StreamHandler()
# Write to console any messages that are info or higher priority
console.setLevel(logging.INFO)
# Specify simpler format to write to console
formatter = logging.Formatter('%(levelname)-8s %(message)s')
# Assign the format for console
console.setFormatter(formatter)
# Add the handler to the root logger
logging.getLogger('').addHandler(console)
# Start logging
logging.info('Starting demographics_etl.py.')
logging.info("Logging has been set up with log file located at {}.".format(log_file_path))
def create_folder(self,folder_name):
"""
Create the specified folder if it does not yet exists.
Keyword arguments:
folder_name - the name of the folder to create
"""
# Create folder to store files if folder does not already exist
os.makedirs(folder_name,exist_ok=True)
def download_data(self):
"""
Download student and staff demographic data and school
geographic data from URLs defined in instance variables,
assigned based on the configuration file settings.
"""
self.engine = self.connect_database()
self.student_demographics_file = self.staff_demographics_file = ''
if self.student_demographics_url:
self.student_demographics_file = self.download_file(self.student_demographics_url)
if self.staff_demographics_url:
self.staff_demographics_file = self.download_file(self.staff_demographics_url)
if self.school_geography_url:
self.school_geography_file = self.download_file(self.school_geography_url)
def download_file(self,url):
"""
Download the file from the url provided and save it locally to the folder for data files
using its original file name. Any existing file with that file name and location will be
overwritten.
Keyword arguments:
url - the URL of the file to download
"""
output_filepath = self.datafiles_folder + '/' + url[url.rfind("/")+1:]
# Download the file from the url and save it to the data files folder.
try:
with urllib.request.urlopen(url) as response, open(output_filepath, 'wb') as output_file:
shutil.copyfileobj(response, output_file)
except:
logging.error("Unable to download file from {}. Exiting program.".format(url))
exit(1)
logging.info("Downloaded file to {}".format(output_filepath))
return output_filepath
def connect_database(self):
"""
Acquire the database password using keyring and prepare a connection to the database
used for storing demographic information.
"""
# Get password from keyring
password = keyring.get_password(self.database_name, self.database_username)
# Connect to the database
params = urllib.parse.quote_plus("DRIVER={{{0}}};SERVER={1};DATABASE={2};UID={3};PWD={4};autocommit=True;".format(self.database_driver,
self.database_server,self.database_name, self.database_username,password))
try:
engine = sa.create_engine("mssql+pyodbc:///?odbc_connect={}".format(params))
logging.info("Prepared connection to {} database.".format(self.database_name))
except:
logging.error("Unable to prepare connection to {} database. Exiting program.".format(self.database_name))
exit(1)
return engine
def extract_data(self):
"""
Call methods to extract student and staff demographic information
and school geographic information from downloaded source files.
"""
self.extract_student_demographics_data()
self.extract_staff_demographics_data()
self.extract_school_geography_data()
def extract_student_demographics_data(self):
"""
Extract data from student demographics file, a tab-delimited text file,
to a Pandas dataframe for further processing.
"""
try:
self.student_demographics_df = pd.read_table(self.student_demographics_file, sep='\t', header=0, index_col=False)
except:
logging.error("Unable to read file from {}. Exiting program.".format(self.student_demographics_file))
exit(1)
logging.info("Extracted student demographics data from file {file}. {df} rows of data found.".format(file=self.student_demographics_file,
df = self.student_demographics_df.shape[0]))
def extract_staff_demographics_data(self):
"""
Extract data from staff demographics file, which is an Access database,
to a Pandas dataframe for further processing.
"""
connection_string = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={0}/{1}".format(os.getcwd().replace('\\','/'),self.staff_demographics_file)
logging.info("Attempting to connect to staff demographics Access database with the following connection: {}".format(connection_string))
connection = pypyodbc.connect(connection_string)
quoted_district_ids = ','.join(map("'{}'".format, self.school_district_ids))
query = (r"SELECT SchoolYear,codist,cert,sex,hispanic,race,hdeg,certfte,certflag,recno,prog,act,bldgn,asspct,assfte,yr "
r"FROM [{source_table}] "
r"WHERE act = '27' " # Activity code 27 means a teaching assignment
r"AND assfte > 0 " # Must be at least part of the staff member's assignment FTE
r"AND codist IN ({district_ids});".format(source_table=self.source_staff_table,district_ids=quoted_district_ids))
try:
self.staff_demographics_df = pd.read_sql(query, connection)
except:
logging.error("Unable to extract staff data from {}. Exiting program.".format(self.staff_demographics_file))
exit(1)
logging.info("Extracted staff demographics data. {} rows of data found.".format(self.staff_demographics_df.shape[0]))
def extract_school_geography_data(self):
"""
Extract data from school geography file, a comma-separated values (CSV) file,
to a Pandas dataframe for further processing.
"""
try:
self.school_geography_df = | pd.read_table(self.school_geography_file, sep=',', header=0, index_col=False) | pandas.read_table |
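# --- Illustrative sketch (separate from DemographicsETL above; the column names and
# values are made up) ---
# Each extract_* method above is the same move: read a delimited text file into a
# DataFrame, with the separator depending on the source (tab for the student file,
# comma for the school file). A self-contained version using in-memory files:
import io
import pandas as pd

student_txt = io.StringIO("SchoolYear\tDistrictCode\tEnrollment\n2019\t17001\t523\n2019\t17402\t861\n")
school_csv = io.StringIO("SchoolCode,Latitude,Longitude\n1234,47.61,-122.33\n5678,47.66,-122.38\n")

students_df = pd.read_csv(student_txt, sep="\t", header=0, index_col=False)
schools_df = pd.read_csv(school_csv, sep=",", header=0, index_col=False)
print(students_df.shape, schools_df.shape)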
#!/usr/bin/python3
"""D-Cube Plotting."""
# Example:
#
# ./dcube.py --suite="rpludp" --x="SF" --y="reliability" --start=1 --end=5 --title="test" --out=home/mike/test
import pandas as pd
import baddplotter as bplot
import matplotlib.pyplot as plt # general plotting
import seaborn as sns
import numpy as np
import argparse
from ast import literal_eval
# Pandas options
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 50)
pd.set_option('display.max_columns', 20)
| pd.set_option('display.width', 1000) | pandas.set_option |
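# --- Illustrative sketch (separate from the plotting script above) ---
# The script above widens pandas' console output globally before printing results.
# When the wider output should only apply to one block, option_context makes the
# same change temporarily and restores the previous settings afterwards:
import pandas as pd

df = pd.DataFrame({"col_{}".format(i): range(5) for i in range(15)})
with pd.option_context('display.max_columns', 20, 'display.width', 1000):
    print(df.head())  # wide rows inside this context only
print(pd.get_option('display.max_columns'))  # global option is unchanged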
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import pandas as pd
import string
from scipy.sparse import hstack
from scipy import sparse
def load_data(filepath):
df = pd.read_csv(filepath)
return(df)
def vectorize_data(train_df, test_df):
vectorizer = CountVectorizer(max_df=0.95, min_df=13, ngram_range=(1, 1))
# apply transformation
train_x = vectorizer.fit_transform(train_df['final_clean'])
tf_feature_names = vectorizer.get_feature_names()
test_x = vectorizer.transform(test_df.Quotes)
return(train_x, test_x)
def feature_engineering(train_df, test_df):
train_df['char_count'] = train_df['Quotes'].apply(len)
train_df['word_count'] = train_df['Quotes'].apply(lambda x: len(x.split()))
train_df['word_density'] = train_df['char_count']/(train_df['word_count'] + 1)
train_df['punctuation_count'] = train_df['Quotes'].apply(lambda x: len("".join(_ for _ in x if _ in string.punctuation)))
train_df['title_word_count'] = train_df['Quotes'].apply(lambda x: len([word for word in x.split() if word.istitle()]))
train_df['upper_case_word_count'] = train_df['Quotes'].apply(lambda x: len([word for word in x.split() if word.isupper()]))
test_df['char_count'] = test_df['Quotes'].apply(len)
test_df['word_count'] = test_df['Quotes'].apply(lambda x: len(x.split()))
test_df['word_density'] = test_df['char_count']/(test_df['word_count']+1)
test_df['punctuation_count'] = test_df['Quotes'].apply(lambda x: len("".join(_ for _ in x if _ in string.punctuation)))
test_df['title_word_count'] = test_df['Quotes'].apply(lambda x: len([word for word in x.split() if word.istitle()]))
test_df['upper_case_word_count'] = test_df['Quotes'].apply(lambda x: len([word for word in x.split() if word.isupper()]))
    num_features = [f_ for f_ in train_df.columns
if f_ in ["char_count", "word_count", "word_density", 'punctuation_count','title_word_count', 'upper_case_word_count']]
for f in num_features:
all_cut = pd.cut( | pd.concat([train_df[f], test_df[f]], axis=0) | pandas.concat |
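# --- Illustrative sketch (separate from the feature-engineering code above; the toy
# data and the bin count of 10 are arbitrary choices, not taken from the code) ---
# Binning a numeric feature with pd.cut on the concatenation of train and test keeps
# the bin edges identical for both splits, which is the point of the all_cut line above:
import numpy as np
import pandas as pd

train_vals = pd.Series(np.random.RandomState(0).normal(50, 10, 200))
test_vals = pd.Series(np.random.RandomState(1).normal(50, 10, 80))

all_cut = pd.cut(pd.concat([train_vals, test_vals], axis=0), bins=10, labels=False)
train_binned = all_cut.iloc[:len(train_vals)]
test_binned = all_cut.iloc[len(train_vals):]
print(train_binned.value_counts().sort_index())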
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
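# --- Illustrative sketch (not one of the tests below; the temp path is a throwaway) ---
# Everything in TestHDFStore builds on the same put/select/read_hdf round trip.
# A minimal, self-contained version of that round trip:
import os
import tempfile

import pandas as pd

demo_df = pd.DataFrame({"A": range(5), "B": list("abcde")})
demo_path = os.path.join(tempfile.mkdtemp(), "hdfstore_demo.h5")

with pd.HDFStore(demo_path, mode="w") as demo_store:
    demo_store.put("df", demo_df, format="table")        # 'table' format supports queries
    demo_subset = demo_store.select("df", "index > 2")   # where-clause on the index

demo_roundtrip = pd.read_hdf(demo_path, "df")
print(demo_roundtrip.equals(demo_df))  # True: full frame round-trips intact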
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (like an invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize, size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import pycountry_convert as pc
import pycountry
import os
from iso3166 import countries
PATH_AS_RELATIONSHIPS = '../Datasets/AS-relationships/20210701.as-rel2.txt'
NODE2VEC_EMBEDDINGS = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
DEEPWALK_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/DeepWalk_128.csv'
DIFF2VEC_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Diff2Vec_128.csv'
NETMF_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NetMF_128.csv'
NODESKETCH_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NodeSketch_128.csv'
WALKLETS_EMBEDDINGS_256 = '../Check_for_improvements/Embeddings/Walklets_256.csv'
NODE2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
NODE2VEC_LOCAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_p2_64.csv'
NODE2VEC_GLOBAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_q2_64.csv'
DIFF2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Diff2Vec_64.csv'
NETMF_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NetMF_64.csv'
NODESKETCH_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NodeSketch_64.csv'
NODE2VEC_WL5_E3_LOCAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_local.csv'
NODE2VEC_WL5_E3_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_global.csv'
NODE2VEC_64_WL5_E1_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_global.csv'
BGP2VEC_64 = '../Check_for_improvements/Embeddings/Node2Vec_bgp2Vec.csv'
BGP2VEC_32 = '../Check_for_improvements/Embeddings/BGP2VEC_32'
WALKLETS_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Walklets_128.csv'
STORE_CSV_TO_FOLDER = '../Embeddings_Visualization/StorePreprocessedEmb'
def country_flag(data):
"""
    :param data: A row of the dataframe that combines 3 datasets; its 'AS_rank_iso' field holds the 2-letter country code
    :return: The full country name for that code, or 'Unknown Code' if it cannot be matched
"""
list_alpha_2 = [i.alpha2 for i in list(countries)]
if data['AS_rank_iso'] in list_alpha_2:
return pycountry.countries.get(alpha_2=data['AS_rank_iso']).name
else:
return 'Unknown Code'
def country_to_continent(country_name):
"""
    This function takes a country name as input and returns the continent that the given country belongs to.
    :param country_name: Contains the name of a country
    :return: The continent name, or np.nan if the lookup fails
"""
try:
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
return country_continent_name
except:
return np.nan
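# Hedged usage sketch (assumption: pycountry_convert is installed and the name
# matches its database). A recognized name maps to its continent, while an
# unrecognized one falls back to np.nan:
#
#   continent = country_to_continent('Germany')   # -> 'Europe'
#   missing   = country_to_continent('Atlantis')  # -> nan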
def convert_country_to_continent(data):
"""
    The function converts the ISO alpha-2 code (e.g. US) to the full country name (requires iso3166) and then maps each country to its continent.
:param data: Contains a dataframe combining 4 datasets
:return: The continent for each country
"""
data['AS_rank_iso'] = data.apply(country_flag, axis=1)
temp_list = []
for i in range(0, len(data)):
temp_list.append(country_to_continent(data['AS_rank_iso'][i]))
df = pd.DataFrame(temp_list, columns=['AS_rank_iso'])
data['AS_rank_iso'] = df['AS_rank_iso']
return data['AS_rank_iso']
def merge_datasets(final_df, embeddings_df):
"""
    :param final_df: It's the dataset that is generated in the Analysis/aggregate_data folder
:param embeddings_df: Contains pretrained embeddings
:return: A new merged dataset (containing improvement_score and the embedding of each ASN)
"""
print(final_df['ASN'].isin(embeddings_df['ASN']).value_counts())
mergedStuff = pd.merge(embeddings_df, final_df, on=['ASN'], how='left')
mergedStuff.replace('', np.nan, inplace=True)
return mergedStuff
def get_path_and_filename(model, dimensions):
"""
:param model: The model's name
:param dimensions: The number of dimensions of the given model
    :return: The full path (folder and filename) where the CSV file will be stored
"""
    file_name = 'Preprocessed' + str(model) + str(dimensions) + '.csv'
outdir = STORE_CSV_TO_FOLDER
if not os.path.exists(outdir):
os.mkdir(outdir)
full_name = os.path.join(outdir, file_name)
return full_name
def read_Node2Vec_embeddings_file():
"""
    :return: A dataframe containing the ASNs and the embedding of each ASN, created with the Node2Vec algorithm.
"""
emb_df = pd.read_table(NODE2VEC_EMBEDDINGS, skiprows=1, header=None, sep=" ")
# name the columns
rng = range(0, 65)
new_cols = ['dim_' + str(i) for i in rng]
emb_df.columns = new_cols
# rename first column
emb_df.rename(columns={'dim_0': 'ASN'}, inplace=True)
return emb_df
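# Illustrative sketch of how these helpers are meant to compose (assumption:
# 'final_dataframe.csv' is a hypothetical aggregated dataset containing an
# 'ASN' column; the real filename may differ):
#
#   emb_df = read_Node2Vec_embeddings_file()
#   final_df = pd.read_csv('final_dataframe.csv')
#   merged = merge_datasets(final_df, emb_df)   # left-join on 'ASN'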
def read_karateClub_embeddings_file(emb, dimensions):
"""
    The KarateClub library requires nodes to be labelled with consecutive integers and returns the embeddings
    in that ascending order, so this function reassigns each ASN to its own embedding.
:param emb: A dataset containing pretrained embeddings
:param dimensions: The dimensions of the given dataset
:return: A dataframe containing pretrained embeddings
"""
if dimensions == 64:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_64, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_64, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_64, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_128, sep=',')
elif emb == 'Node2Vec_Local':
df = pd.read_csv(NODE2VEC_LOCAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_Global':
df = pd.read_csv(NODE2VEC_GLOBAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_wl5_global':
df = pd.read_csv(NODE2VEC_64_WL5_E1_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_global':
df = pd.read_csv(NODE2VEC_WL5_E3_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_local':
df = pd.read_csv(NODE2VEC_WL5_E3_LOCAL, sep=',')
elif emb == 'bgp2vec_64':
df = | pd.read_csv(BGP2VEC_64, sep=',') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import spotipy
import spotipy.util as util
import os
import sys
import requests
from dotenv import load_dotenv, find_dotenv
from spotipy.client import SpotifyException
from spotipy.oauth2 import SpotifyOAuth
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import pickle
from pickle import dump
import joblib
# TODO: Build more user friendly structure for user input
# TODO: ASYNC for many playlists
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
# Spotify variables
USERNAME = os.environ.get("SPOTIFY_EMAIL")
SPOTIFY_ID = os.environ.get("SPOTIPY_CLIENT_ID")
SPOTIFY_SECRET = os.environ.get("SPOTIPY_CLIENT_SECRET")
CACHE = os.environ.get("CACHE")
# Feature list
feature_list = [
'danceability','energy', 'loudness',
'speechiness', 'acousticness', 'instrumentalness',
'liveness', 'valence', 'tempo']
# Exclude list
exclude_list = [
'track_name', 'artist_name', 'duration_ms',
'track_href', 'uri', 'time_signature', 'id',
'type', 'analysis_url', 'mode','key']
def connect_spotify():
"""Connects to Spotify API.
Raises:
        SpotifyException: When the inputs fail to authenticate with Spotify.
"""
try:
scope='playlist-read-private, playlist-modify-private, playlist-modify-public'
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope, cache_path=CACHE))
return sp
except SpotifyException as e:
print(f"{e}: Failed to connect to Spotify API.")
def get_playlist_metadata(sp, n):
"""Gets user's current playlist catalogue.
Raises:
SpotifyException: When query fails to gather playlist metadata.
"""
try:
playlists = sp.current_user_playlists(limit=n)
return playlists
except SpotifyException as e:
print(f"{e}: Failed to gather current user playlists.")
def generate_dataset(sp, playlists, search, save_as):
"""Gathers playlist(s) based on input search
"""
tracks_df = pd.DataFrame()
print("Gathering playlist track data")
print('-'*30)
while playlists:
for _, playlist in enumerate(playlists['items']):
if search in playlist['name']:
print("%s" % (playlist['name']))
tracks = sp.playlist_tracks(playlist['uri'])
if "." in search:
current_volume = playlist['name'].split('.')[1]
else:
current_volume = 1
for j, track in enumerate(tracks['items']):
track_data={}
track_data['volume'] = current_volume
track_data['position'] = j + 1
track_data['track_name'] = track['track']['name']
track_data['artist_name'] = track['track']['artists'][0]['name']
track_features = sp.audio_features(track['track']['id'])[0]
track_data.update(track_features)
stage = pd.DataFrame(track_data, index=[0])
tracks_df = tracks_df.append(stage, ignore_index=True)
if playlists['next']:
playlists = sp.next(playlists)
else:
playlists = None
tracks_df.to_csv("data/" + save_as)
return tracks_df
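# Minimal end-to-end sketch (assumptions: the Spotify credentials in the .env
# file are valid, and "Vol." is a hypothetical search string matching the
# user's playlist names):
#
#   sp = connect_spotify()
#   playlists = get_playlist_metadata(sp, n=50)
#   tracks = generate_dataset(sp, playlists, search="Vol.", save_as="tracks.csv")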
def standardize(df, feature_list, exclude_list):
"""Fit and save StandardScaler and PCA
"""
df.drop(columns=exclude_list, inplace=True)
standard_scaler = StandardScaler()
standard_features = standard_scaler.fit_transform(df[feature_list])
processed_df = | pd.DataFrame(standard_features, index=df.index, columns=df.columns[2:]) | pandas.DataFrame |
"""
Created on Mon Feb 22 15:52:51 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
import os
import pickle
import calendar
import time
import warnings
from pyproj import Transformer
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from requests import get
import dataframe_key
def compile_chicago_stations():
"""
Reads data files containing information about docking stations in Chicago
and compiles the data into a dataframe. The dataframe is then saved as a
pickle for further use.
The relevant files can be found at:
https://divvy-tripdata.s3.amazonaws.com/index.html
https://data.cityofchicago.org/Transportation/Divvy-Bicycle-Stations-All-Map/bk89-9dk7
Raises
------
FileNotFoundError
Raised if no data files containing station data are found.
Returns
-------
stat_df : pandas DataFrame
Dataframe of all docking station information.
"""
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
print('No pickle found. Creating pickle...')
stat_files = [file for file in os.listdir('data') if 'Divvy_Stations' in file]
col_list = ['id', 'name', 'latitude', 'longitude']
key = {'ID':'id', 'Station Name':'name', 'Latitude':'latitude','Longitude':'longitude'}
try:
stat_df = pd.read_csv(
'data/Divvy_Bicycle_Stations_-_All_-_Map.csv').rename(columns = key)
stat_df = stat_df[col_list]
except FileNotFoundError:
stat_df = pd.DataFrame(columns = col_list)
for file in stat_files:
df = pd.read_csv(f'./data/{file}')[col_list]
stat_df = pd.concat([stat_df, df], sort = False)
if stat_df.size == 0:
raise FileNotFoundError(
'No data files containing station data found. Please read the docstring for more information.') from exc
stat_df.drop_duplicates(subset = 'name', inplace = True)
with open('./python_variables/Chicago_stations.pickle', 'wb') as file:
pickle.dump(stat_df, file)
print('Pickle loaded')
return stat_df
def get_JC_blacklist():
"""
    Constructs/updates a blacklist of stations in the Jersey City area. The
blacklist is created using historical biketrip datasets for the area.
Use only if you know what you are doing.
The relevant files can be found at:
https://www.citibikenyc.com/system-data
Raises
------
FileNotFoundError
Raised if no Jersey City dataset is found.
Returns
-------
blacklist : list
List of IDs of the Jersey City docking stations.
"""
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
blacklist = pickle.load(file)
except FileNotFoundError:
print('No previous blacklist found. Creating blacklist...')
blacklist = set()
JC_files = [file for file in os.listdir('data') if 'JC' in file]
if len(JC_files) == 0:
raise FileNotFoundError(
'No JC files found. Please have a JC file in the data directory to create/update blacklist.')
for file in JC_files:
df = pd.read_csv('data/' + file)
df = df.rename(columns = dataframe_key.get_key('nyc'))
JC_start_stat_indices = df.loc[df['start_stat_long'] < 74.02]
JC_end_stat_indices = df.loc[df['end_stat_long'] < 74.02]
stat_IDs = set(
df['start_stat_id'][JC_start_stat_indices]) | set(df['end_stat_id'][JC_end_stat_indices])
blacklist = blacklist | stat_IDs
with open('./python_variables/JC_blacklist', 'wb') as file:
pickle.dump(blacklist, file)
print('Blacklist updated')
return blacklist
def days_index(df):
"""
Find indices of daily trips.
Parameters
----------
df : pandas DataFrame
Dataframe containing bikeshare trip data with columns that have been
renamed to the common key.
Returns
-------
d_i : dict
Contains the indices of the first trip per day.
"""
days = df['start_dt'].dt.day
d_i = [(days == i).idxmax() for i in range(1, max(days)+1)]
return dict(zip(range(1, max(days)+1), d_i))
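# Small worked example (hypothetical data) of what days_index returns: for a
# dataframe whose 'start_dt' column spans the first two days of a month, the
# result maps each day number to the index of that day's first trip, e.g.
#
#   df = pd.DataFrame({'start_dt': pd.to_datetime(
#       ['2019-09-01 08:00', '2019-09-01 09:30', '2019-09-02 07:45'])})
#   days_index(df)   # -> {1: 0, 2: 2}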
def pickle_data(df, city, year, month):
"""
Generate pickle of days' starting indices.
Parameters
----------
df : pandas DataFrame
bikeshare trip data with columns that have been renamed to the common
key.
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
Returns
-------
d : dict
Contains the indices of the first trip per day.
"""
d = days_index(df)
with open(f'./python_variables/day_index_{city}{year:d}{month:02d}.pickle', 'wb') as file:
pickle.dump(d, file)
return d
def get_data(city, year, month, blacklist=None):
"""
Read data from csv files.
Parameters
----------
city : str
The identification of the city. For a list of supported cities, see
the documentation for the Data class.
year : int
The year of interest in YYYY format.
month : int
The month of interest in MM format.
blacklist : list, optional
List of IDs of stations to remove. Default is None.
Returns
-------
df : pandas DataFrame
Dataframe containing bikeshare trip data.
days : dict
Contains the indices of the first trip per day.
"""
supported_cities = ['nyc', 'sfran', 'sjose',
'washDC', 'chic', 'london',
'oslo', 'edinburgh', 'bergen',
'buenos_aires', 'madrid',
'mexico', 'taipei'] # Remember to update this list
if city not in supported_cities:
raise ValueError("This city is not currently supported. Supported cities are {}".format(supported_cities))
# Make folder for dataframes if not found
if not os.path.exists('python_variables/big_data'):
os.makedirs('python_variables/big_data')
try:
with open(f'./python_variables/big_data/{city}{year:d}{month:02d}_dataframe_blcklst={blacklist}.pickle', 'rb') as file:
df = pickle.load(file)
print('Pickle loaded')
except FileNotFoundError:
print('No dataframe pickle found. Pickling dataframe...')
if city == "nyc":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-citibike-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.citibikenyc.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
try:
with open('./python_variables/JC_blacklist', 'rb') as file:
JC_blacklist = pickle.load(file)
df = df[~df['start_stat_id'].isin(JC_blacklist)]
df = df[~df['end_stat_id'].isin(JC_blacklist)]
except FileNotFoundError:
print('No JC blacklist found. Continuing...')
df.dropna(inplace=True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "washDC":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-capitalbikeshare-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.capitalbikeshare.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
stat_df = pd.read_csv('data/Capital_Bike_Share_Locations.csv')
for _ , stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_id'] == stat['TERMINAL_NUMBER'])
end_matches = np.where(df['end_stat_id'] == stat['TERMINAL_NUMBER'])
df.at[start_matches[0], 'start_stat_lat'] = stat['LATITUDE']
df.at[start_matches[0], 'start_stat_long'] = stat['LONGITUDE']
df.at[end_matches[0], 'end_stat_lat'] = stat['LATITUDE']
df.at[end_matches[0], 'end_stat_long'] = stat['LONGITUDE']
df.replace('', np.nan, inplace = True)
df.dropna(inplace=True)
max_lat = 38.961029
min_lat = 38.792686
max_long= -76.909415
min_long= -77.139396
df = df.iloc[np.where(
(df['start_stat_lat'] < max_lat) &
(df['start_stat_lat'] > min_lat) &
(df['start_stat_long'] < max_long) &
(df['start_stat_long'] > min_long))]
df = df.iloc[np.where(
(df['end_stat_lat'] < max_lat) &
(df['end_stat_lat'] > min_lat) &
(df['end_stat_long'] < max_long) &
(df['end_stat_long'] > min_long))]
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "chic":
q = int(np.ceil(month/3))
try:
df = pd.read_csv(f'./data/Divvy_Trips_{year:d}_Q{q}.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.divvybikes.com/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
n_days = calendar.monthrange(year, month)[1]
df = df.iloc[np.where(df['start_t'] > f'{year:d}-{month:02d}-01 00:00:00')]
df = df.iloc[np.where(df['start_t'] < f'{year:d}-{month:02d}-{n_days} 23:59:59')]
df.reset_index(inplace = True, drop = True)
df['start_stat_lat'] = ''
df['start_stat_long'] = ''
df['end_stat_lat'] = ''
df['end_stat_long'] = ''
try:
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
except FileNotFoundError as exc:
compile_chicago_stations()
with open('./python_variables/Chicago_stations.pickle', 'rb') as file:
stat_df = pickle.load(file)
for _, stat in stat_df.iterrows():
start_matches = np.where(df['start_stat_name'] == stat['name'])
end_matches = np.where(df['end_stat_name'] == stat['name'])
df.at[start_matches[0], 'start_stat_lat'] = stat['latitude']
df.at[start_matches[0], 'start_stat_long'] = stat['longitude']
df.at[end_matches[0], 'end_stat_lat'] = stat['latitude']
df.at[end_matches[0], 'end_stat_long'] = stat['longitude']
df.replace('', np.nan, inplace = True)
df.dropna(subset = ['start_stat_lat',
'start_stat_long',
'end_stat_lat',
'end_stat_long'], inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
df['duration'] = df['duration'].str.replace(',', '').astype(float)
elif city == "sfran":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] > 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] > 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "sjose":
try:
df = pd.read_csv(f'./data/{year:d}{month:02d}-baywheels-tripdata.csv')
except FileNotFoundError as exc:
raise FileNotFoundError('No trip data found. All relevant files can be found at https://www.lyft.com/bikes/bay-wheels/system-data') from exc
df = df.rename(columns = dataframe_key.get_key(city))
df.dropna(inplace=True)
df = df.iloc[np.where(df['start_stat_lat'] < 37.593220)]
df = df.iloc[np.where(df['end_stat_lat'] < 37.593220)]
df.sort_values(by = 'start_t', inplace = True)
df.reset_index(inplace = True, drop = True)
df['start_dt'] = pd.to_datetime(df['start_t'])
df['end_dt'] = pd.to_datetime(df['end_t'])
elif city == "london":
month_dict = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May',
6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct',
11:'Nov', 12:'Dec'}
data_files = [file for file in os.listdir('data') if 'JourneyDataExtract' in file]
            data_files = [file for file in data_files if '{}'.format(year) in file
                          and '{}'.format(month_dict[month]) in file]
if len(data_files) == 0:
raise FileNotFoundError('No London data for {}. {} found. All relevant files can be found at https://cycling.data.tfl.gov.uk/.'.format(month_dict[month], year))
            if len(data_files) == 1:
warnings.warn('Only one data file found. Please check that you have all available data.')
df = pd.read_csv('./data/' + data_files[0])
for file in data_files[1:]:
df_temp = | pd.read_csv('./data/' + file) | pandas.read_csv |
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda tools (e.g. pandas, numpy, etc)
*RADIANCE software should be installed from https://github.com/NREL/Radiance/releases
*If you want to use gencumulativesky, move 'gencumulativesky.exe' from
'bifacial_radiance\data' into your RADIANCE source directory.
*If using a Windows machine you should download the Jaloxa executables at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml#Download
* Installation of bifacial_radiance from the repo:
1. Clone the repo
2. Navigate to the directory using the command prompt
3. run `pip install -e . `
Overview:
Bifacial_radiance includes several helper functions to make it easier to evaluate
different PV system orientations for rear bifacial irradiance.
Note that this is simply an optical model - identifying available rear irradiance under different conditions.
For a detailed demonstration example, look at the .ipynb notebook in \docs\
There are two solar resource modes in bifacial_radiance: `gendaylit` uses hour-by-hour solar
resource descriptions using the Perez diffuse tilted plane model.
`gencumulativesky` is an annual average solar resource that combines hourly
Perez skies into one single solar source, and computes an annual average.
bifacial_radiance includes five object-oriented classes:
RadianceObj: top level class to work on radiance objects, keep track of filenames,
sky values, PV module type etc.
GroundObj: details for the ground surface and reflectance
SceneObj: scene information including array configuration (row spacing, clearance or hub height)
MetObj: meteorological data from EPW (energyplus) file.
Future work: include other file support including TMY files
AnalysisObj: Analysis class for plotting and reporting
"""
import logging
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
import os, datetime
from subprocess import Popen, PIPE # replacement for os.system()
import pandas as pd
import numpy as np
import warnings
#from input import *
# Mutual parameters across all processes
#daydate=sys.argv[1]
global DATA_PATH # path to data files including module.json. Global context
#DATA_PATH = os.path.abspath(pkg_resources.resource_filename('bifacial_radiance', 'data/') )
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def _findme(lst, a): #find string match in a list. script from stackexchange
return [i for i, x in enumerate(lst) if x == a]
def _missingKeyWarning(dictype, missingkey, newvalue): # prints warnings
if type(newvalue) is bool:
valueunit = ''
else:
valueunit = 'm'
print("Warning: {} Dictionary Parameters passed, but {} is missing. ".format(dictype, missingkey))
print("Setting it to default value of {} {} to continue\n".format(newvalue, valueunit))
def _normRGB(r, g, b): #normalize by each color for human vision sensitivity
return r*0.216+g*0.7152+b*0.0722
def _popen(cmd, data_in, data_out=PIPE):
"""
Helper function subprocess.popen replaces os.system
- gives better input/output process control
usage: pass <data_in> to process <cmd> and return results
based on rgbeimage.py (<NAME> 2010)
"""
if type(cmd) == str:
cmd = str(cmd) # gets rid of unicode oddities
shell=True
else:
shell=False
p = Popen(cmd, bufsize=-1, stdin=PIPE, stdout=data_out, stderr=PIPE, shell=shell) #shell=True required for Linux? quick fix, but may be security concern
data, err = p.communicate(data_in)
#if err:
# return 'message: '+err.strip()
#if data:
# return data. in Python3 this is returned as `bytes` and needs to be decoded
if err:
if data:
returntuple = (data.decode('latin1'), 'message: '+err.decode('latin1').strip())
else:
returntuple = (None, 'message: '+err.decode('latin1').strip())
else:
if data:
returntuple = (data.decode('latin1'), None) #Py3 requires decoding
else:
returntuple = (None, None)
return returntuple
def _interactive_load(title=None):
# Tkinter file picker
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring window into foreground
return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir
def _interactive_directory(title=None):
# Tkinter directory picker. Now Py3.6 compliant!
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring to front
return filedialog.askdirectory(parent=root, title=title)
def _modDict(originaldict, moddict, relative=False):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
moddict : dictionary
Modified dictinoary, for example modscan['xstart'] = 0 to change position of x.
relative : Bool
if passing modscanfront and modscanback to modify dictionarie of positions,
this sets if the values passed to be updated are relative or absolute.
Default is absolute value (relative=False)
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
'''
newdict = originaldict.copy()
for key in moddict:
try:
if relative:
newdict[key] = moddict[key] + newdict[key]
else:
newdict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return newdict
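# Brief sketch of _modDict behaviour (hypothetical scan dictionaries):
#
#   frontscan = {'xstart': 1.0, 'zstart': 0.5}
#   _modDict(frontscan, {'xstart': 0})                  # -> {'xstart': 0,   'zstart': 0.5}
#   _modDict(frontscan, {'xstart': 0.2}, relative=True) # -> {'xstart': 1.2, 'zstart': 0.5}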
def _heightCasesSwitcher(sceneDict, preferred='hub_height', nonpreferred='clearance_height'):
"""
Parameters
----------
sceneDict : dictionary
Dictionary that might contain more than one way of defining height for
the array: `clearance_height`, `hub_height`, `height`*
* height deprecated from sceneDict. This function helps choose
* which definition to use.
preferred : str, optional
When sceneDict has hub_height and clearance_height, or it only has height,
        it will leave only the preferred option. The default is 'hub_height'.
    nonpreferred : str, optional
        When sceneDict has hub_height and clearance_height,
        it will delete this nonpreferred option. The default is 'clearance_height'.
Returns
-------
    sceneDict : dictionary
        Dictionary now containing the appropriate definition for system height.
    use_clearanceheight : Bool
        Helper variable to specify if the dictionary has only clearance_height for
use inside `makeScene1axis`. Will get deprecated once that internal
function is streamlined.
"""
# TODO: When we update to python 3.9.0, this could be a Switch Cases (Structural Pattern Matching):
heightCases = '_'
if 'height' in sceneDict:
heightCases = heightCases+'height__'
if 'clearance_height' in sceneDict:
heightCases = heightCases+'clearance_height__'
if 'hub_height' in sceneDict:
heightCases = heightCases+'hub_height__'
use_clearanceheight = False
# CASES:
if heightCases == '_height__':
print("sceneDict Warning: 'height' is being deprecated. "+
"Renaming as "+preferred)
sceneDict[preferred]=sceneDict['height']
del sceneDict['height']
elif heightCases == '_clearance_height__':
#print("Using clearance_height.")
use_clearanceheight = True
elif heightCases == '_hub_height__':
#print("Using hub_height.'")
pass
elif heightCases == '_height__clearance_height__':
print("sceneDict Warning: 'clearance_height and 'height' "+
"(deprecated) are being passed. removing 'height' "+
"from sceneDict for this tracking routine")
del sceneDict['height']
use_clearanceheight = True
elif heightCases == '_height__hub_height__':
print("sceneDict Warning: 'height' is being deprecated. Using 'hub_height'")
del sceneDict['height']
elif heightCases == '_height__clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height', 'clearance_height'"+
", and 'height' are being passed. Removing 'height'"+
" (deprecated) and "+ nonpreferred+ ", using "+preferred)
del sceneDict[nonpreferred]
elif heightCases == '_clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height' and 'clearance_height'"+
" are being passed. Using "+preferred+
" and removing "+ nonpreferred)
del sceneDict[nonpreferred]
else:
print ("sceneDict Error! no argument in sceneDict found "+
"for 'hub_height', 'height' nor 'clearance_height'. "+
"Exiting routine.")
return sceneDict, use_clearanceheight
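# Quick sketch of the switcher (hypothetical sceneDict): when both height
# definitions are present, the nonpreferred key is dropped,
#
#   sd = {'clearance_height': 0.5, 'hub_height': 1.5, 'tilt': 10}
#   _heightCasesSwitcher(sd)   # -> ({'hub_height': 1.5, 'tilt': 10}, False)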
def _is_leap_and_29Feb(s): # Removes Feb. 29 if it a leap year.
return (s.index.year % 4 == 0) & \
((s.index.year % 100 != 0) | (s.index.year % 400 == 0)) & \
(s.index.month == 2) & (s.index.day == 29)
def _subhourlydatatoGencumskyformat(gencumskydata, label='right'):
# Subroutine to resample, pad, remove leap year and get data in the
# 8760 hourly format
# for saving the temporary files for gencumsky in _saveTempTMY and
# _makeTrackerCSV
#Resample to hourly. Gencumsky wants right-labeled data.
gencumskydata = gencumskydata.resample('60T', closed='right', label='right').mean()
if label == 'left': #switch from left to right labeled by adding an hour
gencumskydata.index = gencumskydata.index + pd.to_timedelta('1H')
# Padding
tzinfo = gencumskydata.index.tzinfo
padstart = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0],1,1,1,0 ) ).tz_localize(tzinfo)
padend = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0]+1,1,1,0,0) ).tz_localize(tzinfo)
gencumskydata.iloc[0] = 0 # set first datapt to zero to forward fill w zeros
gencumskydata.iloc[-1] = 0 # set last datapt to zero to forward fill w zeros
# check if index exists. I'm sure there is a way to do this backwards.
if any(gencumskydata.index.isin([padstart])):
print("Data starts on Jan. 01")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padstart]))
gencumskydata=pd.concat([gencumskydata,pd.DataFrame(index=[padstart])])
if any(gencumskydata.index.isin([padend])):
print("Data ends on Dec. 31st")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padend]))
gencumskydata=pd.concat([gencumskydata, pd.DataFrame(index=[padend])])
gencumskydata.loc[padstart]=0
gencumskydata.loc[padend]=0
gencumskydata=gencumskydata.sort_index()
# Fill empty timestamps with zeros
gencumskydata = gencumskydata.resample('60T').asfreq().fillna(0)
# Mask leap year
leapmask = ~(_is_leap_and_29Feb(gencumskydata))
gencumskydata = gencumskydata[leapmask]
if (gencumskydata.index.year[-1] == gencumskydata.index.year[-2]+1) and len(gencumskydata)>8760:
gencumskydata = gencumskydata[:-1]
return gencumskydata
# end _subhourlydatatoGencumskyformat
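# Hedged example of the Gencumsky reshaping helper (hypothetical 15-minute
# series): the result is expected to be an 8760-row hourly, right-labeled
# frame covering Jan 1 01:00 through Dec 31 24:00, with leap days removed.
#
#   idx = pd.date_range('2021-06-01 00:00', periods=96, freq='15T', tz='Etc/GMT+7')
#   sub = pd.DataFrame({'ghi': 400.0, 'dhi': 100.0}, index=idx)
#   hourly = _subhourlydatatoGencumskyformat(sub, label='left')   # -> 8760 rows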
class RadianceObj:
"""
The RadianceObj top level class is used to work on radiance objects,
keep track of filenames, sky values, PV module configuration, etc.
Parameters
----------
name : text to append to output files
filelist : list of Radiance files to create oconv
nowstr : current date/time string
path : working directory with Radiance materials and objects
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, name=None, path=None, hpc=False):
'''
initialize RadianceObj with path of Radiance materials and objects,
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
hpc: Keeps track if User is running simulation on HPC so some file
reading routines try reading a bit longer and some writing
routines (makeModule) that overwrite themselves are inactivated.
Returns
-------
none
'''
self.metdata = {} # data from epw met file
self.data = {} # data stored at each timestep
self.path = "" # path of working directory
self.name = "" # basename to append
#self.filelist = [] # list of files to include in the oconv
self.materialfiles = [] # material files for oconv
self.skyfiles = [] # skyfiles for oconv
self.radfiles = [] # scene rad files for oconv
self.octfile = [] #octfile name for analysis
self.Wm2Front = 0 # cumulative tabulation of front W/m2
self.Wm2Back = 0 # cumulative tabulation of rear W/m2
self.backRatio = 0 # ratio of rear / front Wm2
self.nMods = None # number of modules per row
self.nRows = None # number of rows per scene
self.hpc = hpc # HPC simulation is being run. Some read/write functions are modified
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
# DEFAULTS
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.basename = name # add backwards compatibility for prior versions
#self.__name__ = self.name #optional info
#self.__str__ = self.__name__ #optional info
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
# load files in the /materials/ directory
self.materialfiles = self.returnMaterialFiles('materials')
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
            LOGGER.error("Path doesn't exist: %s" % (path))
LOGGER.exception(exc)
raise(exc)
# check for path in the new Radiance directory:
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
_checkPath('images'); _checkPath('objects')
_checkPath('results'); _checkPath('skies'); _checkPath('EPWs')
# if materials directory doesn't exist, populate it with ground.rad
# figure out where pip installed support files.
from shutil import copy2
if not os.path.exists('materials'): #copy ground.rad to /materials
os.makedirs('materials')
print('Making path: materials')
copy2(os.path.join(DATA_PATH, 'ground.rad'), 'materials')
# if views directory doesn't exist, create it with two default views - side.vp and front.vp
if not os.path.exists('views'):
os.makedirs('views')
with open(os.path.join('views', 'side.vp'), 'w') as f:
f.write('rvu -vtv -vp -10 1.5 3 -vd 1.581 0 -0.519234 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
with open(os.path.join('views', 'front.vp'), 'w') as f:
f.write('rvu -vtv -vp 0 -3 5 -vd 0 0.894427 -0.894427 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
def getfilelist(self):
"""
Return concat of matfiles, radfiles and skyfiles
"""
return self.materialfiles + self.skyfiles + self.radfiles
def save(self, savefile=None):
"""
Pickle the radiance object for further use.
Very basic operation - not much use right now.
Parameters
----------
savefile : str
Optional savefile name, with .pickle extension.
Otherwise default to save.pickle
"""
import pickle
if savefile is None:
savefile = 'save.pickle'
with open(savefile, 'wb') as f:
pickle.dump(self, f)
print('Saved to file {}'.format(savefile))
#def setHPC(self, hpc=True):
# self.hpc = hpc
def addMaterial(self, material, Rrefl, Grefl, Brefl, materialtype='plastic',
specularity=0, roughness=0, material_file=None, comment=None, rewrite=True):
"""
Function to add a material in Radiance format.
Parameters
----------
material : str
            Name of the material to add.
Rrefl : str
Reflectivity for first wavelength, or 'R' bin.
Grefl : str
            Reflectivity for second wavelength, or 'G' bin.
Brefl : str
Reflectivity for third wavelength, or 'B' bin.
materialtype : str, optional
Type of material. The default is 'plastic'. Others can be mirror,
            trans, etc. See the RADIANCE documentation.
specularity : str, optional
Ratio of reflection that is specular and not diffuse. The default is 0.
roughness : str, optional
This is the microscopic surface roughness: the more jagged the
facets are, the rougher it is and more blurry reflections will appear.
material_file : str, optional
            Name of the .rad file in the materials folder to write to. The default is None (ground.rad is used).
comment : str, optional
            Optional comment line written above the material definition. The default is None.
rewrite : str, optional
            If True, overwrite the material if it already exists in the file. The default is True.
Returns
-------
None. Just adds the material to the material_file specified or the
default in ``materials\ground.rad``.
References:
See examples of documentation for more materialtype details.
http://www.jaloxa.eu/resources/radiance/documentation/docs/radiance_tutorial.pdf page 10
Also, you can use https://www.jaloxa.eu/resources/radiance/colour_picker.shtml
to have a sense of how the material would look with the RGB values as
well as specularity and roughness.
To understand more on reflectivity, specularity and roughness values
https://thinkmoult.com/radiance-specularity-and-roughness-value-examples.html
"""
if material_file is None:
material_file = 'ground.rad'
matfile = os.path.join('materials', material_file)
with open(matfile, 'r') as fp:
buffer = fp.readlines()
# search buffer for material matching requested addition
found = False
for i in buffer:
if materialtype and material in i:
loc = buffer.index(i)
found = True
break
if found:
if rewrite:
print('Material exists, overwriting...\n')
if comment is None:
pre = loc - 1
else:
pre = loc - 2
# commit buffer without material match
with open(matfile, 'w') as fp:
for i in buffer[0:pre]:
fp.write(i)
for i in buffer[loc+4:]:
fp.write(i)
if (found and rewrite) or (not found):
# append -- This will create the file if it doesn't exist
file_object = open(matfile, 'a')
file_object.write("\n\n")
if comment is not None:
file_object.write("#{}".format(comment))
file_object.write("\nvoid {} {}".format(materialtype, material))
if materialtype == 'glass':
file_object.write("\n0\n0\n3 {} {} {}".format(Rrefl, Grefl, Brefl))
else:
file_object.write("\n0\n0\n5 {} {} {} {} {}".format(Rrefl, Grefl, Brefl, specularity, roughness))
file_object.close()
print('Added material {} to file {}'.format(material, material_file))
if (found and not rewrite):
print('Material already exists\n')
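    # Hedged usage sketch for addMaterial (the material name and RGB values below
    # are illustrative assumptions, not recommended reflectances):
    #
    #   demo = RadianceObj('matdemo')
    #   demo.addMaterial(material='red_plastic', Rrefl=0.6, Grefl=0.1, Brefl=0.1,
    #                    specularity=0.05, roughness=0.1)
    #   # appends (or overwrites) the definition inside materials/ground.rad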
def exportTrackerDict(self, trackerdict=None,
savefile=None, reindex=None):
"""
Use :py:func:`~bifacial_radiance.load._exportTrackerDict` to save a
TrackerDict output as a csv file.
Parameters
----------
trackerdict
The tracker dictionary to save
savefile : str
path to .csv save file location
reindex : bool
True saves the trackerdict in TMY format, including rows for hours
            where there are no sun/irradiance results (empty)
"""
import bifacial_radiance.load
if trackerdict is None:
trackerdict = self.trackerdict
if savefile is None:
savefile = _interactive_load(title='Select a .csv file to save to')
if reindex is None:
if self.cumulativesky is True:
# don't re-index for cumulativesky,
# which has angles for index
reindex = False
else:
reindex = True
if self.cumulativesky is True and reindex is True:
# don't re-index for cumulativesky,
# which has angles for index
print ("\n Warning: For cumulativesky simulations, exporting the "
"TrackerDict requires reindex = False. Setting reindex = "
"False and proceeding")
reindex = False
bifacial_radiance.load._exportTrackerDict(trackerdict,
savefile,
reindex)
def loadtrackerdict(self, trackerdict=None, fileprefix=None):
"""
Use :py:class:`bifacial_radiance.load._loadtrackerdict`
to browse the results directory and load back any results saved in there.
Parameters
----------
trackerdict
fileprefix : str
"""
from bifacial_radiance.load import loadTrackerDict
if trackerdict is None:
trackerdict = self.trackerdict
(trackerdict, totaldict) = loadTrackerDict(trackerdict, fileprefix)
self.Wm2Front = totaldict['Wm2Front']
self.Wm2Back = totaldict['Wm2Back']
def returnOctFiles(self):
"""
Return files in the root directory with `.oct` extension
Returns
-------
oct_files : list
List of .oct files
"""
oct_files = [f for f in os.listdir(self.path) if f.endswith('.oct')]
#self.oct_files = oct_files
return oct_files
def returnMaterialFiles(self, material_path=None):
"""
Return files in the Materials directory with .rad extension
appends materials files to the oconv file list
Parameters
----------
material_path : str
Optional parameter to point to a specific materials directory.
otherwise /materials/ is default
Returns
-------
material_files : list
List of .rad files
"""
if material_path is None:
material_path = 'materials'
material_files = [f for f in os.listdir(os.path.join(self.path,
material_path)) if f.endswith('.rad')]
materialfilelist = [os.path.join(material_path, f) for f in material_files]
self.materialfiles = materialfilelist
return materialfilelist
def setGround(self, material=None, material_file=None):
"""
Use GroundObj constructor class and return a ground object
Parameters
------------
material : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
self.ground : tuple
self.ground.normval : numeric
Normalized color value
self.ground.ReflAvg : numeric
Average reflectance
"""
if material is None:
try:
if self.metdata.albedo is not None:
material = self.metdata.albedo
print(" Assigned Albedo from metdata.albedo")
except:
pass
self.ground = GroundObj(material, material_file)
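    # Hedged usage sketch for setGround (assuming demo is a RadianceObj instance;
    # the values are illustrative): either a numeric albedo or one of the material
    # names listed above can be passed.
    #
    #   demo.setGround(0.25)          # constant broadband albedo
    #   demo.setGround('litesoil')    # named material from materials/ground.rad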
def getEPW(self, lat=None, lon=None, GetAll=False):
"""
Subroutine to download nearest epw files to latitude and longitude provided,
into the directory \EPWs\
based on github/aahoo.
.. warning::
verify=false is required to operate within NREL's network.
to avoid annoying warnings, insecurerequestwarning is disabled
currently this function is not working within NREL's network. annoying!
Parameters
----------
lat : decimal
Used to find closest EPW file.
lon : decimal
Longitude value to find closest EPW file.
GetAll : boolean
Download all available files. Note that no epw file will be loaded into memory
"""
import requests, re
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
hdr = {'User-Agent' : "Magic Browser",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
path_to_save = 'EPWs' # create a directory and write the name of directory here
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
def _returnEPWnames():
''' return a dataframe with the name, lat, lon, url of available files'''
r = requests.get('https://github.com/NREL/EnergyPlus/raw/develop/weather/master.geojson', verify=False)
data = r.json() #metadata for available files
#download lat/lon and url details for each .epw file into a dataframe
df = pd.DataFrame({'url':[], 'lat':[], 'lon':[], 'name':[]})
for location in data['features']:
match = re.search(r'href=[\'"]?([^\'" >]+)', location['properties']['epw'])
if match:
url = match.group(1)
name = url[url.rfind('/') + 1:]
lontemp = location['geometry']['coordinates'][0]
lattemp = location['geometry']['coordinates'][1]
dftemp = pd.DataFrame({'url':[url], 'lat':[lattemp], 'lon':[lontemp], 'name':[name]})
#df = df.append(dftemp, ignore_index=True)
df = pd.concat([df, dftemp], ignore_index=True)
return df
def _findClosestEPW(lat, lon, df):
#locate the record with the nearest lat/lon
errorvec = np.sqrt(np.square(df.lat - lat) + np.square(df.lon - lon))
index = errorvec.idxmin()
url = df['url'][index]
name = df['name'][index]
return url, name
def _downloadEPWfile(url, path_to_save, name):
r = requests.get(url, verify=False, headers=hdr)
if r.ok:
filename = os.path.join(path_to_save, name)
# py2 and 3 compatible: binary write, encode text first
with open(filename, 'wb') as f:
f.write(r.text.encode('ascii', 'ignore'))
print(' ... OK!')
else:
print(' connection error status code: %s' %(r.status_code))
r.raise_for_status()
# Get the list of EPW filenames and lat/lon
df = _returnEPWnames()
# find the closest EPW file to the given lat/lon
if (lat is not None) & (lon is not None) & (GetAll is False):
url, name = _findClosestEPW(lat, lon, df)
# download the EPW file to the local drive.
print('Getting weather file: ' + name)
_downloadEPWfile(url, path_to_save, name)
self.epwfile = os.path.join('EPWs', name)
elif GetAll is True:
if input('Downloading ALL EPW files available. OK? [y/n]') == 'y':
# get all of the EPW files
for index, row in df.iterrows():
print('Getting weather file: ' + row['name'])
_downloadEPWfile(row['url'], path_to_save, row['name'])
self.epwfile = None
else:
print('Nothing returned. Proper usage: epwfile = getEPW(lat,lon)')
self.epwfile = None
return self.epwfile
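    # Hedged usage sketch for getEPW (assuming demo is a RadianceObj instance;
    # the coordinates are an illustrative assumption):
    #
    #   epwfile = demo.getEPW(lat=39.74, lon=-105.18)   # nearest EPW saved under EPWs/
    #   metdata = demo.readWeatherFile(epwfile)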
def readWeatherFile(self, weatherFile=None, starttime=None,
endtime=None, label=None, source=None,
coerce_year=None, tz_convert_val=None):
"""
Read either a EPW or a TMY file, calls the functions
:py:class:`~bifacial_radiance.readTMY` or
:py:class:`~bifacial_radiance.readEPW`
according to the weatherfile extention.
Parameters
----------
weatherFile : str
File containing the weather information. EPW, TMY or solargis accepted.
starttime : str
Limited start time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
endtime : str
Limited end time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
daydate : str DEPRECATED
For single day in 'MM/DD' or MM_DD format. Now use starttime and
endtime set to the same date.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
source : str
To help identify different types of .csv files. If None, it assumes
it is a TMY3-style formated data. Current options: 'TMY3',
'solargis', 'EPW'
coerce_year : int
Year to coerce weather data to in YYYY format, ie 2021.
If more than one year of data in the weather file, year is NOT coerced.
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
"""
#from datetime import datetime
import warnings
if weatherFile is None:
if hasattr(self,'epwfile'):
weatherFile = self.epwfile
else:
try:
weatherFile = _interactive_load('Select EPW or TMY3 climate file')
except:
raise Exception('Interactive load failed. Tkinter not supported'+
'on this system. Try installing X-Quartz and reloading')
if coerce_year is not None:
coerce_year = int(coerce_year)
if str(coerce_year).__len__() != 4:
warnings.warn('Incorrect coerce_year. Setting to None')
coerce_year = None
def _parseTimes(t, hour, coerce_year):
'''
parse time input t which could be string mm_dd_HH or YYYY-mm-dd_HHMM
or datetime.datetime object. Return pd.datetime object. Define
hour as hour input if not passed directly.
'''
import re
if type(t) == str:
try:
tsplit = re.split('-|_| ', t)
#mm_dd format
if tsplit.__len__() == 2 and t.__len__() == 5:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
tsplit.append(str(hour).rjust(2,'0')+'00')
#mm_dd_hh or YYYY_mm_dd format
elif tsplit.__len__() == 3 :
if tsplit[0].__len__() == 2:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
elif tsplit[0].__len__() == 4:
tsplit.append(str(hour).rjust(2,'0')+'00')
#YYYY-mm-dd_HHMM format
if tsplit.__len__() == 4 and tsplit[0].__len__() == 4:
t_out = pd.to_datetime(''.join(tsplit).ljust(12,'0') )
else:
raise Exception(f'incorrect time string passed {t}.'
'Valid options: mm_dd, mm_dd_HH, '
'mm_dd_HHMM, YYYY-mm-dd_HHMM')
except Exception as e:
# Error for incorrect string passed:
raise(e)
else: #datetime or timestamp
try:
t_out = pd.to_datetime(t)
except pd.errors.ParserError:
print('incorrect time object passed. Valid options: '
'string or datetime.datetime or pd.timeIndex. You '
f'passed {type(t)}.')
return t_out, coerce_year
# end _parseTimes
def _tz_convert(metdata, metadata, tz_convert_val):
"""
            convert metdata to a different local timezone. Particularly for
SolarGIS weather files which are returned in UTC by default.
----------
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
Returns: metdata, metadata
"""
import pytz
if (type(tz_convert_val) == int) | (type(tz_convert_val) == float):
metadata['TZ'] = tz_convert_val
metdata = metdata.tz_convert(pytz.FixedOffset(tz_convert_val*60))
return metdata, metadata
# end _tz_convert
if source is None:
if weatherFile[-3:].lower() == 'epw':
source = 'EPW'
else:
                print('Warning: CSV file passed for input. Assuming it is TMY3-'+
                      'style format')
source = 'TMY3'
if label is None:
            label = 'right' # EPW and TMY are by default right-labeled.
if source.lower() == 'solargis':
if label is None:
label = 'center'
metdata, metadata = self._readSOLARGIS(weatherFile, label=label)
if source.lower() =='epw':
metdata, metadata = self._readEPW(weatherFile, label=label)
if source.lower() =='tmy3':
metdata, metadata = self._readTMY(weatherFile, label=label)
metdata, metadata = _tz_convert(metdata, metadata, tz_convert_val)
tzinfo = metdata.index.tzinfo
tempMetDatatitle = 'metdata_temp.csv'
# Parse the start and endtime strings.
if starttime is not None:
starttime, coerce_year = _parseTimes(starttime, 1, coerce_year)
starttime = starttime.tz_localize(tzinfo)
if endtime is not None:
endtime, coerce_year = _parseTimes(endtime, 23, coerce_year)
endtime = endtime.tz_localize(tzinfo)
'''
#TODO: do we really need this check?
if coerce_year is not None and starttime is not None:
if coerce_year != starttime.year or coerce_year != endtime.year:
print("Warning: Coerce year does not match requested sampled "+
"date(s)'s years. Setting Coerce year to None.")
coerce_year = None
'''
tmydata_trunc = self._saveTempTMY(metdata, filename=tempMetDatatitle,
starttime=starttime, endtime=endtime,
coerce_year=coerce_year,
label=label)
if tmydata_trunc.__len__() > 0:
self.metdata = MetObj(tmydata_trunc, metadata, label = label)
else:
self.metdata = None
raise Exception('Weather file returned zero points for the '
'starttime / endtime provided')
return self.metdata
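# Hedged usage sketch for readWeatherFile above (file name and dates are hypothetical,
# and it is assumed the method exposes the starttime/endtime/coerce_year parameters used above):
#   metdata = demo.readWeatherFile('USA_VA_Richmond.epw',
#                                  starttime='2021-06-01_0100',
#                                  endtime='2021-06-30_2300', coerce_year=2021)
# where `demo` is an existing RadianceObj instance.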
def _saveTempTMY(self, tmydata, filename=None, starttime=None, endtime=None,
coerce_year=None, label=None):
'''
private function to save part or all of tmydata into /EPWs/ for use
in gencumsky -G mode and return truncated tmydata. Gencumsky 8760
starts with Jan 1, 1AM and ends Dec 31, 2400
starttime: tz-localized pd.Timestamp
endtime: tz-localized pd.Timestamp
returns: tmydata_truncated : subset of tmydata based on start & end
'''
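# The temporary file(s) written below hold two space-separated columns (GHI DHI),
# one row per hour, e.g. (illustrative values only):
#   0 0
#   455 122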
if filename is None:
filename = 'temp.csv'
gencumskydata = None
gencumdict = None
if len(tmydata) == 8760:
print("8760 line in WeatherFile. Assuming this is a standard hourly"+
" WeatherFile for the year for purposes of saving Gencumulativesky"+
" temporary weather files in EPW folder.")
if coerce_year is None and starttime is not None:
coerce_year = starttime.year
# SILVANA: If user doesn't pass starttime, and doesn't select
# coerce_year, then do we really need to coerce it?
elif coerce_year is None:
coerce_year = 2021
print(f"Coercing year to {coerce_year}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# Correcting last index to next year.
tmydata.index.values[-1] = tmydata.index[-1] + pd.DateOffset(year=(coerce_year+1))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
else:
if len(tmydata.index.year.unique()) == 1:
if coerce_year:
# TODO: check why subhourly data still has 0 entries on the next day on _readTMY3
# in the meantime, let's make Silvana's life easy by just deleting 0 entries
tmydata = tmydata[~(tmydata.index.hour == 0)]
print(f"Coercing year to {coerce_year}")
# TODO: this coercing shows a python warning. Turn it off or find another method? bleh.
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
if coerce_year:
print("More than 1 year of data identified. Can't do coercing")
# Check if years are consecutive
l = list(tmydata.index.year.unique())
if l != list(range(min(l), max(l)+1)):
print("Years are not consecutive. Won't be able to use Gencumsky"+
" because who knows what's going on with this data.")
else:
print("Years are consecutive. For Gencumsky, make sure to select"+
" which yearly temporary weather file you want to use"+
" else they will all get accumulated to same hour/day")
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata = tmydata[filterdates] # Reducing years potentially
# Checking if filtering reduced to just 1 year to do the usual saving.
if len(tmydata.index.year.unique()) == 1:
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
gencumdict = [g for n, g in tmydata.groupby(pd.Grouper(freq='Y'))]
for ii in range(0, len(gencumdict)):
gencumskydata = gencumdict[ii]
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
gencumdict[ii] = gencumskydata
gencumskydata = None # clearing so that the dictionary style can be activated.
# Let's save files in EPWs folder for Gencumsky
if gencumskydata is not None:
csvfile = os.path.join('EPWs', filename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile = csvfile
if gencumdict is not None:
self.gencumsky_metfile = []
for ii in range (0, len(gencumdict)):
gencumskydata = gencumdict[ii]
newfilename = filename.split('.')[0]+'_year_'+str(ii)+'.csv'
csvfile = os.path.join('EPWs', newfilename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile.append(csvfile)
return tmydata
def _readTMY(self, tmyfile=None, label = 'right', coerce_year=None):
'''
use pvlib to read in a tmy3 file.
Note: pvlib 0.7 does not currently support sub-hourly files. Until
then, use _convertTMYdate() to create the index
Parameters
------------
tmyfile : str
Filename of tmy3 to be read with pvlib.tmy.readtmy3
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce to. Default is 2021.
Returns
-------
metdata - MetObj collected from TMY3 file
'''
def _convertTMYdate(data, meta):
''' requires pvlib 0.8, updated to handle subhourly timestamps '''
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data['Date (MM/DD/YYYY)'])
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data['Time (HH:MM)'].str[:2].astype(int) % 24
minute = data['Time (HH:MM)'].str[3:].astype(int)
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = (data_ymd + pd.to_timedelta(shifted_hour, unit='h') +
pd.to_timedelta(minute, unit='min') )
data = data.tz_localize(int(meta['TZ'] * 3600))
return data
import pvlib
#(tmydata, metadata) = pvlib.tmy.readtmy3(filename=tmyfile) #pvlib<=0.6
(tmydata, metadata) = pvlib.iotools.tmy.read_tmy3(filename=tmyfile,
coerce_year=coerce_year)
try:
tmydata = _convertTMYdate(tmydata, metadata)
except KeyError:
print('PVLib >= 0.8.0 is required for sub-hourly data input')
return tmydata, metadata
def _readEPW(self, epwfile=None, label = 'right', coerce_year=None):
"""
Uses readepw from pvlib>0.6.1 but un-do -1hr offset and
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Parameters
------------
epwfile : str
Directory and filename of the epwfile. If None, opens an interactive
loading window.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sun position. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce data to.
"""
import pvlib
#import re
'''
NOTE: In PVLib > 0.6.1 the new epw.read_epw() function reads in time
with a default -1 hour offset. This is reflected in our existing
workflow.
'''
#(tmydata, metadata) = readepw(epwfile) #
(tmydata, metadata) = pvlib.iotools.epw.read_epw(epwfile,
coerce_year=coerce_year) #pvlib>0.6.1
#pvlib uses -1hr offset that needs to be un-done. Why did they do this?
tmydata.index = tmydata.index+pd.Timedelta(hours=1)
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
tmydata.rename(columns={'dni':'DNI',
'dhi':'DHI',
'temp_air':'DryBulb',
'wind_speed':'Wspd',
'ghi':'GHI',
'albedo':'Alb'
}, inplace=True)
return tmydata, metadata
def _readSOLARGIS(self, filename=None, label='center'):
"""
Read solarGIS data file which is timestamped in UTC.
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Timezone is always returned as UTC. Use tz_convert in readWeatherFile
to manually convert to local time
Parameters
------------
filename : str
filename of the solarGIS file.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval. SolarGis default style is center,
unless user requests a right label.
"""
# file format: anything with # preceding is in the header
header = []; lat = None; lon = None; elev = None; name = None
with open(filename, 'r') as result:
for line in result:
if line.startswith('#'):
header.append(line)
if line.startswith('#Latitude:'):
lat = line[11:]
if line.startswith('#Longitude:'):
lon = line[12:]
if line.startswith('#Elevation:'):
elev = line[12:17]
if line.startswith('#Site name:'):
name = line[12:-1]
else:
break
metadata = {'latitude':float(lat),
'longitude':float(lon),
'altitude':float(elev),
'Name':name,
'TZ':0.0}
# read in remainder of data
data = pd.read_csv(filename,skiprows=header.__len__(), delimiter=';')
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
data.rename(columns={'DIF':'DHI',
'TEMP':'DryBulb',
'WS':'Wspd',
}, inplace=True)
# Generate index from Date (DD.HH.YYYY) and Time
data.index = pd.to_datetime(data.Date + ' ' + data.Time,
dayfirst=True, utc=True,
infer_datetime_format = True)
return data, metadata
def getSingleTimestampTrackerAngle(self, metdata, timeindex, gcr=None,
azimuth=180, axis_tilt=0,
limit_angle=45, backtrack=True):
"""
Helper function to calculate a tracker's angle for use with the
fixed tilt routines of bifacial_radiance. It calculates tracker angle for
sun position at the timeindex passed (no left or right time offset,
label = 'center')
Parameters
----------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
timeindex : int
Index between 0 to 8760 indicating hour to simulate.
gcr : float
Ground coverage ratio for calculating backtracking. Default [1.0/3.0]
azimuth : float or int
Orientation axis of tracker torque tube. Default North-South (180 deg)
axis_tilt : float or int
Default 0. Axis tilt is not implemented in sensor locations, so
changing it has no effect in this release.
limit_angle : float or int
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
backtrack : boolean
Whether backtracking is enabled (default = True)
"""
'''
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
timestamp = metdata.datetime[timeindex]
'''
import pvlib
solpos = metdata.solpos.iloc[timeindex]
sunzen = float(solpos.apparent_zenith)
sunaz = float(solpos.azimuth) # not subtracting the 180
trackingdata = pvlib.tracking.singleaxis(sunzen, sunaz,
axis_tilt, azimuth,
limit_angle, backtrack, gcr)
tracker_theta = float(np.round(trackingdata['tracker_theta'],2))
tracker_theta = tracker_theta*-1 # bifacial_radiance uses East (morning) theta as positive
return tracker_theta
def gendaylit(self, timeindex, metdata=None, debug=False):
"""
Sets and returns sky information using gendaylit.
Uses PVLIB for calculating the sun position angles instead of
using Radiance internal sun position calculation (for that use gendaylit function)
Parameters
----------
timeindex : int
Index from 0 to ~4000 of the MetObj (daylight hours only)
metdata : ``MetObj``
MetObj object with list of dni, dhi, ghi and location
debug : bool
Flag to print output of sky DHI and DNI
Returns
-------
skyname : str
Sets as a self.skyname and returns filename of sky in /skies/ directory.
If errors exist, such as DNI = 0 or sun below horizon, this skyname is None
"""
import warnings
if metdata is None:
try:
metdata = self.metdata
except:
print('usage: pass metdata, or run after running ' +
'readWeatherfile() ')
return
ground = self.ground
locName = metdata.city
dni = metdata.dni[timeindex]
dhi = metdata.dhi[timeindex]
ghi = metdata.ghi[timeindex]
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
# Assign Albedos
try:
if ground.ReflAvg.shape == metdata.dni.shape:
groundindex = timeindex
elif self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
warnings.warn("Shape of ground Albedos and TMY data do not match.")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
if debug is True:
print('Sky generated with Gendaylit, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
print("Datetime TimeIndex", metdata.datetime[timeindex])
#Time conversion to correct format and offset.
#datetime = metdata.sunrisesetdata['corrected_timestamp'][timeindex]
#Don't need any of this any more. Already sunrise/sunset corrected and offset by appropriate interval
# get solar position zenith and azimuth based on site metadata
#solpos = pvlib.irradiance.solarposition.get_solarposition(datetimetz,lat,lon,elev)
solpos = metdata.solpos.iloc[timeindex]
sunalt = float(solpos.elevation)
# Radiance expects azimuth South = 0, PVlib gives South = 180. Must subtract 180 to match.
sunaz = float(solpos.azimuth)-180.0
sky_path = 'skies'
if dhi <= 0:
self.skyfiles = [None]
return None
# We should already be filtering for elevation >0. But just in case...
if sunalt <= 0:
sunalt = np.arcsin((ghi-dhi)/(dni+.001))*180/np.pi # reverse engineer elevation from ghi, dhi, dni
print('Warning: negative sun elevation at '+
'{}. '.format(metdata.datetime[timeindex])+
'Re-calculated elevation: {:0.2}'.format(sunalt))
# Note - -W and -O1 option is used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# location name: " + str(locName) + " LAT: " + str(lat)
+" LON: " + str(lon) + " Elev: " + str(elev) + "\n"
"# Sun position calculated w. PVLib\n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
ground._makeGroundString(index=groundindex, cumulativesky=False)
time = metdata.datetime[timeindex]
#filename = str(time)[2:-9].replace('-','_').replace(' ','_').replace(':','_')
filename = time.strftime('%Y-%m-%d_%H%M')
skyname = os.path.join(sky_path,"sky2_%s_%s_%s.rad" %(lat, lon, filename))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def gendaylit2manual(self, dni, dhi, sunalt, sunaz):
"""
Sets and returns sky information using gendaylit.
Uses user-provided data for sun position and irradiance.
.. warning::
This generates the sky at the sun altitude&azimuth provided, make
sure it is the right position relative to how the weather data got
created and read (i.e. label right, left or center).
Parameters
------------
dni: int or float
Direct Normal Irradiance (DNI) value, in W/m^2
dhi : int or float
Diffuse Horizontal Irradiance (DHI) value, in W/m^2
sunalt : int or float
Sun altitude (degrees)
sunaz : int or float
Sun azimuth (degrees)
Returns
-------
skyname : string
Filename of sky in /skies/ directory
"""
print('Sky generated with Gendaylit 2 MANUAL, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
sky_path = 'skies'
if sunalt <= 0 or dhi <= 0:
self.skyfiles = [None]
return None
# Assign Albedos
try:
if self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
print("Ambiguous albedo entry, Set albedo to single value "
"in setGround()")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
# Note: -W and -O1 are used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# Manual inputs of DNI, DHI, SunAlt and SunAZ into Gendaylit used \n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, self.ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
self.ground._makeGroundString(index=groundindex, cumulativesky=False)
skyname = os.path.join(sky_path, "sky2_%s.rad" %(self.name))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def genCumSky(self, gencumsky_metfile=None, savefile=None):
"""
Generate Skydome using gencumsky.
.. warning::
gencumulativesky.exe is required to be installed,
which is not a standard radiance distribution.
You can find the program in the bifacial_radiance distribution directory
in \Lib\site-packages\bifacial_radiance\data
Use :func:`readWeatherFile(filename, starttime='YYYY-mm-dd_HHMM', endtime='YYYY-mm-dd_HHMM')`
to limit gencumsky simulations instead.
Parameters
------------
gencumsky_metfile : str
Filename with path to the temporary meteorological file, usually created
in the EPWs folder. This csv file has no headers, no index, and two
space-separated columns with values for GHI and DHI for each hour
in the year, and MUST be 8760 entries long, otherwise gencumulativesky.exe cries.
savefile : string
If savefile is None, defaults to "cumulative"
Returns
--------
skyname : str
Filename of the .rad file containing cumulativesky info
"""
# TODO: error checking and auto-install of gencumulativesky.exe
# TODO: add check if readWeatherfile has not been done
# TODO: check if it fails if gcc module has been loaded? (common hpc issue)
#import datetime
if gencumsky_metfile is None:
gencumsky_metfile = self.gencumsky_metfile
if isinstance(gencumsky_metfile, str):
print("Loaded ", gencumsky_metfile)
if isinstance(gencumsky_metfile, list):
print("There are more than 1 year of gencumsky temporal weather file saved."+
"You can pass which file you want with gencumsky_metfile input. Since "+
"No year was selected, defaulting to using the first year of the list")
gencumsky_metfile = gencumsky_metfile[0]
print("Loaded ", gencumsky_metfile)
if savefile is None:
savefile = "cumulative"
sky_path = 'skies'
lat = self.metdata.latitude
lon = self.metdata.longitude
timeZone = self.metdata.timezone
'''
cmd = "gencumulativesky +s1 -h 0 -a %s -o %s -m %s %s " %(lat, lon, float(timeZone)*15, filetype) +\
"-time %s %s -date %s %s %s %s %s" % (startdt.hour, enddt.hour+1,
startdt.month, startdt.day,
enddt.month, enddt.day,
gencumsky_metfile)
'''
cmd = (f"gencumulativesky +s1 -h 0 -a {lat} -o {lon} -m "
f"{float(timeZone)*15} -G {gencumsky_metfile}" )
with open(savefile+".cal","w") as f:
_,err = _popen(cmd, None, f)
if err is not None:
print(err)
# Assign Albedos
try:
groundstring = self.ground._makeGroundString(cumulativesky=True)
except:
raise Exception('Error: ground reflection not defined. '
'Run RadianceObj.setGround() first')
return
skyStr = "#Cumulative Sky Definition\n" +\
"void brightfunc skyfunc\n" + \
"2 skybright " + "%s.cal\n" % (savefile) + \
"0\n" + \
"0\n" + \
"\nskyfunc glow sky_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"\nsky_glow source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
groundstring
skyname = os.path.join(sky_path, savefile+".rad")
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]#, 'SunFile.rad' ]
return skyname
def set1axis(self, metdata=None, azimuth=180, limit_angle=45,
angledelta=5, backtrack=True, gcr=1.0 / 3, cumulativesky=True,
fixed_tilt_angle=None, useMeasuredTrackerAngle=False,
axis_azimuth=None):
"""
Set up geometry for 1-axis tracking. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met data
matches the tracking angle. Returns 'trackerdict' which has keys equal to
either the tracker angles (gencumsky workflow) or timestamps (gendaylit hourly
workflow)
Parameters
------------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
azimuth : numeric
Orientation axis of tracker torque tube. Default North-South (180 deg).
For fixed-tilt configuration, input is fixed azimuth (180 is south)
limit_angle : numeric
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
angledelta : numeric
Degree of rotation increment to parse irradiance bins. Default 5 degrees.
(0.4 % error for DNI). Other options: 4 (.25%), 2.5 (0.1%).
Note: the smaller the angledelta, the more simulations must be run.
backtrack : bool
Whether backtracking is enabled (default = True)
gcr : float
Ground coverage ratio for calculating backtracking. Default [1.0/3.0]
cumulativesky : bool
[True] Whether individual csv files are
created with constant tilt angle for the cumulativesky approach.
if false, the gendaylit tracking approach must be used.
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and axis_azimuth as the tilt and azimuth
useMeasuredTrackerAngle: Bool
If True, and data for tracker angles has been passed by being included
in the WeatherFile object (column name 'Tracker Angle (degrees)'),
then tracker angles will be set to these values instead of being calculated.
NOTE that the value for azimuth passed to set1axis must be surface
azimuth in the morning and not the axis_azimuth
(i.e. for a N-S HSAT, azimuth = 90).
axis_azimuth : numeric
DEPRECATED. returns deprecation warning. Pass the tracker
axis_azimuth through to azimuth input instead.
Returns
-------
trackerdict : dictionary
Keys represent tracker tilt angles (gencumsky) or timestamps (gendaylit)
and list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
- or -
trackerdict[time]['tracker_theta';'surf_azm';'surf_tilt']
"""
# Documentation check:
# Removed Internal variables
# -------
# metdata.solpos dataframe with solar position data
# metdata.surface_azimuth list of tracker azimuth data
# metdata.surface_tilt list of tracker surface tilt data
# metdata.tracker_theta list of tracker tilt angle
import warnings
if metdata == None:
metdata = self.metdata
if metdata == {}:
raise Exception("metdata doesnt exist yet. "+
"Run RadianceObj.readWeatherFile() ")
if axis_azimuth:
azimuth = axis_azimuth
warnings.warn("axis_azimuth is deprecated in set1axis; use azimuth "
"input instead.", DeprecationWarning)
#backtrack = True # include backtracking support in later version
#gcr = 1.0/3.0 # default value - not used if backtrack = False.
# get 1-axis tracker angles for this location, rounded to nearest 'angledelta'
trackerdict = metdata._set1axis(cumulativesky=cumulativesky,
azimuth=azimuth,
limit_angle=limit_angle,
angledelta=angledelta,
backtrack=backtrack,
gcr=gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle
)
self.trackerdict = trackerdict
self.cumulativesky = cumulativesky
return trackerdict
def gendaylit1axis(self, metdata=None, trackerdict=None, startdate=None,
enddate=None, debug=False):
"""
1-axis tracking implementation of gendaylit.
Creates multiple sky files, one for each time of day.
Parameters
------------
metdata
MetObj output from readWeatherFile. Needs to have
RadianceObj.set1axis() run on it first.
startdate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
enddate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
trackerdict : dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
Returns
-------
Updated trackerdict dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
with the additional dictionary value ['skyfile'] added
"""
if metdata is None:
metdata = self.metdata
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if startdate is not None or enddate is not None:
print("Deprecation Warning: gendyalit1axis no longer downselects"+
" entries by stardate and enddate. Downselect your data"+
" when loading with readWeatherFile")
return
try:
metdata.tracker_theta # this may not exist
except AttributeError:
print("metdata.tracker_theta doesn't exist. Run RadianceObj.set1axis() first")
if debug is False:
print('Creating ~%d skyfiles. '%(len(trackerdict.keys())))
count = 0 # counter to get number of skyfiles created, just for giggles
trackerdict2={}
for i in range(0, len(trackerdict.keys())):
try:
time = metdata.datetime[i]
except IndexError: #out of range error
break #
#filename = str(time)[5:-12].replace('-','_').replace(' ','_')
filename = time.strftime('%Y-%m-%d_%H%M')
self.name = filename
#check for GHI > 0
#if metdata.ghi[i] > 0:
if (metdata.ghi[i] > 0) & (~np.isnan(metdata.tracker_theta[i])):
skyfile = self.gendaylit(metdata=metdata,timeindex=i, debug=debug)
# trackerdict2 reduces the dict to only the range specified.
trackerdict2[filename] = trackerdict[filename]
trackerdict2[filename]['skyfile'] = skyfile
count +=1
print('Created {} skyfiles in /skies/'.format(count))
self.trackerdict = trackerdict2
return trackerdict2
def genCumSky1axis(self, trackerdict=None):
"""
1-axis tracking implementation of gencumulativesky.
Creates multiple .cal files and .rad files, one for each tracker angle.
Use :func:`readWeatherFile` to limit gencumsky simulations
Parameters
------------
trackerdict : dictionary
Trackerdict generated as output by RadianceObj.set1axis()
Returns
-------
trackerdict : dictionary
Trackerdict dictionary with new entry trackerdict.skyfile
Appends 'skyfile' to the 1-axis dict with the location of the sky .radfile
"""
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
for theta in sorted(trackerdict):
# call gencumulativesky with a new .cal and .rad name
csvfile = trackerdict[theta]['csvfile']
savefile = '1axis_%s'%(theta) #prefix for .cal file and skies\*.rad file
skyfile = self.genCumSky(gencumsky_metfile=csvfile, savefile=savefile)
trackerdict[theta]['skyfile'] = skyfile
print('Created skyfile %s'%(skyfile))
# delete default skyfile (not strictly necessary)
self.skyfiles = None
self.trackerdict = trackerdict
return trackerdict
def makeOct(self, filelist=None, octname=None):
"""
Combine everything together into a .oct file
Parameters
----------
filelist : list
Files to include. otherwise takes self.filelist
octname : str
filename (without .oct extension)
Returns
-------
octname : str
filename of .oct file in root directory including extension
err : str
Error message returned from oconv (if any)
"""
if filelist is None:
filelist = self.getfilelist()
if octname is None:
octname = self.name
debug = False
#JSS. With the way that the break is handled now, this will wait the 10 for all the hours
# that were not generated sky files.
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
for file in filelist:
if debug:
print("HPC Checking for file %s" % (file))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
#Filesky is being saved as 'none', so it crashes !
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:
print ("filenotfound")
break
#os.system('oconv '+ ' '.join(filelist) + ' > %s.oct' % (octname))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
#cmd = 'oconv ' + ' '.join(filelist)
filelist.insert(0,'oconv')
with open('%s.oct' % (octname), "w") as f:
_,err = _popen(filelist, None, f)
#TODO: exception handling for no sun up
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
if err[0:7] == 'message':
warnings.warn(err[9:], Warning)
#use rvu to see if everything looks good.
# use cmd for this since it locks out the terminal.
#'rvu -vf views\side.vp -e .01 monopanel_test.oct'
print("Created %s.oct" % (octname))
self.octfile = '%s.oct' % (octname)
return '%s.oct' % (octname)
def makeOct1axis(self, trackerdict=None, singleindex=None, customname=None):
"""
Combine files listed in trackerdict into multiple .oct files
Parameters
------------
trackerdict
Output from :py:class:`~bifacial_radiance.RadianceObj.makeScene1axis`
singleindex : str
Single index for trackerdict to run makeOct1axis in single-value mode,
format 'YYYY-MM-DD_HHMM'.
customname : str
Custom text string added to the end of the OCT file name.
Returns
-------
trackerdict
Append 'octfile' to the 1-axis dict with the location of the scene .octfile
"""
if customname is None:
customname = ''
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # loop through all values in the tracker dictionary
indexlist = trackerdict.keys()
else: # just loop through one single index in tracker dictionary
indexlist = [singleindex]
print('\nMaking {} octfiles in root directory.'.format(indexlist.__len__()))
for index in sorted(indexlist): # run through either entire key list of trackerdict, or just a single value
try:
filelist = self.materialfiles + [trackerdict[index]['skyfile'], trackerdict[index]['radfile']]
octname = '1axis_%s%s'%(index, customname)
trackerdict[index]['octfile'] = self.makeOct(filelist, octname)
except KeyError as e:
print('Trackerdict key error: {}'.format(e))
return trackerdict
def makeModule(self, name=None, x=None, y=None, z=None, modulefile=None,
text=None, customtext='', xgap=0.01, ygap=0.0,
zgap=0.1, numpanels=1, rewriteModulefile=True,
glass=False, modulematerial=None, bifi=1, **kwargs):
"""
pass module generation details into ModuleObj(). See ModuleObj()
docstring for more details
"""
from bifacial_radiance import ModuleObj
if name is None:
print("usage: makeModule(name,x,y,z, modulefile = '\objects\*.rad', "+
" zgap = 0.1 (module offset)"+
"numpanels = 1 (# of panels in portrait), ygap = 0.05 "+
"(slope distance between panels when arrayed), "+
"rewriteModulefile = True (or False), bifi = 1")
print("You can also override module_type info by passing 'text'"+
"variable, or add on at the end for racking details with "+
"'customtext'. See function definition for more details")
print("Optional: tubeParams={} (torque tube details including "
"diameter (torque tube dia. in meters), tubetype='Round' "
"(or 'square', 'hex'), material='Metal_Grey' (or 'black')"
", axisofrotation=True (does scene rotate around tube)")
print("Optional: cellModule={} (create cell-level module by "+
" passing in dictionary with keys 'numcellsx'6 (#cells in "+
"X-dir.), 'numcellsy', 'xcell' (cell size in X-dir. in meters),"+
"'ycell', 'xcellgap' (spacing between cells in X-dir.), 'ycellgap'")
print("Optional: omegaParams={} (create the support structure omega by "+
"passing in dictionary with keys 'omega_material' (the material of "+
"omega), 'mod_overlap'(the length of the module adjacent piece of"+
" omega that overlaps with the module),'x_omega1', 'y_omega' (ideally same"+
" for all the parts of omega),'z_omega1', 'x_omega2' (X-dir length of the"+
" vertical piece), 'x_omega3', z_omega3")
return
"""
# TODO: check for deprecated torquetube and axisofrotationTorqueTube in
kwargs.
"""
if 'tubeParams' in kwargs:
tubeParams = kwargs.pop('tubeParams')
else:
tubeParams = None
if 'torquetube' in kwargs:
torquetube = kwargs.pop('torquetube')
print("\nWarning: boolean input `torquetube` passed into makeModule"
". Starting in v0.4.0 this boolean parameter is deprecated."
" Use module.addTorquetube() with `visible` parameter instead.")
if tubeParams:
tubeParams['visible'] = torquetube
elif (tubeParams is None) & (torquetube is True):
tubeParams = {'visible':True} # create default TT
if 'axisofrotationTorqueTube' in kwargs:
axisofrotation = kwargs.pop('axisofrotationTorqueTube')
print("\nWarning: input boolean `axisofrotationTorqueTube` passed "
"into makeModule. Starting in v0.4.0 this boolean parameter is"
" deprecated. Use module.addTorquetube() with `axisofrotation`"
"parameter instead.")
if tubeParams: #this kwarg only does something if there's a TT.
tubeParams['axisofrotation'] = axisofrotation
if self.hpc: # trigger HPC simulation in ModuleObj
kwargs['hpc']=True
self.module = ModuleObj(name=name, x=x, y=y, z=z, bifi=bifi, modulefile=modulefile,
text=text, customtext=customtext, xgap=xgap, ygap=ygap,
zgap=zgap, numpanels=numpanels,
rewriteModulefile=rewriteModulefile, glass=glass,
modulematerial=modulematerial, tubeParams=tubeParams,
**kwargs)
return self.module
def makeCustomObject(self, name=None, text=None):
"""
Function for development and experimenting with extraneous objects in the scene.
This function creates a `name.rad` textfile in the objects folder
with whatever text that is passed to it.
It is up to the user to pass the correct radiance format.
For example, to create a box at coordinates 0,0 (with its bottom surface
on the plane z=0):
.. code-block:
name = 'box'
text='! genbox black PVmodule 0.5 0.5 0.5 | xform -t -0.25 -0.25 0'
Parameters
----------
name : str
String input to name the module type
text : str
Text used in the radfile to generate the module
"""
customradfile = os.path.join('objects', '%s.rad'%(name)) # update in 0.2.3 to shorten radnames
# py2 and 3 compatible: binary write, encode text first
with open(customradfile, 'wb') as f:
f.write(text.encode('ascii'))
print("\nCustom Object Name", customradfile)
self.customradfile = customradfile
return customradfile
def printModules(self):
# print available module types from ModuleObj
from bifacial_radiance import ModuleObj
modulenames = ModuleObj().readModule()
print('Available module names: {}'.format([str(x) for x in modulenames]))
return modulenames
def makeScene(self, module=None, sceneDict=None, radname=None,
moduletype=None):
"""
Create a SceneObj which contains details of the PV system configuration including
tilt, row pitch, height, nMods per row, nRows in the system...
Parameters
----------
module : str or ModuleObj
String name of module created with makeModule()
sceneDict : dictionary
Dictionary with keys: `tilt`, `clearance_height`*, `pitch`,
`azimuth`, `nMods`, `nRows`, `hub_height`*, `height`*
* height deprecated from sceneDict. For makeScene (fixed systems)
if passed it is assumed it refers to clearance_height.
`clearance_height` recommended for fixed_tracking systems.
`hub_height` can also be passed as a possibility.
radname : str
Gives a custom name to the scene file. Useful when parallelizing.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
-------
SceneObj
'scene' with configuration details
"""
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('makeScene(module, sceneDict, nMods, nRows). '+\
'Available moduletypes: ' )
self.printModules() #print available module types
return
self.scene = SceneObj(module)
self.scene.hpc = self.hpc #pass HPC mode from parent
if sceneDict is None:
print('makeScene(moduletype, sceneDict, nMods, nRows). '+\
'sceneDict inputs: .tilt .clearance_height .pitch .azimuth')
return self.scene
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
# Fixed tilt routine
# Preferred: clearance_height,
# If only height is passed, it is assumed to be clearance_height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='clearance_height',
nonpreferred='hub_height')
self.nMods = sceneDict['nMods']
self.nRows = sceneDict['nRows']
self.sceneRAD = self.scene._makeSceneNxR(sceneDict=sceneDict,
radname=radname)
if 'appendRadfile' not in sceneDict:
appendRadfile = False
else:
appendRadfile = sceneDict['appendRadfile']
if appendRadfile:
debug = False
try:
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDED!")
except:
#TODO: Manage situation where radfile was created with
#appendRadfile to False first..
self.radfiles=[]
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDAGE created!")
else:
self.radfiles = [self.sceneRAD]
return self.scene
def appendtoScene(self, radfile=None, customObject=None, text=''):
"""
Appends to the `Scene radfile` in folder `\objects` the text command in Radiance
lingo created by the user.
Useful when using addCustomObject to the scene.
Parameters
----------
radfile: str
Directory and name of where .rad scene file is stored
customObject : str
Directory and name of custom object .rad file is stored
text : str
Command to be appended to the radfile. Do not leave empty spaces
at the end.
Returns
-------
Nothing, the radfile must already be created and assigned when running this.
"""
#TODO: Add a custom name and replace radfile name
# py2 and 3 compatible: binary write, encode text first
text2 = '\n' + text + ' ' + customObject
debug = False
if debug:
print (text2)
with open(radfile, 'a+') as f:
f.write(text2)
def makeScene1axis(self, trackerdict=None, module=None, sceneDict=None,
cumulativesky=None, moduletype=None):
"""
Creates a SceneObj for each tracking angle which contains details of the PV
system configuration including row pitch, hub_height, nMods per row, nRows in the system...
Parameters
------------
trackerdict
Output from GenCumSky1axis
module : str or ModuleObj
Name or ModuleObj created with makeModule()
sceneDict :
Dictionary with keys:`tilt`, `hub_height`, `pitch`, `azimuth`
cumulativesky : bool
Defines if sky will be generated with cumulativesky or gendaylit.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
--------
trackerdict
Append the following keys
'radfile'
directory where .rad scene file is stored
'scene'
SceneObj for each tracker theta
'clearance_height'
Calculated ground clearance based on
`hub height`, `tilt` angle and overall collector width `sceney`
"""
import math
if sceneDict is None:
print('usage: makeScene1axis(module, sceneDict, nMods, nRows). '+
'sceneDict inputs: .hub_height .azimuth .nMods .nRows '+
'and .pitch or .gcr')
return
# If no nRows or nMods assigned on deprecated variable or dictionary,
# assign default.
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if cumulativesky is None:
try:
# see if cumulativesky = False was set earlier,
# e.g. in RadianceObj.set1axis
cumulativesky = self.cumulativesky
except AttributeError:
# default cumulativesky = true to maintain backward compatibility.
cumulativesky = True
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('usage: makeScene1axis(trackerdict, module, '+
'sceneDict, nMods, nRows). ')
self.printModules() #print available module types
return
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
# 1axis routine
# Preferred hub_height
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight:
simplefix = 0
hubheight = sceneDict['clearance_height'] # Not really the hub height, but this is the fastest
# way to make it work with simplefix: below, the actual clearance height
# gets calculated, and simplefix = 0 disables the cosine correction.
# TODO CLEAN THIS UP.
else:
#the hub height is the tracker height at center of rotation.
hubheight = sceneDict['hub_height']
simplefix = 1
if cumulativesky is True: # cumulativesky workflow
print('\nMaking .rad files for cumulativesky 1-axis workflow')
for theta in trackerdict:
scene = SceneObj(module)
if trackerdict[theta]['surf_azm'] >= 180:
trackerdict[theta]['surf_azm'] = trackerdict[theta]['surf_azm']-180
trackerdict[theta]['surf_tilt'] = trackerdict[theta]['surf_tilt']*-1
radname = '1axis%s_'%(theta,)
# Calculating clearance height for this theta.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
# Calculate the ground clearance height based on the hub height. Add abs(theta) to avoid negative tilt angle errors
trackerdict[theta]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed, not pitch
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[theta]['radfile'] = radfile
trackerdict[theta]['scene'] = scene
print('{} Radfiles created in /objects/'.format(trackerdict.__len__()))
else: #gendaylit workflow
print('\nMaking ~%s .rad files for gendaylit 1-axis workflow (this takes a minute..)' % (len(trackerdict)))
count = 0
for time in trackerdict:
scene = SceneObj(module)
if trackerdict[time]['surf_azm'] >= 180:
trackerdict[time]['surf_azm'] = trackerdict[time]['surf_azm']-180
trackerdict[time]['surf_tilt'] = trackerdict[time]['surf_tilt']*-1
theta = trackerdict[time]['theta']
radname = '1axis%s_'%(time,)
# Calculating clearance height for this time.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
if trackerdict[time]['ghi'] > 0:
trackerdict[time]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed instead of pitch
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[time]['radfile'] = radfile
trackerdict[time]['scene'] = scene
count+=1
print('{} Radfiles created in /objects/'.format(count))
self.trackerdict = trackerdict
self.nMods = sceneDict['nMods'] #assign nMods and nRows to RadianceObj
self.nRows = sceneDict['nRows']
self.hub_height = hubheight
return trackerdict
def analysis1axis(self, trackerdict=None, singleindex=None, accuracy='low',
customname=None, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
modscanfront = None, modscanback = None, relative=False,
debug=False ):
"""
Loop through trackerdict and runs linescans for each scene and scan in there.
Parameters
----------------
trackerdict
singleindex : str
For single-index mode, just the one index we want to run (new in 0.2.3).
Example format '2021-06-14_1230' for June 14th 2021, 12:30 pm
accuracy : str
'low' or 'high', resolution option used during _irrPlot and rtrace
customname : str
Custom text string to be added to the file name for the results .CSV files
modWanted : int
Module to be sampled. Index starts at 1.
rowWanted : int
Row to be sampled. Index starts at 1. (row 1)
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the length, the side perpendicular
to the collector width (CW) of the module(s) for the back side of the module.
If multiple values are passed, first value represents number of
front sensors, second value is number of back sensors.
modscanfront : dict
dictionary with one or more of the following keys: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
modscanback : dict
dictionary with one or more of the following keys: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated backscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
relative : Bool
if passing modscanfront and modscanback to modify dictionaries of positions,
this sets if the values passed to be updated are relative or absolute.
Default is absolute value (relative=False)
debug : Bool
Activates internal printing of the function to help debugging.
Returns
-------
trackerdict with new keys:
'AnalysisObj' : analysis object for this tracker theta
'Wm2Front' : list of front Wm-2 irradiances, len=sensorsy_back
'Wm2Back' : list of rear Wm-2 irradiances, len=sensorsy_back
'backRatio' : list of rear irradiance ratios, len=sensorsy_back
RadianceObj with new appended values:
'Wm2Front' : np Array with front irradiance cumulative
'Wm2Back' : np Array with rear irradiance cumulative
'backRatio' : np Array with rear irradiance ratios
"""
import warnings
if customname is None:
customname = ''
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # run over all values in trackerdict
trackerkeys = sorted(trackerdict.keys())
else: # run in single index mode.
trackerkeys = [singleindex]
if modWanted == None:
modWanted = round(self.nMods / 1.99)
if rowWanted == None:
rowWanted = round(self.nRows / 1.99)
frontWm2 = 0 # container for tracking front irradiance across module chord. Dynamically size based on first analysis run
backWm2 = 0 # container for tracking rear irradiance across module chord.
for index in trackerkeys: # either full list of trackerdict keys, or single index
name = '1axis_%s%s'%(index,customname)
octfile = trackerdict[index]['octfile']
scene = trackerdict[index]['scene']
if octfile is None:
continue # don't run analysis if the octfile is none
try: # look for missing data
analysis = AnalysisObj(octfile,name)
name = '1axis_%s%s'%(index,customname,)
frontscanind, backscanind = analysis.moduleAnalysis(scene=scene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
analysis.analysis(octfile=octfile,name=name,frontscan=frontscanind,backscan=backscanind,accuracy=accuracy)
trackerdict[index]['AnalysisObj'] = analysis
except Exception as e: # problem with file. TODO: only catch specific error types here.
warnings.warn('Index: {}. Problem with file. Error: {}. Skipping'.format(index,e), Warning)
return
#combine cumulative front and back irradiance for each tracker angle
try: #on error, trackerdict[index] is returned empty
trackerdict[index]['Wm2Front'] = analysis.Wm2Front
trackerdict[index]['Wm2Back'] = analysis.Wm2Back
trackerdict[index]['backRatio'] = analysis.backRatio
except AttributeError as e: # no key Wm2Front.
warnings.warn('Index: {}. Trackerdict key not found: {}. Skipping'.format(index,e), Warning)
return
if np.sum(frontWm2) == 0: # define frontWm2 the first time through
frontWm2 = np.array(analysis.Wm2Front)
backWm2 = np.array(analysis.Wm2Back)
else:
frontWm2 += np.array(analysis.Wm2Front)
backWm2 += np.array(analysis.Wm2Back)
print('Index: {}. Wm2Front: {}. Wm2Back: {}'.format(index,
np.mean(analysis.Wm2Front), np.mean(analysis.Wm2Back)))
if np.sum(self.Wm2Front) == 0:
self.Wm2Front = frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back = backWm2
else:
self.Wm2Front += frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back += backWm2
self.backRatio = np.mean(backWm2)/np.mean(frontWm2+.001)
# Save compiled results using _saveresults
if singleindex is None:
print ("Saving a cumulative-results file in the main simulation folder." +
"This adds up by sensor location the irradiance over all hours " +
"or configurations considered." +
"\nWarning: This file saving routine does not clean results, so "+
"if your setup has ygaps, or 2+modules or torque tubes, doing "+
"a deeper cleaning and working with the individual results "+
"files in the results folder is highly suggested.")
cumfilename = 'cumulative_results_%s.csv'%(customname)
if self.cumulativesky is True:
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
temptrackerdict = trackerdict[list(trackerdict)[0]]['AnalysisObj']
#temptrackerdict = trackerdict[0.0]['AnalysisObj']
frontcum ['x'] = temptrackerdict.x
frontcum ['y'] = temptrackerdict.y
frontcum ['z'] = temptrackerdict.z
frontcum ['mattype'] = temptrackerdict.mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = temptrackerdict.x
rearcum ['y'] = temptrackerdict.y
rearcum ['z'] = temptrackerdict.rearZ
rearcum ['mattype'] = temptrackerdict.rearMat
rearcum ['Wm2'] = self.Wm2Back
cumanalysisobj = AnalysisObj()
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
else: # trackerkeys are day/hour/min, and there's no easy way to find a
# tilt of 0, so making a fake linepoint object for tilt 0
# and then saving.
try:
cumscene = trackerdict[trackerkeys[0]]['scene']
cumscene.sceneDict['tilt']=0
cumscene.sceneDict['clearance_height'] = self.hub_height
cumanalysisobj = AnalysisObj()
frontscancum, backscancum = cumanalysisobj.moduleAnalysis(scene=cumscene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
x,y,z = cumanalysisobj._linePtsArray(frontscancum)
x,y,rearz = cumanalysisobj._linePtsArray(backscancum)
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
frontcum ['x'] = x
frontcum ['y'] = y
frontcum ['z'] = z
frontcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = x
rearcum ['y'] = y
rearcum ['z'] = rearz
rearcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].rearMat
rearcum ['Wm2'] = self.Wm2Back
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
except:
print("Not able to save a cumulative result for this simulation.")
return trackerdict
# End RadianceObj definition
class GroundObj:
"""
Class to set and return details for the ground surface materials and reflectance.
If 1 albedo value is passed, it is used as default.
If 3 albedo values are passed, they are assigned to each of the three wavelength placeholders (RGB),
If material type is known, it is used to get reflectance info.
if material type isn't known, material_info.list is returned
Parameters
------------
materialOrAlbedo : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
"""
def __init__(self, materialOrAlbedo=None, material_file=None):
import warnings
from numbers import Number
self.normval = None
self.ReflAvg = None
self.Rrefl = None
self.Grefl = None
self.Brefl = None
self.ground_type = 'custom'
if material_file is None:
material_file = 'ground.rad'
self.material_file = material_file
if materialOrAlbedo is None: # Case where it's none.
print('\nInput albedo 0-1, or string from ground.printGroundMaterials().'
'\nAlternatively, run setGround after readWeatherFile() '
'and setGround will read metdata.albedo if available')
return
if isinstance(materialOrAlbedo, str) :
self.ground_type = materialOrAlbedo
# Return the RGB albedo for material ground_type
materialOrAlbedo = self.printGroundMaterials(self.ground_type)
# Check for double and int.
if isinstance(materialOrAlbedo, Number):
materialOrAlbedo = np.array([[materialOrAlbedo,
materialOrAlbedo, materialOrAlbedo]])
if isinstance(materialOrAlbedo, list):
materialOrAlbedo = np.asarray(materialOrAlbedo)
# By this point, materialOrAlbedo should be a np.ndarray:
if isinstance(materialOrAlbedo, np.ndarray):
if materialOrAlbedo.ndim == 0:
# numpy array of one single value, i.e. np.array(0.62)
# after this if, np.array([0.62])
materialOrAlbedo = materialOrAlbedo.reshape([1])
if materialOrAlbedo.ndim == 1:
# If np.array is ([0.62]), this repeats it so at the end it's
# np.array ([0.62, 0.62, 0.62])
materialOrAlbedo = np.repeat(np.array([materialOrAlbedo]),
3, axis=1).reshape(
len(materialOrAlbedo),3)
if (materialOrAlbedo.ndim == 2) & (materialOrAlbedo.shape[1] > 3):
warnings.warn("Radiance only raytraces 3 wavelengths at "
"a time. Trimming albedo np.array input to "
"3 wavelengths.")
materialOrAlbedo = materialOrAlbedo[:,0:3]
# By this point we should have np.array of dim=2 and shape[1] = 3.
# Check for invalid values
if (materialOrAlbedo > 1).any() or (materialOrAlbedo < 0).any():
print('Warning: albedo values greater than 1 or less than 0. '
'Constraining to [0..1]')
materialOrAlbedo = materialOrAlbedo.clip(min=0, max=1)
try:
self.Rrefl = materialOrAlbedo[:,0]
self.Grefl = materialOrAlbedo[:,1]
self.Brefl = materialOrAlbedo[:,2]
self.normval = _normRGB(materialOrAlbedo[:,0],materialOrAlbedo[:,1],
materialOrAlbedo[:,2])
self.ReflAvg = np.round(np.mean(materialOrAlbedo, axis=1),4)
print(f'Loading albedo, {self.ReflAvg.__len__()} value(s), '
f'{self._nonzeromean(self.ReflAvg):0.3f} avg\n'
f'{self.ReflAvg[self.ReflAvg != 0].__len__()} nonzero albedo values.')
except IndexError as e:
print('albedo.shape should be 3 columns (N x 3)')
raise e
def printGroundMaterials(self, materialString=None):
"""
printGroundMaterials(materialString=None)
input: None or materialString. If None, return list of acceptable
material types from ground.rad. If valid string, return RGB albedo
of the material type selected.
"""
import warnings
material_path = 'materials'
f = open(os.path.join(material_path, self.material_file))
keys = [] #list of material key names
Rreflall = []; Greflall=[]; Breflall=[] #RGB material reflectance
temp = f.read().split()
f.close()
#return indices for 'plastic' definition
index = _findme(temp,'plastic')
for i in index:
keys.append(temp[i+1])# after plastic comes the material name
Rreflall.append(float(temp[i+5]))#RGB reflectance comes a few more down the list
Greflall.append(float(temp[i+6]))
Breflall.append(float(temp[i+7]))
if materialString is not None:
try:
index = _findme(keys,materialString)[0]
except IndexError:
warnings.warn('Error - materialString not in '
f'{self.material_file}: {materialString}')
raise
return(np.array([[Rreflall[index], Greflall[index], Breflall[index]]]))
else:
return(keys)
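# Usage sketch (hedged; 'litesoil' must exist in the material file):
#   >>> ground.printGroundMaterials()            # -> ['litesoil', 'concrete', ...]
#   >>> ground.printGroundMaterials('litesoil')  # -> array([[R, G, B]]) reflectances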
def _nonzeromean(self, val):
''' array mean excluding zero. return zero if everything's zero'''
tempmean = np.nanmean(val)
if tempmean > 0:
tempmean = np.nanmean(val[val !=0])
return tempmean
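# Worked example of _nonzeromean (illustrative; g is a ground object instance):
# zeros are excluded from the average, and an all-zero series returns 0.
#   >>> g._nonzeromean(np.array([0.0, 0.2, 0.4]))   # -> 0.3 (approximately)
#   >>> g._nonzeromean(np.array([0.0, 0.0]))        # -> 0.0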
def _makeGroundString(self, index=0, cumulativesky=False):
'''
create string with ground reflectance parameters for use in
gendaylit and gencumsky.
Parameters
-----------
index : integer
Index of time for time-series albedo. Default 0
cumulativesky: Boolean
If true, set albedo to average of time series values.
Returns
-------
groundstring: text with albedo details to append to sky.rad in
gendaylit
'''
try:
if cumulativesky is True:
Rrefl = self._nonzeromean(self.Rrefl)
Grefl = self._nonzeromean(self.Grefl)
Brefl = self._nonzeromean(self.Brefl)
normval = _normRGB(Rrefl, Grefl, Brefl)
else:
Rrefl = self.Rrefl[index]
Grefl = self.Grefl[index]
Brefl = self.Brefl[index]
normval = _normRGB(Rrefl, Grefl, Brefl)
# Check for all zero albedo case
if normval == 0:
normval = 1
groundstring = ( f'\nskyfunc glow ground_glow\n0\n0\n4 '
f'{Rrefl/normval} {Grefl/normval} {Brefl/normval} 0\n'
'\nground_glow source ground\n0\n0\n4 0 0 -1 180\n'
f'\nvoid plastic {self.ground_type}\n0\n0\n5 '
f'{Rrefl:0.3f} {Grefl:0.3f} {Brefl:0.3f} 0 0\n'
f"\n{self.ground_type} ring groundplane\n"
'0\n0\n8\n0 0 -.01\n0 0 1\n0 100' )
except IndexError as err:
print(f'Index {index} passed to albedo with only '
f'{self.Rrefl.__len__()} values.' )
raise err
return groundstring
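# Sketch of typical use (names and values are placeholders; the exact RGB
# normalization depends on _normRGB, which is defined elsewhere in the module):
#   >>> ground = GroundObj(0.2)
#   >>> txt = ground._makeGroundString(index=0, cumulativesky=False)
#   >>> # txt holds the glow/source/plastic/ring definitions appended to sky.rad
# With cumulativesky=True the nonzero mean of the R/G/B time series is used
# instead of a single indexed value.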
class SceneObj:
'''
scene information including PV module type, bifaciality, array info
pv module orientation defaults: Azimuth = 180 (south)
pv module origin: z = 0 bottom of frame. y = 0 lower edge of frame.
x = 0 vertical centerline of module
scene includes module details (x,y,bifi, sceney (collector_width), scenex)
'''
def __repr__(self):
return str(self.__dict__)
def __init__(self, module=None):
''' initialize SceneObj
'''
from bifacial_radiance import ModuleObj
# should sceneDict be initialized here? This is set in _makeSceneNxR
if module is None:
return
elif type(module) == str:
self.module = ModuleObj(name=module)
elif type(module) == ModuleObj: # try moduleObj
self.module = module
#self.moduleDict = self.module.getDataDict()
#self.scenex = self.module.scenex
#self.sceney = self.module.sceney
#self.offsetfromaxis = self.moduleDict['offsetfromaxis']
#TODO: get rid of these 4 values
self.modulefile = self.module.modulefile
self.hpc = False #default False. Set True by makeScene after sceneobj created.
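# Construction sketch (the module name below is a placeholder and must already
# exist in the local module library, e.g. created earlier with makeModule):
#   >>> scene = SceneObj('test_module')   # string -> ModuleObj(name='test_module')
#   >>> scene = SceneObj(my_module_obj)   # or pass an existing ModuleObj directly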
def _makeSceneNxR(self, modulename=None, sceneDict=None, radname=None):
"""
Arrange module defined in :py:class:`bifacial_radiance.SceneObj` into an N x R array.
Returns a :py:class:`bifacial_radiance.SceneObj` which contains details
of the PV system configuration including `tilt`, `row pitch`, `hub_height`
or `clearance_height`, `nMods` per row, and `nRows` in the system.
The returned scene has (0,0) coordinates centered at the module at the
center of the array. For 5 rows, that is row 3, for 4 rows, that is
row 2 also (rounds down). For 5 modules in the row, that is module 3,
for 4 modules in the row, that is module 2 also (rounds down)
Parameters
------------
modulename: str
Name of module created with :py:class:`~bifacial_radiance.RadianceObj.makeModule`.
sceneDict : dictionary
Dictionary of scene parameters.
clearance_height : numeric
(meters).
pitch : numeric
Separation between rows
tilt : numeric
Valid input ranges -90 to 90 degrees
azimuth : numeric
A value denoting the compass direction along which the
axis of rotation lies. Measured in decimal degrees East
of North. [0 to 180) possible.
nMods : int
Number of modules per row (default = 20)
nRows : int
Number of rows in system (default = 7)
radname : str
String for name for radfile.
Returns
-------
radfile : str
Filename of .RAD scene in /objects/
scene : :py:class:`~bifacial_radiance.SceneObj `
Returns a `SceneObject` 'scene' with configuration details
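Examples
--------
Minimal sceneDict sketch (values are placeholders, not defaults; either
`pitch` or `gcr`, and either `hub_height` or `clearance_height`, is
required; `scene` is assumed to be an initialized SceneObj):
>>> sceneDict = {'tilt': 10, 'azimuth': 180, 'pitch': 3,
... 'hub_height': 1.5, 'nMods': 20, 'nRows': 7}
>>> radfile = scene._makeSceneNxR(sceneDict=sceneDict)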
"""
if modulename is None:
modulename = self.module.name
if sceneDict is None:
print('makeScene(modulename, sceneDict, nMods, nRows). sceneDict'
' inputs: .tilt .azimuth .nMods .nRows'
' AND .pitch or .gcr ; AND .hub_height or .clearance_height')
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'axis_tilt' not in sceneDict:
sceneDict['axis_tilt'] = 0
if 'originx' not in sceneDict:
sceneDict['originx'] = 0
if 'originy' not in sceneDict:
sceneDict['originy'] = 0
if radname is None:
radname = str(self.module.name).strip().replace(' ', '_')
# loading variables
tilt = sceneDict['tilt']
azimuth = sceneDict['azimuth']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
axis_tilt = sceneDict['axis_tilt']
originx = sceneDict ['originx']
originy = sceneDict['originy']
# hub_height, clearance_height and height logic.
# this routine uses hub_height to move the panels up so it's important
# to have a value for that, either obtaining it from clearance_height
# (if coming from makeScene) or from hub_height itself.
# it is assumed that if no clearance_height or hub_height is passed,
# hub_height = height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict, preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight :
hubheight = sceneDict['clearance_height'] + 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney - self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
title_clearance_height = sceneDict['clearance_height']
else:
hubheight = sceneDict['hub_height']
# this calculates clearance_height, used for the title
title_clearance_height = sceneDict['hub_height'] - 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney + self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
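# Worked example (illustrative numbers only): clearance_height=0.8 m, tilt=20 deg,
# sceney=2 m, offsetfromaxis=0 gives hubheight = 0.8 + 0.5*sin(20 deg)*2 ~= 1.14 m.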
try:
if sceneDict['pitch'] >0:
pitch = sceneDict['pitch']
else:
raise Exception('default to gcr')
except:
if 'gcr' in sceneDict:
pitch = np.round(self.module.sceney/sceneDict['gcr'],3)
else:
raise Exception('No valid `pitch` or `gcr` in sceneDict')
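# Worked example (illustrative): sceney = 2 m and gcr = 0.35
# -> pitch = round(2/0.35, 3) = 5.714 m.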
''' INITIALIZE VARIABLES '''
text = '!xform '
text += '-rx %s -t %s %s %s ' %(tilt, 0, 0, hubheight)
# create nMods-element array along x, nRows along y. 1cm module gap.
text += '-a %s -t %s 0 0 -a %s -t 0 %s 0 ' %(nMods, self.module.scenex, nRows, pitch)
# azimuth rotation of the entire shebang. Select the row to scan here based on y-translation.
# Modifying so center row is centered in the array. (i.e. 3 rows, row 2. 4 rows, row 2 too)
# Since the array is already centered on row 1, module 1, we need to increment by Nrows/2-1 and Nmods/2-1
text += (f'-i 1 -t {-self.module.scenex*(round(nMods/1.999)*1.0-1)} '
f'{-pitch*(round(nRows / 1.999)*1.0-1)} 0 -rz {180-azimuth} '
f'-t {originx} {originy} 0 ' )
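# Illustrative result (placeholder values): with tilt=10, hubheight=1.5,
# scenex=2.01, nMods=3, nRows=3, pitch=5.714, azimuth=180, origin (0,0),
# the accumulated command is roughly:
# '!xform -rx 10 -t 0 0 1.5 -a 3 -t 2.01 0 0 -a 3 -t 0 5.714 0 -i 1 -t -2.01 -5.714 0 -rz 0 -t 0 0 0 '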
#axis tilt only working for N-S trackers
if axis_tilt != 0 and azimuth == 90:
print("Axis_Tilt is still under development. The scene will be "
"created with the proper axis tilt, and the tracking angle"
"will consider the axis_tilt, but the sensors for the "
"analysis might not fall in the correct surfaces unless you"
" manually position them for this version. Sorry! :D ")
text += (f'-rx {axis_tilt} -t 0 0 %s ' %(
self.module.scenex*(round(nMods/1.99)*1.0-1)*np.sin(
axis_tilt * np.pi/180) ) )
filename = (f'{radname}_C_{title_clearance_height:0.5f}_rtr_{pitch:0.5f}_tilt_{tilt:0.5f}_'
f'{nMods}modsx{nRows}rows_origin{originx},{originy}.rad' )
if self.hpc:
text += f'"{os.path.join(os.getcwd(), self.modulefile)}"'
radfile = os.path.join(os.getcwd(), 'objects', filename)
else:
text += os.path.join(self.modulefile)
radfile = os.path.join('objects',filename )
# py2 and 3 compatible: binary write, encode text first
with open(radfile, 'wb') as f:
f.write(text.encode('ascii'))
self.gcr = self.module.sceney / pitch
self.text = text
self.radfiles = radfile
self.sceneDict = sceneDict
# self.hub_height = hubheight
return radfile
def showScene(self):
"""
Method to call objview on the scene included in self
"""
cmd = 'objview %s %s' % (os.path.join('materials', 'ground.rad'),
self.radfiles)
print('Rendering scene. This may take a moment...')
_,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml'
' into your RADIANCE binaries path')
return
# end of SceneObj
class MetObj:
"""
Meteorological data from EPW file.
Initialize the MetObj from tmy data already read in.
Parameters
-----------
tmydata : DataFrame
TMY3 output from :py:class:`~bifacial_radiance.RadianceObj.readTMY` or
from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
metadata : Dictionary
Metadata output from :py:class:`~bifacial_radiance.RadianceObj.readTMY`
or from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if the
timestamp refers to the left edge, the right edge, or the center of the
averaging interval, for purposes of calculating sunposition. For
example, TMY3 data is right-labeled, so 11 AM data represents data from
10 to 11, and sun position should be calculated at 10:30 AM. Currently
SAM and PVSyst use left-labeled interval data and NSRDB uses centered.
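Examples
--------
Sketch (hypothetical names: `demo` is a RadianceObj and the file path is a
placeholder; assumes readTMY returns the (tmydata, metadata) pair described
above):
>>> tmydata, metadata = demo.readTMY('USA_VA_Richmond.tmy3')
>>> metdata = MetObj(tmydata, metadata, label='right')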
"""
def __init__(self, tmydata, metadata, label = 'right'):
import pytz
import pvlib
#import numpy as np
#First prune all GHI = 0 timepoints. New as of 0.4.0
# TODO: is this a good idea? This changes default behavior...
tmydata = tmydata[tmydata.GHI > 0]
# location data. so far needed:
# latitude, longitude, elevation, timezone, city
self.latitude = metadata['latitude']; lat=self.latitude
self.longitude = metadata['longitude']; lon=self.longitude
self.elevation = metadata['altitude']; elev=self.elevation
self.timezone = metadata['TZ']
try:
self.city = metadata['Name'] # readepw version
except KeyError:
self.city = metadata['city'] # pvlib version
#self.location.state_province_region = metadata['State'] # unnecessary
self.datetime = tmydata.index.tolist() # this is tz-aware.
self.ghi = np.array(tmydata.GHI)
self.dhi = np.array(tmydata.DHI)
self.dni = np.array(tmydata.DNI)
try:
self.albedo = np.array(tmydata.Alb)
except AttributeError: # no TMY albedo data
self.albedo = None
# Try and retrieve dewpoint and pressure
try:
self.dewpoint = np.array(tmydata['temp_dew'])
except KeyError:
self.dewpoint = None
try:
self.pressure = np.array(tmydata['atmospheric_pressure'])
except KeyError:
self.pressure = None
try:
self.temp_air = np.array(tmydata['temp_air'])
except KeyError:
self.temp_air = None
try:
self.wind_speed = np.array(tmydata['wind_speed'])
except KeyError:
self.wind_speed = None
# Try and retrieve TrackerAngle
try:
self.meastracker_angle = np.array(tmydata['Tracker Angle (degrees)'])
except KeyError:
self.meastracker_angle= None
#v0.2.5: initialize MetObj with solpos, sunrise/set and corrected time
datetimetz = | pd.DatetimeIndex(self.datetime) | pandas.DatetimeIndex |
import gc
import numpy as np
from pandas import (
DatetimeIndex,
Float64Index,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
date_range,
)
from .pandas_vb_common import tm
class SetOperations:
params = (
["datetime", "date_string", "int", "strings"],
["intersection", "union", "symmetric_difference"],
)
param_names = ["dtype", "method"]
def setup(self, dtype, method):
N = 10 ** 5
dates_left = date_range("1/1/2000", periods=N, freq="T")
fmt = "%Y-%m-%d %H:%M:%S"
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
data = {
"datetime": {"left": dates_left, "right": dates_left[:-1]},
"date_string": {"left": date_str_left, "right": date_str_left[:-1]},
"int": {"left": int_left, "right": int_left[:-1]},
"strings": {"left": str_left, "right": str_left[:-1]},
}
self.left = data[dtype]["left"]
self.right = data[dtype]["right"]
def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
class SetDisjoint:
def setup(self):
N = 10 ** 5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Range:
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10 ** 6, step=3)
self.idx_dec = RangeIndex(start=10 ** 6, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
def time_get_loc_inc(self):
self.idx_inc.get_loc(900_000)
def time_get_loc_dec(self):
self.idx_dec.get_loc(100_000)
def time_iter_inc(self):
for _ in self.idx_inc:
pass
def time_iter_dec(self):
for _ in self.idx_dec:
pass
class IndexEquals:
def setup(self):
idx_large_fast = RangeIndex(100_000)
idx_small_slow = date_range(start="1/1/2012", periods=1)
self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow])
self.idx_non_object = RangeIndex(1)
def time_non_object_equals_multiindex(self):
self.idx_non_object.equals(self.mi_large_slow)
class IndexAppend:
def setup(self):
N = 10_000
self.range_idx = | RangeIndex(0, 100) | pandas.RangeIndex |